repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
waytai/odoo | addons/l10n_fr/wizard/fr_report_compute_resultant.py | 374 | 2312 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
from openerp.osv import fields, osv
class account_cdr_report(osv.osv_memory):
    """Wizard that prints the French 'Compte de Resultat' (CDR) report
    for a selected fiscal year.
    """
    _name = 'account.cdr.report'
    _description = 'Account CDR Report'

    def _get_defaults(self, cr, uid, context=None):
        """Return the id of the current fiscal year (field default)."""
        return self.pool.get('account.fiscalyear').find(cr, uid)

    _columns = {
        'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True),
    }
    _defaults = {
        'fiscalyear_id': _get_defaults,
    }

    def print_cdr_report(self, cr, uid, ids, context=None):
        """Launch the l10n_fr 'resultat' report for the wizard's fiscal year.

        :param ids: ids of the wizard records (only the first one is used)
        :return: an ir.actions report action dictionary
        """
        # Bug fix: the original crashed with AttributeError on
        # ``context.get`` when callers passed no context.
        if context is None:
            context = {}
        data = {
            'ids': context.get('active_ids', []),
            'form': {
                'fiscalyear_id': self.browse(cr, uid, ids, context=context)[0].fiscalyear_id.id,
            },
        }
        return self.pool['report'].get_action(
            cr, uid, ids, 'l10n_fr.report_l10nfrresultat', data=data, context=context
        )
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
anentropic/django-saml2-idp | saml2idp/metadata.py | 2 | 1727 | """
Query metadata from settings.
"""
# Django imports
from django.core.exceptions import ImproperlyConfigured
# Local imports
from saml2idp_metadata import SAML2IDP_CONFIG, SAML2IDP_REMOTES
def get_config_for_acs(acs_url):
    """
    Return the SP configuration dict that handles ``acs_url``.

    :raises ImproperlyConfigured: if no remote is configured for the URL.
    """
    for friendlyname, config in SAML2IDP_REMOTES.items():
        if config['acs_url'] == acs_url:
            return config
    msg = 'SAML2IDP_REMOTES is not configured to handle the AssertionConsumerService at "%s"'
    # Bug fix: the message previously interpolated the undefined name
    # ``resource_name``, raising NameError instead of the intended error.
    raise ImproperlyConfigured(msg % acs_url)
def get_config_for_resource(resource_name):
    """
    Return the SP configuration that handles a deep-link resource_name.
    """
    for config in SAML2IDP_REMOTES.values():
        for link_name, _pattern in get_links(config):
            if link_name == resource_name:
                return config
    raise ImproperlyConfigured(
        'SAML2IDP_REMOTES is not configured to handle a link resource "%s"'
        % resource_name
    )
def get_deeplink_resources():
    """
    Returns a list of resources that can be used for deep-linking.
    """
    # Slash-less resources are simple deeplinks handled by the
    # 'login_init' URL, so only names containing a '/' are collected.
    return [
        resource
        for sp_config in SAML2IDP_REMOTES.values()
        for resource, _pattern in get_links(sp_config)
        if '/' in resource
    ]
def get_links(sp_config):
    """
    Return a list of (resource, pattern) tuples for the 'links' of an SP.

    The config may store links either as a dict mapping resource -> pattern
    or already as a sequence of (resource, pattern) pairs.
    """
    links = sp_config.get('links', [])
    # isinstance() is the idiomatic type check (also accepts dict
    # subclasses); materialize the items so callers always get a list
    # regardless of Python 2 (list) vs Python 3 (view) dict semantics.
    if isinstance(links, dict):
        links = list(links.items())
    return links
| mit |
pranavtbhat/EE219 | project2/c.py | 2 | 4916 | from StdSuites.AppleScript_Suite import vector
import cPickle
from sklearn.feature_extraction import text
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from pandas import DataFrame
import nltk
import operator
import os
import numpy as np
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import RegexpTokenizer
import math
# RegExpTokenizer reduces term count from 29k to 25k
class StemTokenizer(object):
    """Callable tokenizer for CountVectorizer: splits a document on word
    characters and applies the English Snowball stemmer to each token."""

    def __init__(self):
        self.wnl = WordNetLemmatizer()
        self.snowball_stemmer = SnowballStemmer("english", ignore_stopwords=True)
        self.regex_tokenizer = RegexpTokenizer(r'\w+')

    def __call__(self, doc):
        # Lemmatization via self.wnl was tried and abandoned in favour of
        # stemming (see commented variant in the original history).
        tokens = self.regex_tokenizer.tokenize(doc)
        return [self.snowball_stemmer.stem(token) for token in tokens]
def calculate_tcicf(freq, maxFreq, categories, categories_per_term):
    """Compute the TC-ICF weight of a term for one class.

    Augmented term frequency (0.5 + 0.5 * freq/maxFreq) times the
    log10-scaled inverse class frequency.
    """
    augmented_tf = 0.5 + 0.5 * (freq / float(maxFreq))
    inverse_class_freq = math.log10(categories / float(1 + categories_per_term))
    return augmented_tf * inverse_class_freq
# The 20 newsgroup category names; list position doubles as the class index
# used by calculate() below.
all_categories=['comp.graphics',
                'comp.os.ms-windows.misc',
                'comp.sys.ibm.pc.hardware',
                'comp.sys.mac.hardware',
                'comp.windows.x',
                'rec.autos',
                'rec.motorcycles',
                'rec.sport.baseball',
                'rec.sport.hockey',
                'alt.atheism',
                'sci.crypt',
                'sci.electronics',
                'sci.med',
                'sci.space',
                'soc.religion.christian',
                'misc.forsale',
                'talk.politics.guns',
                'talk.politics.mideast',
                'talk.politics.misc',
                'talk.religion.misc'
                ]
# Concatenate every training document of a category into one big string, so
# each list element represents the whole class and term counts become
# per-class counts.
all_docs_per_category=[]
for cat in all_categories:
    categories=[cat]
    # NOTE(review): fetch_20newsgroups downloads the corpus on first run --
    # requires network access.
    all_data = fetch_20newsgroups(subset='train',categories=categories).data
    temp = ""
    for doc in all_data:
        temp= temp + " "+doc
    all_docs_per_category.append(temp)
stop_words = text.ENGLISH_STOP_WORDS
# Ignore words appearing in less than 2 documents or more than 99% documents.
# min_df reduces from 100k to 29k
vectorizer = CountVectorizer(analyzer='word',stop_words=stop_words,ngram_range=(1, 1), tokenizer=StemTokenizer(),
                             lowercase=True,max_df=0.99, min_df=2)
#
# test_corpus = [
#     'This document is the first document.',
#     'This is the second second document.',
#     'And the third one with extra extra extra text.',
#     'Is this the first document?',
# ]
# Rows = categories (20), columns = vocabulary terms.
vectorized_newsgroups_train = vectorizer.fit_transform(all_docs_per_category)
#print "All terms:", vectorizer.get_feature_names()
#print vectorized_newsgroups_train.shape
#print vectorized_newsgroups_train
def calculate():
    """Build the TC-ICF matrix from the module-level count matrix.

    Uses the globals ``vectorized_newsgroups_train`` (categories x terms),
    ``vectorizer`` and ``all_categories``.  Returns the tf_icf matrix
    indexed as tf_icf[term_index][category_index].
    """
    # Highest raw term count within each category (the maxFreq of tc-icf).
    max_term_freq_per_category=[0]*vectorized_newsgroups_train.shape[0]
    # Number of categories in which each term appears at least once.
    category_count_per_term=[0]*vectorized_newsgroups_train.shape[1]
    for i in range(0,vectorized_newsgroups_train.shape[0],1):
        max_term_freq_per_category[i]=np.amax(vectorized_newsgroups_train[i,:])
    for i in range(0,vectorized_newsgroups_train.shape[1],1):
        for j in range(0,vectorized_newsgroups_train.shape[0],1):
            category_count_per_term[i]+= (0 if vectorized_newsgroups_train[j,i]==0 else 1)
    # print vectorized_newsgroups_train.shape
    #
    # print len(max_term_freq_per_category)
    # print len(category_count_per_term)
    # Calculate tc-icf - Notice the matrix is sparse!
    # print len(vectorizer.get_feature_names())
    # NOTE(review): len(get_feature_names()) == shape[1] (terms), so this
    # allocates a terms x terms matrix although only the first
    # shape[0] (= number of categories) columns are ever filled -- confirm
    # whether (terms, categories) was intended.
    tf_icf = np.zeros((len(vectorizer.get_feature_names()), vectorized_newsgroups_train.shape[1]))
    for i in range(vectorized_newsgroups_train.shape[1]):
        # Dense column i: counts of term i across all categories.
        row = vectorized_newsgroups_train[:,i].toarray()
        for j in range(vectorized_newsgroups_train.shape[0]):
            # print row[j,0],max_term_freq_per_category[j],len(all_categories),category_count_per_term[i]
            tf_icf[i][j] = calculate_tcicf(row[j,0],max_term_freq_per_category[j],len(all_categories),category_count_per_term[i])
    # cPickle.dump(tf_icf,open("data/tc_icf.pkl", "wb"))
    return tf_icf
# Disabled on-disk cache of the tf_icf matrix:
# if not (os.path.isfile("data/tc_icf.pkl")):
#     print "Calculating"
#     tf_icf=calculate()
# else:
#     tf_icf=cPickle.load(open("data/tc_icf.pkl", "rb"))
tf_icf=calculate()
# print top 10 significant term for this class
# NOTE: Python 2 only -- uses dict.iteritems() and the print statement.
# Category indices 2, 3, 14, 15 correspond to positions in all_categories.
for category in [2,3,14,15]:
    tficf={}
    term_index=0;
    for term in vectorizer.get_feature_names():
        tficf[term]=tf_icf[term_index][category]
        term_index+=1
    significant_terms = dict(sorted(tficf.iteritems(), key=operator.itemgetter(1), reverse=True)[:10]) #get 10 significant terms
    print significant_terms.keys()
| unlicense |
thebravoman/software_engineering_2016 | hm_term2_rest_api_projects/11a_03_11/gpu_catalog/settings.py | 1 | 3281 | """
Django settings for gpu_catalog project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=0x_pe%*sf#b)qo=cv10z_(vs(#ux9u1&mxnux_^%7$7w0yotw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# NOTE(review): django-cors-headers flag -- permits cross-origin requests
# from ANY origin; acceptable for development, restrict in production.
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'gpus',         # project app
    'gpu_catalog',  # the project package itself
    'corsheaders',  # django-cors-headers (CORS support for the REST API)
]
# Middleware stack.  CorsMiddleware must be listed before CommonMiddleware
# so CORS headers are attached to responses (django-cors-headers docs).
# Fix: 'django.middleware.common.CommonMiddleware' was listed twice; the
# duplicate entry has been removed.
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gpu_catalog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gpu_catalog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| mit |
dealien/Red-Magician | cogs/pokedex.py | 1 | 20781 | # Developed by Redjumpman for Redbot by Twentysix26
# Inspired by Danny/Rapptz pokedex for Robo Danny
# Standard Library
import aiohttp
import random
import re
# Discord and Redbot
import discord
from discord.ext import commands
from __main__ import send_cmd_help
# Third Party Libraries
try: # check if BeautifulSoup4 is installed
from bs4 import BeautifulSoup
soupAvailable = True
except ImportError:
soupAvailable = False
try: # Check if Tabulate is installed
from tabulate import tabulate
tabulateAvailable = True
except ImportError:
tabulateAvailable = False
# Map a generation given as an arabic or roman numeral string to an int 1-6;
# callers fall back to 1 for unknown keys via switcher.get(key, 1).
switcher = {
    "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6,
    'I': 1, 'II': 2, 'III': 3, 'IV': 4, 'V': 5, 'VI': 6,
}
# Pokemon that cannot learn any TMs; the tmset command rejects these.
pokemon_exceptions = ["Beldum", "Burmy", "Cascoon", "Caterpie", "Combee", "Cosmoem", "Cosmog",
                      "Ditto", "Kakuna", "Kricketot", "Magikarp", "Unown", "Weedle", "Wobbuffet",
                      "Wurmple", "Wynaut", "Tynamo", "Metapod", "MissingNo.", "Scatterbug",
                      "Silcoon", "Smeargle"]
# Pokemon that have Alolan regional variants.
alolan_variants = ["Rattata", "Raticate", "Raichu", "Sandshrew", "Sandslash", "Vulpix", "Ninetales",
                   "Diglett", "Dugtrio", "Meowth", "Persian", "Geodude", "Graveler", "Golem",
                   "Grimer", "Muk", "Exeggutor", "Marowak"]
class Pokedex:
"""Search for Pokemon."""
    def __init__(self, bot):
        self.bot = bot          # Red bot instance, used for all say() calls
        self.version = "2.0.4"  # cog version reported by the version command
    @commands.group(pass_context=True, aliases=["dex"])
    async def pokedex(self, ctx):
        """This is the list of pokemon queries you can perform."""
        # Top-level command group; with no subcommand, show the help text.
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)
@pokedex.command(name="version", pass_context=False)
async def _version_pokedex(self):
"""Get pokedex's version."""
await self.bot.say("You are running pokedex version {}".format(self.version))
@pokedex.command(name="pokemon", pass_context=False)
async def _pokemon2_pokedex(self, pokemon: str):
"""Get a pokemon's pokedex info.
Example !pokedex pokemon gengar"""
url = "http://bulbapedia.bulbagarden.net/wiki/{}".format(pokemon)
async with aiohttp.get(url) as response:
soup = BeautifulSoup(await response.text(), "html.parser")
tables = soup.find_all("table", attrs={"class": "roundy"})
side_bar = tables[0]
a_attrs = {"title": "List of Pokémon by National Pokédex number"}
species = side_bar.find("a", attrs={"title": "Pokémon category"}).text.strip()
national_number = side_bar.find("a", attrs=a_attrs).text.strip()
japanese_name = side_bar.find("i").text.strip()
# Abilities
alolan = "Alolan {} Hidden Ability".format(pokemon.title())
rep1 = {alolan: "*({})".format(alolan)}
rep2 = {"Alolan {}".format(pokemon.title): "*(Alolan {})".format(pokemon.title())}
rep3 = {"Battle Bond Ash-Greninja": "Battle Bond (Ash-Greninja)",
"{}".format(pokemon.title()): "({})".format(pokemon.title()),
"Cosplay Pikachu": " (Cosplay Pikachu)",
" Greninja": "",
"Gen. V-V I": "", # Entei and Raikou
"Hidden Ability": "(Hidden Ability)"}
rep1 = dict((re.escape(k), v) for k, v in rep1.items())
pattern1 = re.compile("|".join(rep1.keys()))
rep2 = dict((re.escape(k), v) for k, v in rep2.items())
pattern2 = re.compile("|".join(rep2.keys()))
rep3 = dict((re.escape(k), v) for k, v in rep3.items())
pattern3 = re.compile("|".join(rep3.keys()))
td1 = side_bar.find_all('td', attrs={'class': 'roundy', 'colspan': '2'})
ab_raw = td1[1].find_all('td')
exclusions = ["Cacophony", "CacophonySummer Form", "CacophonyAutumn Form"]
if any(x in [x.get_text(strip=True) for x in ab_raw] for x in exclusions):
ab_strip = [x.get_text(strip=True)
for x in ab_raw if "Cacophony" not in x.get_text()]
ab_strip2 = [re.sub(r'\B(?=[A-Z])', r' ', x) for x in ab_strip]
ab_split = [" ".join([x.split()[0], "({} {})".format(x.split()[1], x.split()[2])])
if "Forme" in x else x for x in ab_strip2]
if [x for x in ab_split if "Forme" in x]:
formes = ab_split
else:
formes = None
ab = [pattern3.sub(lambda m: rep3[re.escape(m.group(0))], x) for x in ab_split]
else:
td_attrs = {'width': '50%', 'style': 'padding-top:3px; padding-bottom:3px'}
td1 = side_bar.find_all('td', attrs=td_attrs)
ab = [td1[0].find('span').get_text()]
formes = None
ab_format = self.abilities_parser(ab, pokemon, formes)
# Types
search_type = side_bar.find_all("table", attrs={"class": "roundy"})
types_raw = search_type[2].find_all('b')
types = [x.text.strip() for x in types_raw if x.text.strip() != "Unknown"]
try:
types_output = "{}/{}".format(types[0], types[1])
if pokemon.title() == "Rotom":
types_temp = ("{0}/{1} (Rotom)\n{2}/{3} (Heat Rotom)\n"
"{4}/{5} (Wash Rotom)\n{6}/{7} (Frost Rotom)\n"
"{8}/{9} (Fan Rotom)\n{10}/{11} (Mow Rotom)\n")
types_output = types_temp.format(*types)
except IndexError:
types_output = types[0]
# Image
img_raw = tables[2].find('a', attrs={'class', 'image'})
img = img_raw.find('img')['src']
if pokemon.title() in ["Sawsbuck", "Deerling"]:
img_raw = tables[2].find_all('a', attrs={'class', 'image'})
img_set = [x.find('img')['src'] for x in img_raw]
img = random.choice(img_set)
# Stats
rep_text = "Other Pokémon with this total"
div = soup.find('div', attrs={'id': 'mw-content-text', 'lang': 'en', 'dir': 'ltr',
'class': 'mw-content-ltr'})
stat_table = div.find('table', attrs={'align': 'left'})
raw_stats = [x.get_text(strip=True) for x in stat_table.find_all('table')]
stats = [x.replace(rep_text, "").replace(":", ": ") for x in raw_stats]
# Weaknesses / Resistances
if pokemon.title() != "Eevee":
wri_table = soup.find('table', attrs={'class': 'roundy', 'width': '100%',
'align': 'center', 'cellpadding': 0})
else:
tb_attrs = {'class': 'roundy', 'width': '100%',
'align': 'center', 'cellpadding': 0,
'style': 'border: 3px solid #6D6D4E; background: #A8A878;'}
wri_table = soup.find('table', attrs=tb_attrs)
wri_stripped = wri_table.text.strip()
wri_raw = wri_stripped.replace("\n", "")
weak, resist = self.weak_resist_builder(wri_raw)
# Color
color = self.color_lookup(types[0])
# Description
table_attrs = {'width': '100%', 'class': 'roundy',
'style': 'background: transparent; border-collapse:collapse;'}
info_search = div.find_all('table', attrs=table_attrs)
info_table = info_search[0].find_all('td', attrs={'class': 'roundy'})
description = info_table[0].text.strip()
# Title
wiki = "[{} {}]({})".format(pokemon.title(), national_number, url)
embed_disc = "\n".join([wiki, japanese_name, species])
# Build embed
embed = discord.Embed(colour=color, description=embed_disc)
embed.set_thumbnail(url=img)
embed.add_field(name="Stats", value="\n".join(stats))
embed.add_field(name="Types", value=types_output)
embed.add_field(name="Resistances", value="\n".join(resist))
embed.add_field(name="Weaknesses", value="\n".join(weak))
embed.add_field(name="Abilities", value="\n".join(ab_format))
embed.set_footer(text=description)
await self.bot.say(embed=embed)
@pokedex.command(name="moveset", pass_context=False)
async def _moveset_pokedex(self, generation: str, pokemon: str):
"""Get a pokemon's moveset by generation(1-6).
Example: !pokedex moveset V pikachu """
if len(pokemon) > 0:
gen = switcher.get(generation, 1)
try:
url = "http://pokemondb.net/pokedex/{}/moves/{}".format(pokemon, gen)
async with aiohttp.get(url) as response:
soup = BeautifulSoup(await response.text(), "html.parser")
table = soup.find('table', attrs={'class': 'data-table wide-table'})
table_body = table.find('tbody')
rows = table_body.find_all('tr')
moves = []
for row in rows:
cols = [ele.text.strip() for ele in row.find_all('td')]
moves.append([ele for ele in cols if ele])
t = tabulate(moves, headers=["Level", "Moves", "Type", "Category", "Power",
"Accuracy"])
await self.bot.say("```{}```".format(t))
except AttributeError:
await self.bot.say("Could not locate a pokemon with that" +
" name. Try a different name.")
else:
await self.bot.say("You need to input a pokemon name to search. "
"Input a name and try again.")
@pokedex.command(name="tmset", pass_context=False)
async def _tmset_pokedex(self, generation: str, pokemon: str):
"""Get a pokemon's learnset by generation(1-6).
Example: !pokedex tmset V pikachu """
if pokemon.title() in pokemon_exceptions:
return await self.bot.say("This pokemon cannot learn TMs.")
gen = switcher.get(generation, 1)
try:
url = "http://pokemondb.net/pokedex/{}/moves/{}".format(pokemon, gen)
async with aiohttp.get(url) as response:
soup = BeautifulSoup(await response.text(), "html.parser")
div1 = soup.find_all('div', attrs={'class': 'col desk-span-6 lap-span-12'})
div2 = div1[1].find_all('div', attrs={'class': 'colset span-full'})
print("THIS MANY DIVS {}".format(len(div2)))
if len(div2) == 1:
index = 0
else:
index = 1
table1 = div2[index].find('table', attrs={'class': 'data-table wide-table'})
table_body = table1.find('tbody')
rows = table_body.find_all('tr')
moves = []
for row in rows:
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
moves.append([ele for ele in cols if ele])
headers = ["TM", "Moves", "Type", "Category", "Power", "Accuracy"]
if len(moves) <= 30:
t = tabulate(moves, headers=headers)
await self.bot.say("```{}```".format(t))
else:
half = int(len(moves) / 2)
part1 = moves[:half]
part2 = moves[half:]
t1 = tabulate(part1, headers=headers)
t2 = tabulate(part2, headers=headers)
await self.bot.say("```{}```".format(t1))
await self.bot.say("```{}```".format(t2))
except IndexError:
await self.bot.say("Oh no! That pokemon was not available in that generation.")
except AttributeError:
await self.bot.say("Could not locate a pokemon with that"
" name. Try a different name.")
@pokedex.command(name="item", pass_context=False)
async def _item_pokedex(self, *, item: str):
"""Get a description of an item.
Use '-' for spaces. Example: !pokedex item master-ball
"""
if len(item) > 0:
item = item.replace(" ", "-").lower()
url = "http://pokemondb.net/item/{}".format(item)
async with aiohttp.get(url) as response:
try:
soup = BeautifulSoup(await response.text(), "html.parser")
divs = soup.find('p')
info = divs.get_text()
await self.bot.say("**{}:**\n```{}```".format(item.title(), info))
except AttributeError:
await self.bot.say("Cannot find an item with this name")
else:
await self.bot.say("Please input an item name.")
@pokedex.command(name="location", pass_context=False)
async def _location_pokedex(self, pokemon: str):
"""Get a pokemon's catch location.
Example !pokedex location voltorb
"""
url = "http://pokemondb.net/pokedex/{}".format(pokemon)
async with aiohttp.get(url) as response:
soup = BeautifulSoup(await response.text(), "html.parser")
loc = []
version = []
div2 = soup.find('div', attrs={'class': 'col desk-span-7 lap-span-12'})
tables = div2.find_all('table', attrs={'class': 'vitals-table'})
for table in tables:
cols = [ele.text.strip() for ele in table.find_all('td')]
loc.append([ele for ele in cols if ele])
tables2 = div2.find_all('table', attrs={'class': 'vitals-table'})
for table2 in tables2:
tcols = [ele.text.strip() for ele in table2.find_all('th')]
version.append([ele for ele in tcols if ele])
# We have to extract out the base index, because it scrapes as
# a list of a list. Then we can stack and tabulate.
extract_loc = loc[0]
extract_version = version[0]
m = list(zip(extract_version, extract_loc))
t = tabulate(m, headers=["Game Version", "Location"])
await self.bot.say("```{}```".format(t))
@pokedex.command(name="evolution", pass_context=False)
async def _evolution_pokedex(self, pokemon: str):
"""Show a pokemon's evolution chain
Example !pokedex evolution bulbasaur"""
url = "http://pokemondb.net/pokedex/{}".format(pokemon)
async with aiohttp.get(url) as response:
try:
soup = BeautifulSoup(await response.text(), "html.parser")
div = soup.find('div', attrs={'class':
'infocard-evo-list'})
evo = div.text.strip()
await self.bot.say("```{}```".format(evo))
except AttributeError:
await self.bot.say("{} does not have an evolution chain".format(pokemon))
def color_lookup(self, key):
color_table = {"Normal": 0x999966, "Fire": 0xFF6600, "Fighting": 0xFF0000,
"Water": 0x3399FF, "Flying": 0x9999FF, "Grass": 0x33FF00, "Poison": 0x660099,
"Electric": 0xFFFF00, "Ground": 0xFFCC33, "Psychic": 0xFF3399,
"Rock": 0xCC9966, "Ice": 0x99FFFF, "Bug": 0x669900, "Dragon": 0x003399,
"Ghost": 0x9933FF, "Dark": 0x333333, "Steel": 0x999999, "Fairy": 0xFF99FF}
color = color_table.get(key, 0xFFFFFF)
return color
    def abilities_parser(self, abilities, pokemon, formes=None):
        """Turn scraped ability strings into Bulbapedia markdown links.

        :param abilities: list of raw ability strings (may contain
            "X or Y" pairs and "(Hidden Ability)" suffixes)
        :param pokemon: pokemon name, used to strip its own suffixes
        :param formes: optional list of forme names to strip from links
        :return: list of "[Name](url)"-style markdown strings
        """
        link = "http://bulbapedia.bulbagarden.net/wiki/"
        # Substrings removed (and spaces underscored) when building the
        # wiki-page part of each URL.
        rep = {"(Hidden Ability)": "",
               "(Ash-Greninja)": "",
               "(Cosplay Pikachu)": "",
               "({})".format(pokemon.title()): "",
               " ": "_"}
        if formes:
            for x in formes:
                rep[x] = ""
        rep = dict((re.escape(k), v) for k, v in rep.items())
        pattern = re.compile("|".join(rep.keys()))
        fmt = "[{}]({}{}) ({})"
        if len(abilities) < 2:
            # NOTE(review): ``[abilities[0]]`` interpolates a *list* into the
            # URL, yielding "(['Name'])" -- looks like a bug; confirm.
            ab_linked = "[{}]({}{})".format(abilities[0], link, [abilities[0]])
        # NOTE(review): ``"or " and pokemon.title() in abilities`` evaluates
        # to just ``pokemon.title() in abilities`` ("or " is always truthy) --
        # confirm the intended condition.
        if [x for x in abilities if "or " and pokemon.title() in abilities]:
            # Split "X or Y" entries into pairs, then link each part.
            abilities = [re.split(' or |\*', x) if 'or' in x else x for x in abilities]
            ab_linked = [fmt.format(re.sub(r'\((.*?)\)', '', x), link,
                                    pattern.sub(lambda m: rep[re.escape(m.group(0))], x),
                                    re.search(r'\((.*?)\)', x).group(1)) if "(Hidden Ability)" in x
                         else "[{0}]({2}{3}) or [{1}]({2}{4})".format(x[0], x[1], link,
                                                                      pattern.sub(lambda m: rep[re.escape(m.group(0))],
                                                                                  x[0]),
                                                                      pattern.sub(lambda m: rep[re.escape(m.group(0))],
                                                                                  x[1]))
                         for x in abilities]
        elif "or " in abilities[0]:
            # Single "X or Y" entry: split it and link both halves.
            split = abilities[0].split('or ', 1)
            del abilities[0]
            abilities.append(split)
            ab_linked = [fmt.format(re.sub(r'\((.*?)\)', '', x), link,
                                    pattern.sub(lambda m: rep[re.escape(m.group(0))], x),
                                    re.search(r'\((.*?)\)', x).group(1)) if "(Hidden Ability)" in x
                         else "[{0}]({2}{3}) or [{1}]({2}{4})".format(x[0], x[1], link,
                                                                      pattern.sub(lambda m: rep[re.escape(m.group(0))],
                                                                                  x[0]),
                                                                      pattern.sub(lambda m: rep[re.escape(m.group(0))],
                                                                                  x[1]))
                         for x in abilities]
            ab_linked.reverse()
        else:
            # Plain entries: link directly, keeping any "(...)" annotation.
            ab_linked = [fmt.format(re.sub(r' \((.*?)\)', '', x), link,
                                    pattern.sub(lambda m: rep[re.escape(m.group(0))], x),
                                    re.search(r'\((.*?)\)', x).group(1)) if "(" in x
                         else "[{}]({}{})".format(x, link,
                                                  pattern.sub(lambda m: rep[re.escape(m.group(0))], x))
                         for x in abilities]
        return ab_linked
def weak_resist_builder(self, raw):
output = []
types = ["Normal", "Flying", "Poison", "Ground", "Rock", "Dragon", "Fighting",
"Bug", "Grass", "Electric", "Fairy", "Psychic", "Ghost", "Steel",
"Fire", "Water", "Ice", "Dark", "None"]
for x in types:
match = re.search(r'{} (\w+)'.format(x), raw)
if match:
item = match.group(0)
if item.startswith('ug'):
item = "B" + item
if "1" in item:
pass
else:
output.append(item)
else:
pass
result = [x.replace('½', "0.5").replace('¼', "0.25") + "x" for x in output]
weak = [x for x in result if "2x" in x or "4x" in x]
resist = [x for x in result if "0.5x" in x or "0.25x" in x]
if len(weak) == 0:
weak = ["None"]
if len(resist) == 0:
resist = ["None"]
return weak, resist
def setup(bot):
    """Cog entry point: verify soft dependencies, then register the cog."""
    if not soupAvailable:
        raise RuntimeError("You need to run \'pip3 install beautifulsoup4\' in command prompt.")
    if not tabulateAvailable:
        raise RuntimeError("You need to run \'pip3 install tabulate\' in command prompt.")
    bot.add_cog(Pokedex(bot))
| gpl-3.0 |
geimer/easybuild-framework | easybuild/toolchains/fft/fftw.py | 3 | 2437 | ##
# Copyright 2012-2014 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for FFTW (Fastest Fourier Transform in the West) as toolchain FFT library.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
from distutils.version import LooseVersion
from easybuild.tools.toolchain.fft import Fft
class Fftw(Fft):
    """FFTW FFT library"""
    FFT_MODULE_NAME = ['FFTW']

    def _set_fftw_variables(self):
        """Derive the FFTW link-library names from the loaded module version."""
        suffix = ''
        version = self.get_software_version(self.FFT_MODULE_NAME)[0]
        # Only FFTW major versions 2 and 3 are supported; v3 library names
        # carry a '3' suffix (libfftw3...), v2 names do not.
        if LooseVersion(version) < LooseVersion('2') or LooseVersion(version) >= LooseVersion('4'):
            self.log.raiseException("_set_fft_variables: FFTW unsupported version %s (major should be 2 or 3)" % version)
        elif LooseVersion(version) > LooseVersion('2'):
            suffix = '3'
        # order matters!
        fftw_libs = ["fftw%s" % suffix]
        if self.options['usempi']:
            # The MPI-enabled library must come before the serial one.
            fftw_libs.insert(0, "fftw%s_mpi" % suffix)
        self.FFT_LIB = fftw_libs

    def _set_fft_variables(self):
        """Set generic FFT variables, then mirror them into FFTW_* aliases."""
        self._set_fftw_variables()
        super(Fftw, self)._set_fft_variables()
        ## TODO can these be replaced with the FFT ones?
        self.variables.join('FFTW_INC_DIR', 'FFT_INC_DIR')
        self.variables.join('FFTW_LIB_DIR', 'FFT_LIB_DIR')
        if 'FFT_STATIC_LIBS' in self.variables:
            self.variables.join('FFTW_STATIC_LIBS', 'FFT_STATIC_LIBS')
| gpl-2.0 |
jhancock93/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Http/autoresthttpinfrastructuretestservice/operations/multiple_responses_operations.py | 14 | 57800 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrest.exceptions import HttpOperationError
from .. import models
class MultipleResponsesOperations(object):
"""MultipleResponsesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
    def __init__(self, client, config, serializer, deserializer):
        # Service client used to build and send HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Configuration of the service client.
        self.config = config
    def get200_model204_no_model_default_error200_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """Send a 200 response with valid payload: {'statusCode': '200'}.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
        """
        # NOTE: AutoRest-generated code; manual edits are lost on regeneration
        # (see the file header).
        # Construct URL
        url = '/http/payloads/200/A/204/none/default/Error/response/200/valid'

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        # Any status other than the two documented success codes is an error.
        if response.status_code not in [200, 204]:
            raise models.ErrorException(self._deserialize, response)

        deserialized = None
        if response.status_code == 200:
            # Only the 200 case carries a model payload; 204 has no body.
            deserialized = self._deserialize('A', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
def get200_model204_no_model_default_error204_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 204 response with no payload.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/204/none/default/Error/response/204/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model204_no_model_default_error201_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 201 response with valid payload: {'statusCode': '201'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/204/none/default/Error/response/201/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model204_no_model_default_error202_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 202 response with no payload:.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/204/none/default/Error/response/202/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model204_no_model_default_error400_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with valid error payload: {'status': 400,
'message': 'client error'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/204/none/default/Error/response/400/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model201_model_default_error200_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with valid payload: {'statusCode': '200'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/201/B/default/Error/response/200/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if response.status_code == 201:
deserialized = self._deserialize('B', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model201_model_default_error201_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 201 response with valid payload: {'statusCode': '201',
'textStatusCode': 'Created'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/201/B/default/Error/response/201/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if response.status_code == 201:
deserialized = self._deserialize('B', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model201_model_default_error400_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with valid payload: {'code': '400', 'message':
'client error'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/201/B/default/Error/response/400/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if response.status_code == 201:
deserialized = self._deserialize('B', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model_a201_model_c404_model_ddefault_error200_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with valid payload: {'statusCode': '200'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/201/C/404/D/default/Error/response/200/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 201, 404]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if response.status_code == 201:
deserialized = self._deserialize('C', response)
if response.status_code == 404:
deserialized = self._deserialize('D', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model_a201_model_c404_model_ddefault_error201_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with valid payload: {'httpCode': '201'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/201/C/404/D/default/Error/response/201/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 201, 404]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if response.status_code == 201:
deserialized = self._deserialize('C', response)
if response.status_code == 404:
deserialized = self._deserialize('D', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model_a201_model_c404_model_ddefault_error404_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with valid payload: {'httpStatusCode': '404'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/201/C/404/D/default/Error/response/404/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 201, 404]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if response.status_code == 201:
deserialized = self._deserialize('C', response)
if response.status_code == 404:
deserialized = self._deserialize('D', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model_a201_model_c404_model_ddefault_error400_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with valid payload: {'code': '400', 'message':
'client error'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: object
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/200/A/201/C/404/D/default/Error/response/400/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 201, 404]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if response.status_code == 201:
deserialized = self._deserialize('C', response)
if response.status_code == 404:
deserialized = self._deserialize('D', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get202_none204_none_default_error202_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 202 response with no payload.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/202/none/204/none/default/Error/response/202/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [202, 204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get202_none204_none_default_error204_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 204 response with no payload.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/202/none/204/none/default/Error/response/204/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [202, 204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get202_none204_none_default_error400_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with valid payload: {'code': '400', 'message':
'client error'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>`
"""
# Construct URL
url = '/http/payloads/202/none/204/none/default/Error/response/400/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [202, 204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get202_none204_none_default_none202_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 202 response with an unexpected payload {'property': 'value'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/202/none/204/none/default/none/response/202/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [202, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get202_none204_none_default_none204_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 204 response with no payload.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/202/none/204/none/default/none/response/204/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [202, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get202_none204_none_default_none400_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with no payload.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/202/none/204/none/default/none/response/400/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [202, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get202_none204_none_default_none400_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with an unexpected payload {'property': 'value'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/202/none/204/none/default/none/response/400/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [202, 204]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_default_model_a200_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with valid payload: {'statusCode': '200'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`MyException<fixtures.acceptancetestshttp.models.MyException>`
"""
# Construct URL
url = '/http/payloads/default/A/response/200/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.MyException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_default_model_a200_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with no payload.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`MyException<fixtures.acceptancetestshttp.models.MyException>`
"""
# Construct URL
url = '/http/payloads/default/A/response/200/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.MyException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_default_model_a400_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with valid payload: {'statusCode': '400'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`MyException<fixtures.acceptancetestshttp.models.MyException>`
"""
# Construct URL
url = '/http/payloads/default/A/response/400/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.MyException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_default_model_a400_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with no payload.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`MyException<fixtures.acceptancetestshttp.models.MyException>`
"""
# Construct URL
url = '/http/payloads/default/A/response/400/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise models.MyException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_default_none200_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with invalid payload: {'statusCode': '200'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/default/none/response/200/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_default_none200_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with no payload.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/default/none/response/200/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
    def get_default_none400_invalid(
            self, custom_headers=None, raw=False, **operation_config):
        """Send a 400 response with invalid payload: {'statusCode': '400'}.

        (Docstring previously said "valid payload"; the URL and the sibling
        ``get_default_none200_invalid`` show this endpoint serves the
        *invalid* payload case.)

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
        """
        # Construct URL
        url = '/http/payloads/default/none/response/400/invalid'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request; any non-2xx status raises.
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code < 200 or response.status_code >= 300:
            raise HttpOperationError(self._deserialize, response)
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
def get_default_none400_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with no payload.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/default/none/response/400/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code < 200 or response.status_code >= 300:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
    def get200_model_a200_none(
            self, custom_headers=None, raw=False, **operation_config):
        """Send a 200 response with no payload, when a payload is expected -
        client should return a null object of the type for model A.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
        """
        # Construct URL
        url = '/http/payloads/200/A/response/200/none'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request; only the documented 200 is a success.
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise HttpOperationError(self._deserialize, response)
        # Deserialize the body into model A (None when there is no payload).
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('A', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
def get200_model_a200_valid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with payload {'statusCode': '200'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/200/A/response/200/valid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model_a200_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 200 response with invalid payload {'statusCodeInvalid': '200'}.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/200/A/response/200/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get200_model_a400_none(
self, custom_headers=None, raw=False, **operation_config):
"""Send a 400 response with no payload client should treat as an http
error with no error model.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = '/http/payloads/200/A/response/400/none'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('A', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def get200_model_a400_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """Send a 400 response with payload {'statusCode': '400'}.

        (Docstring previously said "200 response"; the URL
        ``.../response/400/valid`` shows the server replies 400 here.)

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
        """
        # Construct URL
        url = '/http/payloads/200/A/response/400/valid'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request; only 200 counts as success, so the
        # server's 400 is raised as HttpOperationError.
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise HttpOperationError(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('A', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def get200_model_a400_invalid(
            self, custom_headers=None, raw=False, **operation_config):
        """Send a 400 response with invalid payload {'statusCodeInvalid': '400'}.

        (Docstring previously said "200 response"; the URL
        ``.../response/400/invalid`` shows the server replies 400 here.)

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
        """
        # Construct URL
        url = '/http/payloads/200/A/response/400/invalid'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request; non-200 statuses raise.
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise HttpOperationError(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('A', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    def get200_model_a202_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """Send a 202 response with payload {'statusCode': '202'}.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`A <fixtures.acceptancetestshttp.models.A>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises:
         :class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
        """
        # Construct URL
        url = '/http/payloads/200/A/response/202/valid'
        # Construct parameters
        query_parameters = {}
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)
        # Construct and send request. Note: only the documented 200 status is
        # accepted, so the server's 202 deliberately raises here (that is the
        # point of this acceptance test).
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)
        if response.status_code not in [200]:
            raise HttpOperationError(self._deserialize, response)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('A', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
| mit |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/whoosh/codec/whoosh3.py | 30 | 42321 | # Copyright 2012 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module implements a "codec" for writing/reading Whoosh X indexes.
"""
import struct
from array import array
from collections import defaultdict
from whoosh import columns, formats
from whoosh.compat import b, bytes_type, string_type, integer_types
from whoosh.compat import dumps, loads, iteritems, xrange
from whoosh.codec import base
from whoosh.filedb import compound, filetables
from whoosh.matching import ListMatcher, ReadTooFar, LeafMatcher
from whoosh.reading import TermInfo, TermNotFound
from whoosh.system import emptybytes
from whoosh.system import _SHORT_SIZE, _INT_SIZE, _LONG_SIZE, _FLOAT_SIZE
from whoosh.system import pack_ushort, unpack_ushort
from whoosh.system import pack_int, unpack_int, pack_long, unpack_long
from whoosh.util.numlists import delta_encode, delta_decode
from whoosh.util.numeric import length_to_byte, byte_to_length
try:
import zlib
except ImportError:
zlib = None
# This byte sequence is written at the start of a posting list to identify the
# codec/version
WHOOSH3_HEADER_MAGIC = b("W3Bl")
# Column type to store field length info (one unsigned byte per document)
LENGTHS_COLUMN = columns.NumericColumn("B", default=0)
# Column type to store pointers to vector posting lists (unsigned 32-bit)
VECTOR_COLUMN = columns.NumericColumn("I")
# Column type to store vector posting list lengths (signed 32-bit; -1 is
# used by readers to mean "length unknown")
VECTOR_LEN_COLUMN = columns.NumericColumn("i")
# Column type to store values of stored fields (pickled, then compressed)
STORED_COLUMN = columns.PickleColumn(columns.CompressedBytesColumn())
class W3Codec(base.Codec):
    """Codec for reading/writing the Whoosh 3 on-disk segment format.
    Acts as a factory for the writer/reader objects defined below and
    centralizes the file-extension naming scheme.
    """
    # File extensions
    TERMS_EXT = ".trm"  # Term index
    POSTS_EXT = ".pst"  # Term postings
    VPOSTS_EXT = ".vps"  # Vector postings
    COLUMN_EXT = ".col"  # Per-document value columns
    def __init__(self, blocklimit=128, compression=3, inlinelimit=1):
        # blocklimit: max postings buffered per on-disk block
        # compression: zlib level passed to the postings writer
        # inlinelimit: posting lists shorter than this are stored inline
        #   in the term info instead of the postings file
        self._blocklimit = blocklimit
        self._compression = compression
        self._inlinelimit = inlinelimit
    # def automata(self):
    # Per-document value writer
    def per_document_writer(self, storage, segment):
        return W3PerDocWriter(self, storage, segment)
    # Inverted index writer
    def field_writer(self, storage, segment):
        return W3FieldWriter(self, storage, segment)
    # Postings
    def postings_writer(self, dbfile, byteids=False):
        return W3PostingsWriter(dbfile, blocklimit=self._blocklimit,
                                byteids=byteids, compression=self._compression,
                                inlinelimit=self._inlinelimit)
    def postings_reader(self, dbfile, terminfo, format_, term=None, scorer=None):
        if terminfo.is_inlined():
            # If the postings were inlined into the terminfo object, pull them
            # out and use a ListMatcher to wrap them in a Matcher interface
            ids, weights, values = terminfo.inlined_postings()
            m = ListMatcher(ids, weights, values, format_, scorer=scorer,
                            term=term, terminfo=terminfo)
        else:
            # Otherwise read them from their extent in the postings file
            offset, length = terminfo.extent()
            m = W3LeafMatcher(dbfile, offset, length, format_, term=term,
                              scorer=scorer)
        return m
    # Readers
    def per_document_reader(self, storage, segment):
        return W3PerDocReader(storage, segment)
    def terms_reader(self, storage, segment):
        tiname = segment.make_filename(self.TERMS_EXT)
        tilen = storage.file_length(tiname)
        tifile = storage.open_file(tiname)
        postfile = segment.open_file(storage, self.POSTS_EXT)
        return W3TermsReader(self, tifile, tilen, postfile)
    # Graph methods provided by CodecWithGraph
    # Columns
    def supports_columns(self):
        return True
    @classmethod
    def column_filename(cls, segment, fieldname):
        # e.g. "<segment>.<fieldname>.col"
        ext = "".join((".", fieldname, cls.COLUMN_EXT))
        return segment.make_filename(ext)
    # Segments and generations
    def new_segment(self, storage, indexname):
        return W3Segment(self, indexname)
# Common functions
def _vecfield(fieldname):
return "_%s_vec" % fieldname
def _lenfield(fieldname):
return "_%s_len" % fieldname
# Per-doc information writer
class W3PerDocWriter(base.PerDocWriterWithColumns):
    """Writes per-document information (stored fields, field lengths,
    vectors, and column values) for a new W3 segment.

    Documents must be added in docnum order via start_doc()/finish_doc();
    close() flushes the column data to per-field files.
    """

    def __init__(self, codec, storage, segment):
        self._codec = codec
        self._storage = storage
        self._segment = segment

        # All columns are buffered in a temporary compound file and split
        # into real per-field files in close().
        tempst = storage.temp_storage("%s.tmp" % segment.indexname)
        self._cols = compound.CompoundWriter(tempst)
        self._colwriters = {}
        self._create_column("_stored", STORED_COLUMN)

        self._fieldlengths = defaultdict(int)  # fieldname -> total length
        self._doccount = 0
        self._docnum = None
        self._storedfields = None
        self._indoc = False  # True between start_doc() and finish_doc()
        self.is_closed = False

        # We'll wait to create the vector file until someone actually tries
        # to add a vector
        self._vpostfile = None

    def _create_file(self, ext):
        return self._segment.create_file(self._storage, ext)

    def _has_column(self, fieldname):
        return fieldname in self._colwriters

    def _create_column(self, fieldname, column):
        writers = self._colwriters
        if fieldname in writers:
            raise Exception("Already added column %r" % fieldname)
        f = self._cols.create_file(fieldname)
        writers[fieldname] = column.writer(f)

    def _get_column(self, fieldname):
        return self._colwriters[fieldname]

    def _prep_vectors(self):
        self._vpostfile = self._create_file(W3Codec.VPOSTS_EXT)
        # We'll use offset==0 as a marker for "no vectors", so we can't start
        # postings at position 0, so just write a few header bytes :)
        self._vpostfile.write(b("VPST"))

    def start_doc(self, docnum):
        """Begin buffering document *docnum* (must equal the current count)."""
        if self._indoc:
            raise Exception("Called start_doc when already in a doc")
        if docnum != self._doccount:
            raise Exception("Called start_doc(%r) was expecting %r"
                            % (docnum, self._doccount))

        self._docnum = docnum
        self._doccount += 1
        self._storedfields = {}
        self._indoc = True

    def add_field(self, fieldname, fieldobj, value, length):
        """Record the stored value and indexed length of one field."""
        if value is not None:
            self._storedfields[fieldname] = value
        if length:
            # Add byte to length column
            lenfield = _lenfield(fieldname)
            lb = length_to_byte(length)
            self.add_column_value(lenfield, LENGTHS_COLUMN, lb)
            # Add length to total field length
            self._fieldlengths[fieldname] += length

    def add_vector_items(self, fieldname, fieldobj, items):
        """Write the vector postings for one field of the current document."""
        if self._vpostfile is None:
            self._prep_vectors()

        # Write vector postings
        vpostwriter = self._codec.postings_writer(self._vpostfile, byteids=True)
        vpostwriter.start_postings(fieldobj.vector, W3TermInfo())
        for text, weight, vbytes in items:
            vpostwriter.add_posting(text, weight, vbytes)
        # finish_postings() returns terminfo object
        vinfo = vpostwriter.finish_postings()

        # Add row to vector lookup column
        vecfield = _vecfield(fieldname)  # Compute vector column name
        offset, length = vinfo.extent()
        self.add_column_value(vecfield, VECTOR_COLUMN, offset)
        self.add_column_value(vecfield + "L", VECTOR_LEN_COLUMN, length)

    def finish_doc(self):
        """Flush the current document's stored fields and leave doc mode."""
        sf = self._storedfields
        if sf:
            self.add_column_value("_stored", STORED_COLUMN, sf)
            sf.clear()
        self._indoc = False

    def _column_filename(self, fieldname):
        return W3Codec.column_filename(self._segment, fieldname)

    def close(self):
        # BUG FIX: was ``if self._indoc is not None:``, which is always true
        # because _indoc is a bool -- so finish_doc() ran unconditionally.
        # Test the flag itself, as the comment below intends.
        if self._indoc:
            # Called close without calling finish_doc
            self.finish_doc()

        self._segment._fieldlengths = self._fieldlengths

        # Finish open columns and close the columns writer
        for writer in self._colwriters.values():
            writer.finish(self._doccount)
        self._cols.save_as_files(self._storage, self._column_filename)

        # If vectors were written, close the vector writers
        if self._vpostfile:
            self._vpostfile.close()

        self.is_closed = True
class W3FieldWriter(base.FieldWriter):
    """Writes the inverted index (term index + postings file) for a segment.
    Usage order is start_field() -> start_term() -> add()* -> finish_term()
    -> ... -> finish_field() -> close().
    """
    def __init__(self, codec, storage, segment):
        self._codec = codec
        self._storage = storage
        self._segment = segment
        # State of the current field/term being written
        self._fieldname = None
        self._fieldid = None
        self._btext = None
        self._fieldobj = None
        self._format = None
        # Term index file; "fieldmap" (fieldname -> numeric id) is stored in
        # the index's extras so readers can reconstruct it
        _tifile = self._create_file(W3Codec.TERMS_EXT)
        self._tindex = filetables.OrderedHashWriter(_tifile)
        self._fieldmap = self._tindex.extras["fieldmap"] = {}
        self._postfile = self._create_file(W3Codec.POSTS_EXT)
        self._postwriter = None
        self._infield = False
        self.is_closed = False
    def _create_file(self, ext):
        # Create a segment-named file with the given extension.
        return self._segment.create_file(self._storage, ext)
    def start_field(self, fieldname, fieldobj):
        """Begin writing terms for *fieldname*, assigning it a numeric id."""
        fmap = self._fieldmap
        if fieldname in fmap:
            self._fieldid = fmap[fieldname]
        else:
            self._fieldid = len(fmap)
            fmap[fieldname] = self._fieldid
        self._fieldname = fieldname
        self._fieldobj = fieldobj
        self._format = fieldobj.format
        self._infield = True
        # Start a new postwriter for this field
        self._postwriter = self._codec.postings_writer(self._postfile)
    def start_term(self, btext):
        """Begin the posting list for term *btext* (bytes) in the current field."""
        if self._postwriter is None:
            raise Exception("Called start_term before start_field")
        self._btext = btext
        self._postwriter.start_postings(self._fieldobj.format, W3TermInfo())
    def add(self, docnum, weight, vbytes, length):
        # Add one posting for the current term.
        self._postwriter.add_posting(docnum, weight, vbytes, length)
    def finish_term(self):
        """Flush the current term's postings and index its term info."""
        terminfo = self._postwriter.finish_postings()
        # Add row to term info table; key is (field id, term bytes)
        keybytes = pack_ushort(self._fieldid) + self._btext
        valbytes = terminfo.to_bytes()
        self._tindex.add(keybytes, valbytes)
    # FieldWriterWithGraph.add_spell_word
    def finish_field(self):
        if not self._infield:
            raise Exception("Called finish_field before start_field")
        self._infield = False
        self._postwriter = None
    def close(self):
        self._tindex.close()
        self._postfile.close()
        self.is_closed = True
# Reader objects
class W3PerDocReader(base.PerDocumentReader):
    """Reads per-document information (field lengths, vectors, stored
    fields, column values) from a W3 segment.
    """

    def __init__(self, storage, segment):
        self._storage = storage
        self._segment = segment
        self._doccount = segment.doc_count_all()
        self._vpostfile = None  # opened lazily by _prep_vectors()
        self._colfiles = {}  # fieldname -> (file, offset, length)
        self._readers = {}  # fieldname -> cached column reader
        self._minlengths = {}  # fieldname -> cached minimum field length
        self._maxlengths = {}  # fieldname -> cached maximum field length

    def close(self):
        """Close all open column files and the vector postings file."""
        for colfile, _, _ in self._colfiles.values():
            colfile.close()
        if self._vpostfile:
            self._vpostfile.close()

    def doc_count(self):
        return self._doccount - self._segment.deleted_count()

    def doc_count_all(self):
        return self._doccount

    # Deletions

    def has_deletions(self):
        return self._segment.has_deletions()

    def is_deleted(self, docnum):
        return self._segment.is_deleted(docnum)

    def deleted_docs(self):
        return self._segment.deleted_docs()

    # Columns

    def has_column(self, fieldname):
        filename = W3Codec.column_filename(self._segment, fieldname)
        return self._storage.file_exists(filename)

    def _get_column_file(self, fieldname):
        filename = W3Codec.column_filename(self._segment, fieldname)
        length = self._storage.file_length(filename)
        colfile = self._storage.open_file(filename)
        return colfile, 0, length

    def column_reader(self, fieldname, column):
        if fieldname not in self._colfiles:
            self._colfiles[fieldname] = self._get_column_file(fieldname)
        colfile, offset, length = self._colfiles[fieldname]
        return column.reader(colfile, offset, length, self._doccount)

    # Lengths

    def _cached_reader(self, fieldname, column):
        # Return a memoized column reader, or None if the column is missing.
        if fieldname in self._readers:
            return self._readers[fieldname]
        else:
            if not self.has_column(fieldname):
                return None
            reader = self.column_reader(fieldname, column)
            self._readers[fieldname] = reader
            return reader

    def doc_field_length(self, docnum, fieldname, default=0):
        """Return the stored length of *fieldname* in document *docnum*,
        or *default* when no length is recorded.
        """
        # NOTE(review): arguably this should be ``>=`` since valid docnums
        # are 0..doccount-1; kept as-is to preserve existing behavior.
        if docnum > self._doccount:
            raise IndexError("Asked for docnum %r of %d"
                             % (docnum, self._doccount))

        lenfield = _lenfield(fieldname)
        reader = self._cached_reader(lenfield, LENGTHS_COLUMN)
        if reader is None:
            return default
        lbyte = reader[docnum]
        if lbyte:
            return byte_to_length(lbyte)
        # BUG FIX: previously fell through and returned None when the length
        # byte was 0; honor the caller-supplied default instead.
        return default

    def field_length(self, fieldname):
        return self._segment._fieldlengths.get(fieldname, 0)

    def _minmax_length(self, fieldname, op, cache):
        # Compute min/max field length via *op* (min or max) over the length
        # column, memoized in *cache*.
        if fieldname in cache:
            return cache[fieldname]

        lenfield = _lenfield(fieldname)
        reader = self._cached_reader(lenfield, LENGTHS_COLUMN)
        length = byte_to_length(op(reader))
        cache[fieldname] = length
        return length

    def min_field_length(self, fieldname):
        return self._minmax_length(fieldname, min, self._minlengths)

    def max_field_length(self, fieldname):
        return self._minmax_length(fieldname, max, self._maxlengths)

    # Vectors

    def _prep_vectors(self):
        f = self._segment.open_file(self._storage, W3Codec.VPOSTS_EXT)
        self._vpostfile = f

    def _vector_extent(self, docnum, fieldname):
        """Return the (offset, length) of the vector posting list for the
        given document/field; length is -1 when no length column exists.
        """
        if docnum > self._doccount:
            raise IndexError("Asked for document %r of %d"
                             % (docnum, self._doccount))
        vecfield = _vecfield(fieldname)  # Compute vector column name

        # Get the offset from the vector offset column
        offset = self._cached_reader(vecfield, VECTOR_COLUMN)[docnum]

        # Get the length from the length column, if it exists, otherwise return
        # -1 for the length (backwards compatibility with old dev versions).
        # BUG FIX: read with VECTOR_LEN_COLUMN (the type the writer used, see
        # W3PerDocWriter.add_vector_items) and index the reader with docnum --
        # previously this used VECTOR_COLUMN and set ``length = [docnum]``,
        # returning a one-element list instead of the stored length.
        lreader = self._cached_reader(vecfield + "L", VECTOR_LEN_COLUMN)
        if lreader:
            length = lreader[docnum]
        else:
            length = -1

        return offset, length

    def has_vector(self, docnum, fieldname):
        return (self.has_column(_vecfield(fieldname))
                and self._vector_extent(docnum, fieldname))

    def vector(self, docnum, fieldname, format_):
        """Return a matcher over the vector postings for a document/field."""
        if self._vpostfile is None:
            self._prep_vectors()
        offset, length = self._vector_extent(docnum, fieldname)
        m = W3LeafMatcher(self._vpostfile, offset, length, format_,
                          byteids=True)
        return m

    # Stored fields

    def stored_fields(self, docnum):
        """Return the stored-field dict for *docnum* (empty dict if none)."""
        reader = self._cached_reader("_stored", STORED_COLUMN)
        v = reader[docnum]
        if v is None:
            v = {}
        return v
class W3FieldCursor(base.FieldCursor):
    """Cursor over the terms of a single field in the on-disk term index.
    Positions itself on the field's first term at construction; next()
    advances and returns None when the field's terms are exhausted.
    """
    def __init__(self, tindex, fieldname, keycoder, keydecoder, fieldobj):
        self._tindex = tindex
        self._fieldname = fieldname
        self._keycoder = keycoder
        self._keydecoder = keydecoder
        self._fieldobj = fieldobj
        # Position of the first key with this field's prefix
        prefixbytes = keycoder(fieldname, b'')
        self._startpos = self._tindex.closest_key_pos(prefixbytes)
        self._pos = self._startpos
        self._text = None
        self._datapos = None
        self._datalen = None
        # Advance onto the first term of the field
        self.next()
    def first(self):
        """Rewind to the field's first term and return it."""
        self._pos = self._startpos
        return self.next()
    def find(self, term):
        """Position at the first term >= *term* and return it."""
        if not isinstance(term, bytes_type):
            term = self._fieldobj.to_bytes(term)
        key = self._keycoder(self._fieldname, term)
        self._pos = self._tindex.closest_key_pos(key)
        return self.next()
    def next(self):
        """Advance to the next term of the field; return it, or None if past
        the end of the field (the key decodes to a different fieldname).
        """
        if self._pos is not None:
            keyrng = self._tindex.key_and_range_at(self._pos)
            if keyrng is not None:
                keybytes, datapos, datalen = keyrng
                fname, text = self._keydecoder(keybytes)
                if fname == self._fieldname:
                    # Next entry starts right after this entry's data
                    self._pos = datapos + datalen
                    self._text = self._fieldobj.from_bytes(text)
                    self._datapos = datapos
                    self._datalen = datalen
                    return self._text
        # Exhausted: invalidate the cursor state
        self._text = self._pos = self._datapos = self._datalen = None
        return None
    def text(self):
        return self._text
    def term_info(self):
        """Return the W3TermInfo for the current term, or None if invalid."""
        if self._pos is None:
            return None
        databytes = self._tindex.dbfile.get(self._datapos, self._datalen)
        return W3TermInfo.from_bytes(databytes)
    def is_valid(self):
        return self._pos is not None
class W3TermsReader(base.TermsReader):
    """Reads term information and posting lists from a segment's term index
    and postings file. Keys in the term index are (field id, term bytes).
    """
    def __init__(self, codec, dbfile, length, postfile):
        self._codec = codec
        self._dbfile = dbfile
        self._tindex = filetables.OrderedHashReader(dbfile, length)
        # fieldname -> numeric id, stored by W3FieldWriter in the extras
        self._fieldmap = self._tindex.extras["fieldmap"]
        self._postfile = postfile
        # Inverse mapping: numeric id -> fieldname
        self._fieldunmap = [None] * len(self._fieldmap)
        for fieldname, num in iteritems(self._fieldmap):
            self._fieldunmap[num] = fieldname
    def _keycoder(self, fieldname, tbytes):
        # Encode (fieldname, term bytes) into an index key; unknown fields
        # map to 65535 so the key sorts past all real fields
        assert isinstance(tbytes, bytes_type), "tbytes=%r" % tbytes
        fnum = self._fieldmap.get(fieldname, 65535)
        return pack_ushort(fnum) + tbytes
    def _keydecoder(self, keybytes):
        # Decode an index key back into (fieldname, term bytes)
        fieldid = unpack_ushort(keybytes[:_SHORT_SIZE])[0]
        return self._fieldunmap[fieldid], keybytes[_SHORT_SIZE:]
    def _range_for_key(self, fieldname, tbytes):
        return self._tindex.range_for_key(self._keycoder(fieldname, tbytes))
    def __contains__(self, term):
        return self._keycoder(*term) in self._tindex
    def indexed_field_names(self):
        return self._fieldmap.keys()
    def cursor(self, fieldname, fieldobj):
        """Return a W3FieldCursor over the terms of *fieldname*."""
        tindex = self._tindex
        coder = self._keycoder
        decoder = self._keydecoder
        return W3FieldCursor(tindex, fieldname, coder, decoder, fieldobj)
    def terms(self):
        """Yield (fieldname, termbytes) for every indexed term."""
        keydecoder = self._keydecoder
        return (keydecoder(keybytes) for keybytes in self._tindex.keys())
    def terms_from(self, fieldname, prefix):
        """Yield (fieldname, termbytes) starting at the given field/prefix."""
        prefixbytes = self._keycoder(fieldname, prefix)
        keydecoder = self._keydecoder
        return (keydecoder(keybytes) for keybytes
                in self._tindex.keys_from(prefixbytes))
    def items(self):
        """Yield ((fieldname, termbytes), W3TermInfo) for every term."""
        tidecoder = W3TermInfo.from_bytes
        keydecoder = self._keydecoder
        return ((keydecoder(keybytes), tidecoder(valbytes))
                for keybytes, valbytes in self._tindex.items())
    def items_from(self, fieldname, prefix):
        """Like items(), starting at the given field/prefix."""
        prefixbytes = self._keycoder(fieldname, prefix)
        tidecoder = W3TermInfo.from_bytes
        keydecoder = self._keydecoder
        return ((keydecoder(keybytes), tidecoder(valbytes))
                for keybytes, valbytes in self._tindex.items_from(prefixbytes))
    def term_info(self, fieldname, tbytes):
        """Return the W3TermInfo for a term; raise TermNotFound if missing."""
        key = self._keycoder(fieldname, tbytes)
        try:
            return W3TermInfo.from_bytes(self._tindex[key])
        except KeyError:
            raise TermNotFound("No term %s:%r" % (fieldname, tbytes))
    def frequency(self, fieldname, tbytes):
        # Read only the weight field of the term info, not the whole record
        datapos = self._range_for_key(fieldname, tbytes)[0]
        return W3TermInfo.read_weight(self._dbfile, datapos)
    def doc_frequency(self, fieldname, tbytes):
        # Read only the doc-frequency field of the term info
        datapos = self._range_for_key(fieldname, tbytes)[0]
        return W3TermInfo.read_doc_freq(self._dbfile, datapos)
    def matcher(self, fieldname, tbytes, format_, scorer=None):
        """Return a matcher over the postings of the given term."""
        terminfo = self.term_info(fieldname, tbytes)
        m = self._codec.postings_reader(self._postfile, terminfo, format_,
                                        term=(fieldname, tbytes), scorer=scorer)
        return m
    def close(self):
        self._tindex.close()
        self._postfile.close()
# Postings
class W3PostingsWriter(base.PostingsWriter):
"""This object writes posting lists to the postings file. It groups postings
into blocks and tracks block level statistics to makes it easier to skip
through the postings.
"""
    def __init__(self, postfile, blocklimit, byteids=False, compression=3,
                 inlinelimit=1):
        # postfile: file object the posting blocks are written to
        # blocklimit: max postings buffered before a block is flushed
        # byteids: if True, posting IDs are byte strings (vectors) rather
        #   than integer document numbers
        # compression: zlib level for block compression
        # inlinelimit: posting lists shorter than this are inlined into the
        #   term info instead of written to the postings file
        self._postfile = postfile
        self._blocklimit = blocklimit
        self._byteids = byteids
        self._compression = compression
        self._inlinelimit = inlinelimit
        self._blockcount = 0  # number of blocks flushed for the current term
        self._format = None
        self._terminfo = None
def written(self):
return self._blockcount > 0
def start_postings(self, format_, terminfo):
# Start a new term
if self._terminfo:
# If self._terminfo is not None, that means we are already in a term
raise Exception("Called start in a term")
assert isinstance(format_, formats.Format)
self._format = format_
# Reset block count
self._blockcount = 0
# Reset block bufferg
self._new_block()
# Remember terminfo object passed to us
self._terminfo = terminfo
# Remember where we started in the posting file
self._startoffset = self._postfile.tell()
def add_posting(self, id_, weight, vbytes, length=None):
# Add a posting to the buffered block
# If the number of buffered postings == the block limit, write out the
# buffered block and reset before adding this one
if len(self._ids) >= self._blocklimit:
self._write_block()
# Check types
if self._byteids:
assert isinstance(id_, string_type), "id_=%r" % id_
else:
assert isinstance(id_, integer_types), "id_=%r" % id_
assert isinstance(weight, (int, float)), "weight=%r" % weight
assert isinstance(vbytes, bytes_type), "vbytes=%r" % vbytes
assert length is None or isinstance(length, integer_types)
self._ids.append(id_)
self._weights.append(weight)
if weight > self._maxweight:
self._maxweight = weight
if vbytes:
self._values.append(vbytes)
if length:
minlength = self._minlength
if minlength is None or length < minlength:
self._minlength = length
if length > self._maxlength:
self._maxlength = length
def finish_postings(self):
terminfo = self._terminfo
# If we have fewer than "inlinelimit" postings in this posting list,
# "inline" the postings into the terminfo instead of writing them to
# the posting file
if not self.written() and len(self) < self._inlinelimit:
terminfo.add_block(self)
terminfo.set_inline(self._ids, self._weights, self._values)
else:
# If there are leftover items in the current block, write them out
if self._ids:
self._write_block(last=True)
startoffset = self._startoffset
length = self._postfile.tell() - startoffset
terminfo.set_extent(startoffset, length)
# Clear self._terminfo to indicate we're between terms
self._terminfo = None
# Return the current terminfo object
return terminfo
def _new_block(self):
# Reset block buffer
# List of IDs (docnums for regular posting list, terms for vector PL)
self._ids = [] if self._byteids else array("I")
# List of weights
self._weights = array("f")
# List of encoded payloads
self._values = []
# Statistics
self._minlength = None
self._maxlength = 0
self._maxweight = 0
def _write_block(self, last=False):
# Write the buffered block to the postings file
# If this is the first block, write a small header first
if not self._blockcount:
self._postfile.write(WHOOSH3_HEADER_MAGIC)
# Add this block's statistics to the terminfo object, which tracks the
# overall statistics for all term postings
self._terminfo.add_block(self)
# Minify the IDs, weights, and values, and put them in a tuple
data = (self._mini_ids(), self._mini_weights(), self._mini_values())
# Pickle the tuple
databytes = dumps(data)
# If the pickle is less than 20 bytes, don't bother compressing
if len(databytes) < 20:
comp = 0
# Compress the pickle (if self._compression > 0)
comp = self._compression
if comp:
databytes = zlib.compress(databytes, comp)
# Make a tuple of block info. The posting reader can check this info
# and decide whether to skip the block without having to decompress the
# full block data
#
# - Number of postings in block
# - Last ID in block
# - Maximum weight in block
# - Compression level
# - Minimum length byte
# - Maximum length byte
ids = self._ids
infobytes = dumps((len(ids), ids[-1], self._maxweight, comp,
length_to_byte(self._minlength),
length_to_byte(self._maxlength),
))
# Write block length
postfile = self._postfile
blocklength = len(infobytes) + len(databytes)
if last:
# If this is the last block, use a negative number
blocklength *= -1
postfile.write_int(blocklength)
# Write block info
postfile.write(infobytes)
# Write block data
postfile.write(databytes)
self._blockcount += 1
# Reset block buffer
self._new_block()
# Methods to reduce the byte size of the various lists
def _mini_ids(self):
# Minify IDs
ids = self._ids
if not self._byteids:
ids = delta_encode(ids)
return tuple(ids)
def _mini_weights(self):
# Minify weights
weights = self._weights
if all(w == 1.0 for w in weights):
return None
elif all(w == weights[0] for w in weights):
return weights[0]
else:
return tuple(weights)
def _mini_values(self):
# Minify values
fixedsize = self._format.fixed_value_size()
values = self._values
if fixedsize is None or fixedsize < 0:
vs = tuple(values)
elif fixedsize == 0:
vs = None
else:
vs = emptybytes.join(values)
return vs
# Block stats methods
def __len__(self):
# Returns the number of unwritten buffered postings
return len(self._ids)
def min_id(self):
# First ID in the buffered block
return self._ids[0]
def max_id(self):
# Last ID in the buffered block
return self._ids[-1]
def min_length(self):
# Shortest field length in the buffered block
return self._minlength
def max_length(self):
# Longest field length in the buffered block
return self._maxlength
def max_weight(self):
# Highest weight in the buffered block
return self._maxweight
class W3LeafMatcher(LeafMatcher):
    """Reads on-disk postings from the postings file and presents the
    :class:`whoosh.matching.Matcher` interface.
    """

    def __init__(self, postfile, startoffset, length, format_, term=None,
                 byteids=None, scorer=None):
        # postfile: file object containing the posting blocks
        # startoffset/length: extent of this term's postings in the file
        # byteids: True if IDs are byte strings (vector postings)
        self._postfile = postfile
        self._startoffset = startoffset
        self._length = length
        self.format = format_
        self._term = term
        self._byteids = byteids
        self.scorer = scorer
        self._fixedsize = self.format.fixed_value_size()
        # Read the header tag at the start of the postings
        self._read_header()
        # "Reset" to read the first block
        self.reset()

    def _read_header(self):
        # Seek to the start of the postings and check the header tag
        postfile = self._postfile
        postfile.seek(self._startoffset)
        magic = postfile.read(4)
        if magic != WHOOSH3_HEADER_MAGIC:
            raise Exception("Block tag error %r" % magic)
        # Remember the base offset (start of postings, after the header)
        self._baseoffset = postfile.tell()

    def reset(self):
        # Reset block stats
        self._blocklength = None
        self._maxid = None
        self._maxweight = None
        self._compression = None
        self._minlength = None
        self._maxlength = None
        self._lastblock = False
        self._atend = False
        # Consume first block
        self._goto(self._baseoffset)

    def _goto(self, position):
        # Read the posting block at the given position
        postfile = self._postfile
        # Reset block data -- we'll lazy load the data from the new block as
        # needed
        self._data = None
        self._ids = None
        self._weights = None
        self._values = None
        # Reset pointer into the block
        self._i = 0
        # Seek to the start of the block
        postfile.seek(position)
        # Read the block length
        length = postfile.read_int()
        # If the block length is negative, that means this is the last block
        if length < 0:
            self._lastblock = True
            length *= -1
        # Remember the offset of the next block
        self._nextoffset = position + _INT_SIZE + length
        # Read the pickled block info tuple
        info = postfile.read_pickle()
        # Remember the offset of the block's data
        self._dataoffset = postfile.tell()
        # Decompose the info tuple to set the current block info
        (self._blocklength, self._maxid, self._maxweight, self._compression,
         mnlen, mxlen) = info
        self._minlength = byte_to_length(mnlen)
        self._maxlength = byte_to_length(mxlen)

    def _next_block(self):
        if self._atend:
            # We were already at the end, and yet somebody called _next_block()
            # again, so something is wrong somewhere
            raise Exception("No next block")
        elif self._lastblock:
            # Reached the end of the postings
            self._atend = True
        else:
            # Go to the next block
            self._goto(self._nextoffset)

    def _skip_to_block(self, skipwhile):
        # Skip blocks as long as the skipwhile() function returns True
        skipped = 0
        while self.is_active() and skipwhile():
            self._next_block()
            skipped += 1
        return skipped

    def is_active(self):
        # True while there are postings left to read
        return not self._atend and self._i < self._blocklength

    def id(self):
        # Get the current ID (docnum for regular postings, term for vector)
        # If we haven't loaded the block IDs yet, load them now
        if self._ids is None:
            self._read_ids()
        return self._ids[self._i]

    def weight(self):
        # Get the weight for the current posting
        # If we haven't loaded the block weights yet, load them now
        if self._weights is None:
            self._read_weights()
        return self._weights[self._i]

    def value(self):
        # Get the value for the current posting
        # If we haven't loaded the block values yet, load them now
        if self._values is None:
            self._read_values()
        return self._values[self._i]

    def next(self):
        # Move to the next posting; returns True if that crossed into a new
        # block
        # Increment the in-block pointer
        self._i += 1
        # If we reached the end of the block, move to the next block
        if self._i == self._blocklength:
            self._next_block()
            return True
        else:
            return False

    def skip_to(self, targetid):
        # Skip to the next ID equal to or greater than the given target ID
        if not self.is_active():
            raise ReadTooFar
        # If we're already at or past target ID, do nothing
        if targetid <= self.id():
            return
        # Skip to the block that would contain the target ID
        block_max_id = self.block_max_id
        if targetid > block_max_id():
            self._skip_to_block(lambda: targetid > block_max_id())
        # Iterate through the IDs in the block until we find or pass the
        # target
        while self.is_active() and self.id() < targetid:
            self.next()

    def skip_to_quality(self, minquality):
        # Skip blocks until we find one that might exceed the given minimum
        # quality
        block_quality = self.block_quality
        # If the quality of this block is already higher than the minimum,
        # do nothing
        if block_quality() > minquality:
            return 0
        # Skip blocks as long as the block quality is not greater than the
        # minimum
        return self._skip_to_block(lambda: block_quality() <= minquality)

    def block_min_id(self):
        if self._ids is None:
            self._read_ids()
        return self._ids[0]

    def block_max_id(self):
        return self._maxid

    def block_min_length(self):
        return self._minlength

    def block_max_length(self):
        return self._maxlength

    def block_max_weight(self):
        return self._maxweight

    def _read_data(self):
        # Load block data tuple from disk
        datalen = self._nextoffset - self._dataoffset
        b = self._postfile.get(self._dataoffset, datalen)
        # Decompress the pickled data if necessary
        if self._compression:
            b = zlib.decompress(b)
        # Unpickle the data tuple and save it in an attribute
        self._data = loads(b)

    def _read_ids(self):
        # If we haven't loaded the data from disk yet, load it now
        if self._data is None:
            self._read_data()
        ids = self._data[0]
        # De-minify the IDs
        if not self._byteids:
            ids = tuple(delta_decode(ids))
        self._ids = ids

    def _read_weights(self):
        # If we haven't loaded the data from disk yet, load it now
        if self._data is None:
            self._read_data()
        weights = self._data[1]
        # De-minify the weights
        postcount = self._blocklength
        if weights is None:
            self._weights = array("f", (1.0 for _ in xrange(postcount)))
        elif isinstance(weights, float):
            self._weights = array("f", (weights for _ in xrange(postcount)))
        else:
            self._weights = weights

    def _read_values(self):
        # If we haven't loaded the data from disk yet, load it now
        if self._data is None:
            self._read_data()
        # De-minify the values
        fixedsize = self._fixedsize
        vs = self._data[2]
        if fixedsize is None or fixedsize < 0:
            self._values = vs
        # BUGFIX: was "fixedsize is 0" -- identity comparison with an int
        # literal is implementation-dependent; use equality
        elif fixedsize == 0:
            self._values = (None,) * self._blocklength
        else:
            assert isinstance(vs, bytes_type)
            self._values = tuple(vs[i:i + fixedsize]
                                 for i in xrange(0, len(vs), fixedsize))
# Term info implementation
class W3TermInfo(TermInfo):
    # On-disk terminfo record: a fixed-size stats header followed by either
    # an inlined (pickled) posting list or a (offset, length) pointer into
    # the postings file. Fixed header layout ("!BfIBBfII"):
    # B | Flags
    # f | Total weight
    # I | Total doc freq
    # B | Min length (encoded as byte)
    # B | Max length (encoded as byte)
    # f | Max weight
    # I | Minimum (first) ID
    # I | Maximum (last) ID
    _struct = struct.Struct("!BfIBBfII")

    def __init__(self, *args, **kwargs):
        TermInfo.__init__(self, *args, **kwargs)
        # Extent into the postings file (mutually exclusive with _inlined)
        self._offset = None
        self._length = None
        # Inlined (ids, weights, values) tuple for very short posting lists
        self._inlined = None

    def add_block(self, block):
        # Fold one posting block's statistics into the term totals
        self._weight += sum(block._weights)
        self._df += len(block)
        ml = block.min_length()
        if self._minlength is None:
            self._minlength = ml
        else:
            self._minlength = min(self._minlength, ml)
        self._maxlength = max(self._maxlength, block.max_length())
        self._maxweight = max(self._maxweight, block.max_weight())
        if self._minid is None:
            self._minid = block.min_id()
        self._maxid = block.max_id()

    def set_extent(self, offset, length):
        # Record where this term's postings live in the postings file
        self._offset = offset
        self._length = length

    def extent(self):
        return self._offset, self._length

    def set_inlined(self, ids, weights, values):
        # Store a short posting list directly on the terminfo
        self._inlined = (tuple(ids), tuple(weights), tuple(values))

    def is_inlined(self):
        return self._inlined is not None

    def inlined_postings(self):
        return self._inlined

    def to_bytes(self):
        # Serialize: fixed header, then either pickled inline postings or
        # a packed (offset, length) pointer
        isinlined = self.is_inlined()
        # Encode the lengths as 0-255 values
        minlength = (0 if self._minlength is None
                     else length_to_byte(self._minlength))
        maxlength = length_to_byte(self._maxlength)
        # Convert None values to the out-of-band NO_ID constant so they can be
        # stored as unsigned ints
        minid = 0xffffffff if self._minid is None else self._minid
        maxid = 0xffffffff if self._maxid is None else self._maxid
        # Pack the term info into bytes
        st = self._struct.pack(isinlined, self._weight, self._df,
                               minlength, maxlength, self._maxweight,
                               minid, maxid)
        if isinlined:
            # Postings are inlined - dump them using the pickle protocol
            postbytes = dumps(self._inlined, -1)
        else:
            postbytes = pack_long(self._offset) + pack_int(self._length)
        st += postbytes
        return st

    @classmethod
    def from_bytes(cls, s):
        # Inverse of to_bytes(): rebuild a terminfo from its serialized form
        st = cls._struct
        vals = st.unpack(s[:st.size])
        terminfo = cls()
        flags = vals[0]
        terminfo._weight = vals[1]
        terminfo._df = vals[2]
        terminfo._minlength = byte_to_length(vals[3])
        terminfo._maxlength = byte_to_length(vals[4])
        terminfo._maxweight = vals[5]
        terminfo._minid = None if vals[6] == 0xffffffff else vals[6]
        terminfo._maxid = None if vals[7] == 0xffffffff else vals[7]
        if flags:
            # Postings are stored inline
            terminfo._inlined = loads(s[st.size:])
        else:
            # Last bytes are pointer into posting file and length
            offpos = st.size
            lenpos = st.size + _LONG_SIZE
            terminfo._offset = unpack_long(s[offpos:lenpos])[0]
            terminfo._length = unpack_int(s[lenpos:lenpos + _INT_SIZE])
        return terminfo

    # The following readers pull single fields straight out of the stored
    # bytes using fixed offsets into the header, without a full decode

    @classmethod
    def read_weight(cls, dbfile, datapos):
        # Total weight: float right after the 1-byte flags field
        return dbfile.get_float(datapos + 1)

    @classmethod
    def read_doc_freq(cls, dbfile, datapos):
        # Doc frequency: uint after flags + weight
        return dbfile.get_uint(datapos + 1 + _FLOAT_SIZE)

    @classmethod
    def read_min_and_max_length(cls, dbfile, datapos):
        # Min/max length bytes after flags + weight + doc freq
        lenpos = datapos + 1 + _FLOAT_SIZE + _INT_SIZE
        ml = byte_to_length(dbfile.get_byte(lenpos))
        xl = byte_to_length(dbfile.get_byte(lenpos + 1))
        return ml, xl

    @classmethod
    def read_max_weight(cls, dbfile, datapos):
        # Max weight float after flags + weight + doc freq + 2 length bytes
        weightspos = datapos + 1 + _FLOAT_SIZE + _INT_SIZE + 2
        return dbfile.get_float(weightspos)
# Segment implementation
class W3Segment(base.Segment):
    """Represents one segment of a W3 index: its document count and the set
    of deleted document numbers."""

    def __init__(self, codec, indexname, doccount=0, segid=None, deleted=None):
        self.indexname = indexname
        # _random_id() is inherited from base.Segment
        self.segid = self._random_id() if segid is None else segid
        self._codec = codec
        self._doccount = doccount
        self._deleted = deleted  # None or a set of deleted docnums
        self.compound = False

    def codec(self, **kwargs):
        return self._codec

    def set_doc_count(self, dc):
        self._doccount = dc

    def doc_count_all(self):
        # Total docs in the segment, including deleted ones
        return self._doccount

    def deleted_count(self):
        if self._deleted is None:
            return 0
        return len(self._deleted)

    def deleted_docs(self):
        if self._deleted is None:
            return ()
        else:
            return iter(self._deleted)

    def delete_document(self, docnum, delete=True):
        """Mark a document deleted, or un-delete it if delete=False."""
        if delete:
            if self._deleted is None:
                self._deleted = set()
            self._deleted.add(docnum)
        elif self._deleted is not None:
            # BUGFIX: set.clear() takes no arguments; the old code called
            # self._deleted.clear(docnum), which raised TypeError whenever a
            # document was un-deleted. discard() removes it if present.
            self._deleted.discard(docnum)

    def is_deleted(self, docnum):
        if self._deleted is None:
            return False
        return docnum in self._deleted
| mit |
beernarrd/gramps | gramps/gen/utils/docgen/tabbeddoc.py | 10 | 1567 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2003 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class TabbedDoc:
    """Abstract base for tab-delimited document writers.

    The base class only records the requested column count and an optional
    creator name; every document-structure hook below is a no-op for
    subclasses to override.
    """

    def __init__(self, columns):
        self.columns = columns
        self.name = ""

    def creator(self, name):
        """Record the name of the person/program creating the document."""
        self.name = name

    def open(self, filename):
        """Open the output file (no-op in the base class)."""
        pass

    def close(self):
        """Close the output file (no-op in the base class)."""
        pass

    def start_page(self):
        pass

    def end_page(self):
        pass

    def start_paragraph(self):
        pass

    def end_paragraph(self):
        pass

    def start_table(self):
        pass

    def end_table(self):
        pass

    def start_row(self):
        pass

    def end_row(self):
        pass

    def write_cell(self, text):
        """Write one cell's text (no-op in the base class)."""
        pass
| gpl-2.0 |
odoobgorg/odoo | addons/website_hr_recruitment/controllers/main.py | 43 | 4069 | # -*- coding: utf-8 -*-
import base64
from openerp import SUPERUSER_ID
from openerp import http
from openerp.tools.translate import _
from openerp.http import request
from openerp.addons.website.models.website import slug
class website_hr_recruitment(http.Controller):
    # Public website controller for the job-offers pages (/jobs).

    @http.route([
        '/jobs',
        '/jobs/country/<model("res.country"):country>',
        '/jobs/department/<model("hr.department"):department>',
        '/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>',
        '/jobs/office/<int:office_id>',
        '/jobs/country/<model("res.country"):country>/office/<int:office_id>',
        '/jobs/department/<model("hr.department"):department>/office/<int:office_id>',
        '/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>/office/<int:office_id>',
    ], type='http', auth="public", website=True)
    def jobs(self, country=None, department=None, office_id=None, **kwargs):
        """Render the jobs index, optionally filtered by country,
        department and/or office address."""
        env = request.env(context=dict(request.env.context, show_address=True, no_tag_br=True))
        Country = env['res.country']
        Jobs = env['hr.job']
        # List jobs available to current UID
        job_ids = Jobs.search([], order="website_published desc,no_of_recruitment desc").ids
        # Browse jobs as superuser, because address is restricted
        jobs = Jobs.sudo().browse(job_ids)
        # Deduce departments and offices of those jobs
        departments = set(j.department_id for j in jobs if j.department_id)
        offices = set(j.address_id for j in jobs if j.address_id)
        countries = set(o.country_id for o in offices if o.country_id)
        # Default search by user country (GeoIP), unless an explicit filter
        # or the all_countries flag was given
        if not (country or department or office_id or kwargs.get('all_countries')):
            country_code = request.session['geoip'].get('country_code')
            if country_code:
                countries_ = Country.search([('code', '=', country_code)])
                country = countries_[0] if countries_ else None
            # Drop the GeoIP country if no job is actually located there
            if not any(j for j in jobs if j.address_id and j.address_id.country_id == country):
                country = False
        # Filter the matching one
        # NOTE(review): j.address_id is an Odoo recordset, which is never
        # None (an unset many2one is an empty/falsy recordset), so the
        # "is None" test below looks always-False, silently hiding jobs
        # with no address from the country-filtered view -- confirm against
        # the ORM version this targets.
        if country and not kwargs.get('all_countries'):
            jobs = (j for j in jobs if j.address_id is None or j.address_id.country_id and j.address_id.country_id.id == country.id)
        if department:
            jobs = (j for j in jobs if j.department_id and j.department_id.id == department.id)
        if office_id:
            jobs = (j for j in jobs if j.address_id and j.address_id.id == office_id)
        # Render page
        return request.website.render("website_hr_recruitment.index", {
            'jobs': jobs,
            'countries': countries,
            'departments': departments,
            'offices': offices,
            'country_id': country,
            'department_id': department,
            'office_id': office_id,
        })

    @http.route('/jobs/add', type='http', auth="user", website=True)
    def jobs_add(self, **kwargs):
        # Create a placeholder offer and open it in the website editor
        job = request.env['hr.job'].create({
            'name': _('New Job Offer'),
        })
        return request.redirect("/jobs/detail/%s?enable_editor=1" % slug(job))

    @http.route('/jobs/detail/<model("hr.job"):job>', type='http', auth="public", website=True)
    def jobs_detail(self, job, **kwargs):
        """Render the public detail page for one job offer."""
        return request.render("website_hr_recruitment.detail", {
            'job': job,
            'main_object': job,
        })

    @http.route('/jobs/apply/<model("hr.job"):job>', type='http', auth="public", website=True)
    def jobs_apply(self, job, **kwargs):
        """Render the application form, restoring any validation error and
        previously-entered values stashed in the session."""
        error = {}
        default = {}
        if 'website_hr_recruitment_error' in request.session:
            error = request.session.pop('website_hr_recruitment_error')
            default = request.session.pop('website_hr_recruitment_default')
        return request.render("website_hr_recruitment.apply", {
            'job': job,
            'error': error,
            'default': default,
        })
| gpl-3.0 |
rgerkin/neuroConstruct | lib/jython/Lib/site-packages/xlrd/xlsx.py | 17 | 31809 | ##
# Portions copyright (c) 2008-2012 Stephen John Machin, Lingfo Pty Ltd
# This module is part of the xlrd package, which is released under a BSD-style licence.
##
from __future__ import print_function, unicode_literals
DEBUG = 0
import sys
import re
from .timemachine import *
from .book import Book, Name
from .biffh import error_text_from_code, XLRDError, XL_CELL_BLANK, XL_CELL_TEXT, XL_CELL_BOOLEAN, XL_CELL_ERROR
from .formatting import is_date_format_string, Format, XF
from .sheet import Sheet
DLF = sys.stdout # Default Log File
ET = None
ET_has_iterparse = False
def ensure_elementtree_imported(verbosity, logfile):
    """Lazily import the best available ElementTree implementation.

    Sets the module globals ET and ET_has_iterparse. Safe to call more than
    once: a no-op after the first successful import. The fallback chain is
    ordered fastest-first; IronPython is special-cased because its
    iterparse is unimplemented.
    """
    global ET, ET_has_iterparse
    if ET is not None:
        return
    if "IronPython" in sys.version:
        import xml.etree.ElementTree as ET
        #### 2.7.2.1: fails later with
        #### NotImplementedError: iterparse is not supported on IronPython. (CP #31923)
    else:
        try: import xml.etree.cElementTree as ET
        except ImportError:
            try: import cElementTree as ET
            except ImportError:
                try: import lxml.etree as ET
                except ImportError:
                    try: import xml.etree.ElementTree as ET
                    except ImportError:
                        try: import elementtree.ElementTree as ET
                        except ImportError:
                            raise Exception("Failed to import an ElementTree implementation")
    # Probe whether this implementation actually supports iterparse
    if hasattr(ET, 'iterparse'):
        _dummy_stream = BYTES_IO(b'')
        try:
            ET.iterparse(_dummy_stream)
            ET_has_iterparse = True
        except NotImplementedError:
            pass
    if verbosity:
        etree_version = repr([
            (item, getattr(ET, item))
            for item in ET.__dict__.keys()
            if item.lower().replace('_', '') == 'version'
            ])
        print(ET.__file__, ET.__name__, etree_version, ET_has_iterparse, file=logfile)
def split_tag(tag):
    """Split a Clark-notation tag into (namespace-with-braces, localname).

    "{uri}cell" -> ("{uri}", "cell"); a tag without a namespace comes back
    as ("", tag).
    """
    brace = tag.rfind('}')
    if brace < 1:
        return '', tag
    return tag[:brace + 1], tag[brace + 1:]
def augment_keys(adict, uri):
    """For every existing key, add a duplicate entry keyed by uri + key.

    *uri* must already be enclosed in {} (Clark notation). Mutates *adict*
    in place.
    """
    for key in list(adict):
        adict[uri + key] = adict[key]
_UPPERCASE_1_REL_INDEX = {} # Used in fast conversion of column names (e.g. "XFD") to indices (16383)
for _x in xrange(26):
_UPPERCASE_1_REL_INDEX["ABCDEFGHIJKLMNOPQRSTUVWXYZ"[_x]] = _x + 1
for _x in "123456789":
_UPPERCASE_1_REL_INDEX[_x] = 0
del _x
def cell_name_to_rowx_colx(cell_name, letter_value=_UPPERCASE_1_REL_INDEX):
    # Convert an A1-style cell name into 0-based (rowx, colx):
    # "A1" -> (0, 0), "Z9" -> (8, 25), "AA1" -> (0, 26), "XFD..." -> colx 16383.
    # letter_value maps 'A'-'Z' to 1-26 and digits to 0, so hitting a 0
    # signals the start of the row number.
    colx = 0
    charx = -1
    try:
        for c in cell_name:
            charx += 1
            lv = letter_value[c]
            if lv:
                colx = colx * 26 + lv
            else: # start of row number; can't be '0'
                # Convert 1-based accumulated column to 0-based
                colx = colx - 1
                assert 0 <= colx < X12_MAX_COLS
                break
    except KeyError:
        # NOTE: relies on the loop variable c leaking out of the for loop
        raise Exception('Unexpected character %r in cell name %r' % (c, cell_name))
    rowx = int(cell_name[charx:]) - 1
    return rowx, colx
error_code_from_text = {}
for _code, _text in error_text_from_code.items():
error_code_from_text[_text] = _code
# === X12 === Excel 2007 .xlsx ===============================================
U_SSML12 = "{http://schemas.openxmlformats.org/spreadsheetml/2006/main}"
U_ODREL = "{http://schemas.openxmlformats.org/officeDocument/2006/relationships}"
U_PKGREL = "{http://schemas.openxmlformats.org/package/2006/relationships}"
U_CP = "{http://schemas.openxmlformats.org/package/2006/metadata/core-properties}"
U_DC = "{http://purl.org/dc/elements/1.1/}"
U_DCTERMS = "{http://purl.org/dc/terms/}"
XML_SPACE_ATTR = "{http://www.w3.org/XML/1998/namespace}space"
XML_WHITESPACE = "\t\n \r"
X12_MAX_ROWS = 2 ** 20
X12_MAX_COLS = 2 ** 14
V_TAG = U_SSML12 + 'v' # cell child: value
F_TAG = U_SSML12 + 'f' # cell child: formula
IS_TAG = U_SSML12 + 'is' # cell child: inline string
def unescape(s,
        subber=re.compile(r'_x[0-9A-Fa-f]{4,4}_', re.UNICODE).sub,
        repl=lambda mobj: unichr(int(mobj.group(0)[2:6], 16)),
        ):
    """Decode OOXML _xHHHH_ character escapes in *s*.

    Strings without an underscore can't contain an escape, so they are
    returned untouched without running the regex.
    """
    return subber(repl, s) if "_" in s else s
def cooked_text(self, elem):
    """Return the element's text as unescaped unicode ('' when absent)."""
    text = elem.text
    if text is None:
        return ''
    # xml:space="preserve" keeps surrounding whitespace; otherwise strip it
    if elem.get(XML_SPACE_ATTR) != 'preserve':
        text = text.strip(XML_WHITESPACE)
    return ensure_unicode(unescape(text))
def get_text_from_si_or_is(self, elem, r_tag=U_SSML12+'r', t_tag=U_SSML12 +'t'):
    """Concatenate the text of an <si> (shared string) or <is> (inline
    string) element, covering both plain <t> children and rich-text <r>
    runs. Returns unescaped unicode."""
    pieces = []
    for child in elem:
        tag = child.tag
        if tag == t_tag:
            text = cooked_text(self, child)
            if text:  # note: .text attribute can be None
                pieces.append(text)
        elif tag == r_tag:
            # Rich-text run: collect the <t> grandchildren
            for rchild in child:
                if rchild.tag == t_tag:
                    text = cooked_text(self, rchild)
                    if text:
                        pieces.append(text)
    return ''.join(pieces)
def map_attributes(amap, elem, obj):
    """Copy XML attributes of *elem* onto *obj* as directed by *amap*.

    Each row of *amap* is (xml_attr, obj_attr, converter_or_constant):
    an empty xml_attr stores the third item on obj_attr as a constant;
    an empty obj_attr skips the attribute entirely; otherwise the raw
    attribute value is run through the converter and stored.
    """
    for xml_attr, obj_attr, cnv_func_or_const in amap:
        if not xml_attr:
            # Constant entry: nothing to read from the element
            setattr(obj, obj_attr, cnv_func_or_const)
        elif obj_attr:
            setattr(obj, obj_attr, cnv_func_or_const(elem.get(xml_attr)))
        # else: deliberately ignored (#### FIX ME #### upstream)
def cnv_ST_Xstring(s):
    """Convert an optional ST_Xstring attribute to unicode ('' for None)."""
    return "" if s is None else ensure_unicode(s)
def cnv_xsd_unsignedInt(s):
    """Convert an xsd:unsignedInt attribute string to an int.

    Returns None for a missing or empty attribute. Raises ValueError on a
    negative value -- the previous `assert value >= 0` was a validation
    check, which `python -O` silently strips.
    """
    if not s:
        return None
    value = int(s)
    if value < 0:
        raise ValueError("invalid xsd:unsignedInt value: %r" % s)
    return value
def cnv_xsd_boolean(s):
    """Convert an xsd:boolean attribute string to 0 or 1.

    A missing/empty attribute counts as false; any token outside the six
    legal spellings raises ValueError.
    """
    if not s:
        return 0
    lookup = {"1": 1, "true": 1, "on": 1, "0": 0, "false": 0, "off": 0}
    if s in lookup:
        return lookup[s]
    raise ValueError("unexpected xsd:boolean value: %r" % s)
_defined_name_attribute_map = (
("name", "name", cnv_ST_Xstring, ),
("comment", "", cnv_ST_Xstring, ),
("customMenu", "", cnv_ST_Xstring, ),
("description", "", cnv_ST_Xstring, ),
("help", "", cnv_ST_Xstring, ),
("statusBar", "", cnv_ST_Xstring, ),
("localSheetId", "scope", cnv_xsd_unsignedInt, ),
("hidden", "hidden", cnv_xsd_boolean, ),
("function", "func", cnv_xsd_boolean, ),
("vbProcedure", "vbasic", cnv_xsd_boolean, ),
("xlm", "macro", cnv_xsd_boolean, ),
("functionGroupId", "funcgroup", cnv_xsd_unsignedInt, ),
("shortcutKey", "", cnv_ST_Xstring, ),
("publishToServer", "", cnv_xsd_boolean, ),
("workbookParameter", "", cnv_xsd_boolean, ),
("", "any_err", 0, ),
("", "any_external", 0, ),
("", "any_rel", 0, ),
("", "basic_formula_len", 0, ),
("", "binary", 0, ),
("", "builtin", 0, ),
("", "complex", 0, ),
("", "evaluated", 0, ),
("", "excel_sheet_index", 0, ),
("", "excel_sheet_num", 0, ),
("", "option_flags", 0, ),
("", "result", None, ),
("", "stack", None, ),
)
def make_name_access_maps(bk):
    # Build the two defined-name lookup structures on the Book object:
    #   bk.name_and_scope_map: (name.lower(), scope) -> Name object
    #   bk.name_map:           name.lower() -> list of Name objects, sorted
    #                          in scope order
    name_and_scope_map = {} # (name.lower(), scope): Name_object
    name_map = {} # name.lower() : list of Name_objects (sorted in scope order)
    num_names = len(bk.name_obj_list)
    for namex in xrange(num_names):
        nobj = bk.name_obj_list[namex]
        name_lcase = nobj.name.lower()
        key = (name_lcase, nobj.scope)
        if key in name_and_scope_map:
            msg = 'Duplicate entry %r in name_and_scope_map' % (key, )
            if 0:
                raise XLRDError(msg)
            else:
                # Duplicates are tolerated (last one wins); only report
                # when verbose
                if bk.verbosity:
                    print(msg, file=bk.logfile)
        name_and_scope_map[key] = nobj
        if name_lcase in name_map:
            name_map[name_lcase].append((nobj.scope, nobj))
        else:
            name_map[name_lcase] = [(nobj.scope, nobj)]
    for key in name_map.keys():
        alist = name_map[key]
        # NOTE(review): sorts (scope, Name) tuples; if two entries share a
        # scope this falls through to comparing Name objects -- confirm
        # Name defines an ordering (TypeError on Python 3 otherwise).
        alist.sort()
        name_map[key] = [x[1] for x in alist]
    bk.name_and_scope_map = name_and_scope_map
    bk.name_map = name_map
class X12General(object):
    """Common machinery for the X12 (xlsx) part processors.

    Subclasses provide a ``tag2meth`` dict mapping element tags to handler
    functions; process_stream() parses the XML part and dispatches each
    element through it, then calls the finish_off() hook.
    """

    def process_stream(self, stream, heading=None):
        if self.verbosity >= 2 and heading is not None:
            fprintf(self.logfile, "\n=== %s ===\n", heading)
        self.tree = ET.parse(stream)
        lookup = self.tag2meth.get
        for elem in self.tree.getiterator():
            if self.verbosity >= 3:
                self.dump_elem(elem)
            handler = lookup(elem.tag)
            if handler is not None:
                handler(self, elem)
        self.finish_off()

    def finish_off(self):
        """Hook called after the whole tree has been processed; default no-op."""
        pass

    def dump_elem(self, elem):
        """Write a one-element debug dump to the logfile."""
        fprintf(self.logfile,
                "===\ntag=%r len=%d attrib=%r text=%r tail=%r\n",
                split_tag(elem.tag)[1], len(elem), elem.attrib, elem.text, elem.tail)

    def dumpout(self, fmt, *vargs):
        """Write an indented, %-formatted debug line to the logfile."""
        line = (12 * ' ' + fmt + '\n') % vargs
        self.logfile.write(line)
class X12Book(X12General):
    # Processes xl/workbook.xml plus the package relationships and core
    # properties parts, populating the Book object with sheet metadata.

    def __init__(self, bk, logfile=DLF, verbosity=False):
        self.bk = bk
        self.logfile = logfile
        self.verbosity = verbosity
        self.bk.nsheets = 0
        self.bk.props = {}
        # Relationship id -> zip path / relationship type, filled by
        # process_rels() and consumed by do_sheet()
        self.relid2path = {}
        self.relid2reltype = {}
        self.sheet_targets = [] # indexed by sheetx
        self.sheetIds = [] # indexed by sheetx

    # Maps core-properties element tags to (props key, converter)
    core_props_menu = {
        U_CP+"lastModifiedBy": ("last_modified_by", cnv_ST_Xstring),
        U_DC+"creator": ("creator", cnv_ST_Xstring),
        U_DCTERMS+"modified": ("modified", cnv_ST_Xstring),
        U_DCTERMS+"created": ("created", cnv_ST_Xstring),
        }

    def process_coreprops(self, stream):
        # Parse docProps/core.xml into bk.props and bk.user_name
        if self.verbosity >= 2:
            fprintf(self.logfile, "\n=== coreProps ===\n")
        self.tree = ET.parse(stream)
        getmenu = self.core_props_menu.get
        props = {}
        for elem in self.tree.getiterator():
            if self.verbosity >= 3:
                self.dump_elem(elem)
            menu = getmenu(elem.tag)
            if menu:
                attr, func = menu
                value = func(elem.text)
                props[attr] = value
        self.bk.user_name = props.get('last_modified_by') or props.get('creator')
        self.bk.props = props
        if self.verbosity >= 2:
            fprintf(self.logfile, "props: %r\n", props)
        self.finish_off()

    def process_rels(self, stream):
        # Parse a .rels part, recording each relationship's type and target
        # path keyed by its relationship id
        if self.verbosity >= 2:
            fprintf(self.logfile, "\n=== Relationships ===\n")
        tree = ET.parse(stream)
        r_tag = U_PKGREL + 'Relationship'
        for elem in tree.findall(r_tag):
            rid = elem.get('Id')
            target = elem.get('Target')
            reltype = elem.get('Type').split('/')[-1]
            if self.verbosity >= 2:
                self.dumpout('Id=%r Type=%r Target=%r', rid, reltype, target)
            self.relid2reltype[rid] = reltype
            # An absolute target ("/xl/...") is package-rooted; a relative
            # one is resolved against the xl/ folder
            if target.startswith('/'):
                self.relid2path[rid] = target[1:] # drop the /
            else:
                self.relid2path[rid] = 'xl/' + target

    def do_defined_name(self, elem):
        # Build a Name object from one <definedName> element
        #### UNDER CONSTRUCTION ####
        if 0 and self.verbosity >= 3:
            self.dump_elem(elem)
        nobj = Name()
        bk = self.bk
        nobj.bk = bk
        nobj.name_index = len(bk.name_obj_list)
        bk.name_obj_list.append(nobj)
        nobj.name = elem.get('name')
        nobj.raw_formula = None # compiled bytecode formula -- not in XLSX
        nobj.formula_text = cooked_text(self, elem)
        map_attributes(_defined_name_attribute_map, elem, nobj)
        if nobj.scope is None:
            nobj.scope = -1 # global
        if nobj.name.startswith("_xlnm."):
            nobj.builtin = 1
        if self.verbosity >= 2:
            nobj.dump(header='=== Name object ===')

    def do_defined_names(self, elem):
        # Process all <definedName> children, then build the lookup maps
        for child in elem:
            self.do_defined_name(child)
        make_name_access_maps(self.bk)

    def do_sheet(self, elem):
        # Register one <sheet> element: create the Sheet object and record
        # where its worksheet part lives
        bk = self.bk
        sheetx = bk.nsheets
        # print elem.attrib
        rid = elem.get(U_ODREL + 'id')
        sheetId = int(elem.get('sheetId'))
        name = unescape(ensure_unicode(elem.get('name')))
        reltype = self.relid2reltype[rid]
        target = self.relid2path[rid]
        if self.verbosity >= 2:
            self.dumpout(
                'sheetx=%d sheetId=%r rid=%r type=%r name=%r',
                sheetx, sheetId, rid, reltype, name)
        if reltype != 'worksheet':
            # e.g. chartsheets are not supported
            if self.verbosity >= 2:
                self.dumpout('Ignoring sheet of type %r (name=%r)', reltype, name)
            return
        state = elem.get('state')
        visibility_map = {
            None: 0,
            'visible': 0,
            'hidden': 1,
            'veryHidden': 2
            }
        bk._sheet_visibility.append(visibility_map[state])
        sheet = Sheet(bk, position=None, name=name, number=sheetx)
        sheet.utter_max_rows = X12_MAX_ROWS
        sheet.utter_max_cols = X12_MAX_COLS
        bk._sheet_list.append(sheet)
        bk._sheet_names.append(name)
        bk.nsheets += 1
        self.sheet_targets.append(target)
        self.sheetIds.append(sheetId)

    def do_workbookpr(self, elem):
        # <workbookPr date1904="..."> selects the 1904 date system
        datemode = cnv_xsd_boolean(elem.get('date1904'))
        if self.verbosity >= 2:
            self.dumpout('datemode=%r', datemode)
        self.bk.datemode = datemode

    # Dispatch table used by X12General.process_stream(); entries are plain
    # (unbound) functions, called as meth(self, elem)
    tag2meth = {
        'definedNames': do_defined_names,
        'workbookPr': do_workbookpr,
        'sheet': do_sheet,
        }
    # Also register each tag under its namespaced spelling (runs at class
    # creation time)
    augment_keys(tag2meth, U_SSML12)
class X12SST(X12General):
    # Processes xl/sharedStrings.xml into the Book's shared-string list.

    def __init__(self, bk, logfile=DLF, verbosity=0):
        self.bk = bk
        self.logfile = logfile
        self.verbosity = verbosity
        # Pick the parsing strategy once: iterparse (streaming, lower
        # memory) when the ElementTree implementation supports it,
        # otherwise a full parse + findall
        if ET_has_iterparse:
            self.process_stream = self.process_stream_iterparse
        else:
            self.process_stream = self.process_stream_findall

    def process_stream_iterparse(self, stream, heading=None):
        # Streaming variant: handle each <si> as it completes, then free it
        if self.verbosity >= 2 and heading is not None:
            fprintf(self.logfile, "\n=== %s ===\n", heading)
        si_tag = U_SSML12 + 'si'
        elemno = -1
        sst = self.bk._sharedstrings
        for event, elem in ET.iterparse(stream):
            if elem.tag != si_tag: continue
            elemno = elemno + 1
            if self.verbosity >= 3:
                fprintf(self.logfile, "element #%d\n", elemno)
                self.dump_elem(elem)
            result = get_text_from_si_or_is(self, elem)
            sst.append(result)
            elem.clear() # destroy all child elements
        if self.verbosity >= 2:
            self.dumpout('Entries in SST: %d', len(sst))
        if self.verbosity >= 3:
            for x, s in enumerate(sst):
                fprintf(self.logfile, "SST x=%d s=%r\n", x, s)

    def process_stream_findall(self, stream, heading=None):
        # Fallback variant: parse the whole tree, then walk every <si>
        if self.verbosity >= 2 and heading is not None:
            fprintf(self.logfile, "\n=== %s ===\n", heading)
        self.tree = ET.parse(stream)
        si_tag = U_SSML12 + 'si'
        elemno = -1
        sst = self.bk._sharedstrings
        for elem in self.tree.findall(si_tag):
            elemno = elemno + 1
            if self.verbosity >= 3:
                fprintf(self.logfile, "element #%d\n", elemno)
                self.dump_elem(elem)
            result = get_text_from_si_or_is(self, elem)
            sst.append(result)
        if self.verbosity >= 2:
            self.dumpout('Entries in SST: %d', len(sst))
class X12Styles(X12General):
    # Parses xl/styles.xml just far enough to map each cell-XF index to
    # a "number vs date" cell-type code in bk._xf_index_to_xl_type_map.
    def __init__(self, bk, logfile=DLF, verbosity=0):
        self.bk = bk
        self.logfile = logfile
        self.verbosity = verbosity
        # xf_counts[0]: style XFs seen; xf_counts[1]: cell XFs seen.
        self.xf_counts = [0, 0]
        self.xf_type = None
        # numFmtId -> truthy if the format renders as a date.
        self.fmt_is_date = {}
        for x in list(range(14, 23)) + list(range(45, 48)): #### hard-coding FIX ME ####
            self.fmt_is_date[x] = 1
        # dummy entry for XF 0 in case no Styles section
        self.bk._xf_index_to_xl_type_map[0] = 2
        # fill_in_standard_formats(bk) #### pre-integration kludge
    def do_cellstylexfs(self, elem):
        # Entering <cellStyleXfs>: subsequent <xf> elements are style XFs.
        self.xf_type = 0
    def do_cellxfs(self, elem):
        # Entering <cellXfs>: subsequent <xf> elements are cell XFs.
        self.xf_type = 1
    def do_numfmt(self, elem):
        # Record a custom number format and whether it is date-like.
        formatCode = ensure_unicode(elem.get('formatCode'))
        numFmtId = int(elem.get('numFmtId'))
        is_date = is_date_format_string(self.bk, formatCode)
        self.fmt_is_date[numFmtId] = is_date
        # NOTE(review): is_date + 2 appears to map to the module's cell
        # type codes (presumably 2=number, 3=date) -- confirm against
        # the constants used by put_cell callers.
        fmt_obj = Format(numFmtId, is_date + 2, formatCode)
        self.bk.format_map[numFmtId] = fmt_obj
        if self.verbosity >= 3:
            self.dumpout('numFmtId=%d formatCode=%r is_date=%d', numFmtId, formatCode, is_date)
    def do_xf(self, elem):
        if self.xf_type != 1:
            #### ignoring style XFs for the moment
            return
        xfx = self.xf_counts[self.xf_type]
        self.xf_counts[self.xf_type] = xfx + 1
        xf = XF()
        self.bk.xf_list.append(xf)
        self.bk.xfcount += 1
        numFmtId = int(elem.get('numFmtId', '0'))
        xf.format_key = numFmtId
        # Unknown numFmtIds default to "number" (not a date).
        is_date = self.fmt_is_date.get(numFmtId, 0)
        self.bk._xf_index_to_xl_type_map[xfx] = is_date + 2
        if self.verbosity >= 3:
            self.dumpout(
                'xfx=%d numFmtId=%d',
                xfx, numFmtId,
            )
            self.dumpout(repr(self.bk._xf_index_to_xl_type_map))
    # Dispatch table; augment_keys() adds namespace-qualified aliases.
    tag2meth = {
        'cellStyleXfs': do_cellstylexfs,
        'cellXfs': do_cellxfs,
        'numFmt': do_numfmt,
        'xf': do_xf,
    }
    augment_keys(tag2meth, U_SSML12)
class X12Sheet(X12General):
    # Parses one worksheet part (xl/worksheets/sheetN.xml) into the
    # supplied xlrd Sheet. When iterparse is available, a streaming
    # parser (own_process_stream) replaces the generic X12General one.
    def __init__(self, sheet, logfile=DLF, verbosity=0):
        self.sheet = sheet
        self.logfile = logfile
        self.verbosity = verbosity
        self.rowx = -1 # We may need to count them.
        self.bk = sheet.book
        self.sst = self.bk._sharedstrings
        self.merged_cells = sheet.merged_cells
        # One-shot warning flags for rows/cells missing the optional
        # 'r' (reference) attribute.
        self.warned_no_cell_name = 0
        self.warned_no_row_num = 0
        if ET_has_iterparse:
            self.process_stream = self.own_process_stream
    def own_process_stream(self, stream, heading=None):
        # Streaming (iterparse) version: dispatch row/dimension/mergeCell
        # elements as they complete, freeing row children as we go.
        if self.verbosity >= 2 and heading is not None:
            fprintf(self.logfile, "\n=== %s ===\n", heading)
        getmethod = self.tag2meth.get
        row_tag = U_SSML12 + "row"
        self_do_row = self.do_row
        for event, elem in ET.iterparse(stream):
            if elem.tag == row_tag:
                self_do_row(elem)
                elem.clear() # destroy all child elements (cells)
            elif elem.tag == U_SSML12 + "dimension":
                self.do_dimension(elem)
            elif elem.tag == U_SSML12 + "mergeCell":
                self.do_merge_cell(elem)
        self.finish_off()
    def process_comments_stream(self, stream):
        # Parse xl/commentsN.xml and attach Note objects to the sheet's
        # cell_note_map, keyed by (rowx, colx).
        root = ET.parse(stream).getroot()
        author_list = root[0]
        assert author_list.tag == U_SSML12 + 'authors'
        authors = [elem.text for elem in author_list]
        comment_list = root[1]
        assert comment_list.tag == U_SSML12 + 'commentList'
        cell_note_map = self.sheet.cell_note_map
        from .sheet import Note
        text_tag = U_SSML12 + 'text'
        r_tag = U_SSML12 + 'r'
        t_tag = U_SSML12 + 't'
        for elem in comment_list.findall(U_SSML12 + 'comment'):
            # Collect both plain <t> runs and rich-text <r>/<t> runs.
            ts = elem.findall('./' + text_tag + '/' + t_tag)
            ts += elem.findall('./' + text_tag + '/' + r_tag + '/' + t_tag)
            ref = elem.get('ref')
            note = Note()
            note.author = authors[int(elem.get('authorId'))]
            note.rowx, note.colx = coords = cell_name_to_rowx_colx(ref)
            note.text = ''
            for t in ts:
                note.text += cooked_text(self, t)
            cell_note_map[coords] = note
    def do_dimension(self, elem):
        # Record the declared sheet extent from <dimension ref="A1:Z99">.
        ref = elem.get('ref') # example: "A1:Z99" or just "A1"
        if ref:
            # print >> self.logfile, "dimension: ref=%r" % ref
            last_cell_ref = ref.split(':')[-1] # example: "Z99"
            rowx, colx = cell_name_to_rowx_colx(last_cell_ref)
            self.sheet._dimnrows = rowx + 1
            self.sheet._dimncols = colx + 1
    def do_merge_cell(self, elem):
        # The ref attribute should be a cell range like "B1:D5".
        ref = elem.get('ref')
        if ref:
            first_cell_ref, last_cell_ref = ref.split(':')
            first_rowx, first_colx = cell_name_to_rowx_colx(first_cell_ref)
            last_rowx, last_colx = cell_name_to_rowx_colx(last_cell_ref)
            # Stored half-open: (rlo, rhi, clo, chi).
            self.merged_cells.append((first_rowx, last_rowx + 1,
                                      first_colx, last_colx + 1))
    def do_row(self, row_elem):
        # Parse one <row>: decode each cell's position, type and value,
        # and insert it into the Sheet via put_cell.
        def bad_child_tag(child_tag):
            raise Exception('cell type %s has unexpected child <%s> at rowx=%r colx=%r' % (cell_type, child_tag, rowx, colx))
        row_number = row_elem.get('r')
        if row_number is None: # Yes, it's optional.
            self.rowx += 1
            explicit_row_number = 0
            if self.verbosity and not self.warned_no_row_num:
                self.dumpout("no row number; assuming rowx=%d", self.rowx)
                self.warned_no_row_num = 1
        else:
            self.rowx = int(row_number) - 1
            explicit_row_number = 1
        assert 0 <= self.rowx < X12_MAX_ROWS
        rowx = self.rowx
        colx = -1
        if self.verbosity >= 3:
            self.dumpout("<row> row_number=%r rowx=%d explicit=%d",
                row_number, self.rowx, explicit_row_number)
        letter_value = _UPPERCASE_1_REL_INDEX
        for cell_elem in row_elem:
            cell_name = cell_elem.get('r')
            if cell_name is None: # Yes, it's optional.
                colx += 1
                if self.verbosity and not self.warned_no_cell_name:
                    self.dumpout("no cellname; assuming rowx=%d colx=%d", rowx, colx)
                    self.warned_no_cell_name = 1
            else:
                # Extract column index from cell name
                # A<row number> => 0, Z =>25, AA => 26, XFD => 16383
                colx = 0
                charx = -1
                try:
                    for c in cell_name:
                        charx += 1
                        if c == '$':
                            continue
                        lv = letter_value[c]
                        if lv:
                            colx = colx * 26 + lv
                        else: # start of row number; can't be '0'
                            colx = colx - 1
                            assert 0 <= colx < X12_MAX_COLS
                            break
                except KeyError:
                    raise Exception('Unexpected character %r in cell name %r' % (c, cell_name))
                # Sanity check: the digits remaining after the column
                # letters must match the row's own 'r' attribute.
                if explicit_row_number and cell_name[charx:] != row_number:
                    raise Exception('cell name %r but row number is %r' % (cell_name, row_number))
            xf_index = int(cell_elem.get('s', '0'))
            cell_type = cell_elem.get('t', 'n')
            tvalue = None
            formula = None
            if cell_type == 'n':
                # n = number. Most frequent type.
                # <v> child contains plain text which can go straight into float()
                # OR there's no text in which case it's a BLANK cell
                for child in cell_elem:
                    child_tag = child.tag
                    if child_tag == V_TAG:
                        tvalue = child.text
                    elif child_tag == F_TAG:
                        formula = cooked_text(self, child)
                    else:
                        # NOTE(review): the other branches use
                        # bad_child_tag() here; message style differs.
                        raise Exception('unexpected tag %r' % child_tag)
                if not tvalue:
                    if self.bk.formatting_info:
                        self.sheet.put_cell(rowx, colx, XL_CELL_BLANK, '', xf_index)
                else:
                    self.sheet.put_cell(rowx, colx, None, float(tvalue), xf_index)
            elif cell_type == "s":
                # s = index into shared string table. 2nd most frequent type
                # <v> child contains plain text which can go straight into int()
                for child in cell_elem:
                    child_tag = child.tag
                    if child_tag == V_TAG:
                        tvalue = child.text
                    elif child_tag == F_TAG:
                        # formula not expected here, but gnumeric does it.
                        formula = child.text
                    else:
                        bad_child_tag(child_tag)
                if not tvalue:
                    # <c r="A1" t="s"/>
                    if self.bk.formatting_info:
                        self.sheet.put_cell(rowx, colx, XL_CELL_BLANK, '', xf_index)
                else:
                    value = self.sst[int(tvalue)]
                    self.sheet.put_cell(rowx, colx, XL_CELL_TEXT, value, xf_index)
            elif cell_type == "str":
                # str = string result from formula.
                # Should have <f> (formula) child; however in one file, all text cells are str with no formula.
                # <v> child can contain escapes
                for child in cell_elem:
                    child_tag = child.tag
                    if child_tag == V_TAG:
                        tvalue = cooked_text(self, child)
                    elif child_tag == F_TAG:
                        formula = cooked_text(self, child)
                    else:
                        bad_child_tag(child_tag)
                # assert tvalue is not None and formula is not None
                # Yuk. Fails with file created by gnumeric -- no tvalue!
                self.sheet.put_cell(rowx, colx, XL_CELL_TEXT, tvalue, xf_index)
            elif cell_type == "b":
                # b = boolean
                # <v> child contains "0" or "1"
                # Maybe the data should be converted with cnv_xsd_boolean;
                # ECMA standard is silent; Excel 2007 writes 0 or 1
                for child in cell_elem:
                    child_tag = child.tag
                    if child_tag == V_TAG:
                        tvalue = child.text
                    elif child_tag == F_TAG:
                        formula = cooked_text(self, child)
                    else:
                        bad_child_tag(child_tag)
                self.sheet.put_cell(rowx, colx, XL_CELL_BOOLEAN, int(tvalue), xf_index)
            elif cell_type == "e":
                # e = error
                # <v> child contains e.g. "#REF!"
                for child in cell_elem:
                    child_tag = child.tag
                    if child_tag == V_TAG:
                        tvalue = child.text
                    elif child_tag == F_TAG:
                        formula = cooked_text(self, child)
                    else:
                        bad_child_tag(child_tag)
                value = error_code_from_text[tvalue]
                self.sheet.put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index)
            elif cell_type == "inlineStr":
                # Not expected in files produced by Excel.
                # Only possible child is <is>.
                # It's a way of allowing 3rd party s/w to write text (including rich text) cells
                # without having to build a shared string table
                for child in cell_elem:
                    child_tag = child.tag
                    if child_tag == IS_TAG:
                        tvalue = get_text_from_si_or_is(self, child)
                    else:
                        bad_child_tag(child_tag)
                assert tvalue is not None
                self.sheet.put_cell(rowx, colx, XL_CELL_TEXT, tvalue, xf_index)
            else:
                raise Exception("Unknown cell type %r in rowx=%d colx=%d" % (cell_type, rowx, colx))
    # Dispatch table; augment_keys() adds namespace-qualified aliases.
    tag2meth = {
        'row': do_row,
    }
    augment_keys(tag2meth, U_SSML12)
def open_workbook_2007_xml(
    zf,
    component_names,
    logfile=sys.stdout,
    verbosity=0,
    use_mmap=0,
    formatting_info=0,
    on_demand=0,
    ragged_rows=0,
    ):
    # Top-level driver for .xlsx input: opens the workbook's component
    # parts from the zip archive (zf) in dependency order -- rels,
    # workbook, core properties, styles, shared strings, then each
    # worksheet (plus its comments part, if present) -- and returns a
    # populated Book. component_names is the set of part names present
    # in the archive.
    ensure_elementtree_imported(verbosity, logfile)
    bk = Book()
    bk.logfile = logfile
    bk.verbosity = verbosity
    bk.formatting_info = formatting_info
    if formatting_info:
        raise NotImplementedError("formatting_info=True not yet implemented")
    bk.use_mmap = False #### Not supported initially
    bk.on_demand = on_demand
    if on_demand:
        if verbosity:
            print("WARNING *** on_demand=True not yet implemented; falling back to False", file=bk.logfile)
        bk.on_demand = False
    bk.ragged_rows = ragged_rows
    x12book = X12Book(bk, logfile, verbosity)
    zflo = zf.open('xl/_rels/workbook.xml.rels')
    x12book.process_rels(zflo)
    del zflo
    zflo = zf.open('xl/workbook.xml')
    x12book.process_stream(zflo, 'Workbook')
    del zflo
    props_name = 'docProps/core.xml'
    if props_name in component_names:
        zflo = zf.open(props_name)
        x12book.process_coreprops(zflo)
    x12sty = X12Styles(bk, logfile, verbosity)
    if 'xl/styles.xml' in component_names:
        zflo = zf.open('xl/styles.xml')
        x12sty.process_stream(zflo, 'styles')
        del zflo
    else:
        # seen in MS sample file MergedCells.xlsx
        pass
    sst_fname = 'xl/sharedStrings.xml'
    x12sst = X12SST(bk, logfile, verbosity)
    if sst_fname in component_names:
        zflo = zf.open(sst_fname)
        x12sst.process_stream(zflo, 'SST')
        del zflo
    # Parse each worksheet part recorded by X12Book.do_sheet.
    for sheetx in range(bk.nsheets):
        fname = x12book.sheet_targets[sheetx]
        zflo = zf.open(fname)
        sheet = bk._sheet_list[sheetx]
        x12sheet = X12Sheet(sheet, logfile, verbosity)
        heading = "Sheet %r (sheetx=%d) from %r" % (sheet.name, sheetx, fname)
        x12sheet.process_stream(zflo, heading)
        del zflo
        # NOTE(review): assumes comments parts are named comments<N>.xml
        # in sheet order -- TODO confirm against the rels of each sheet.
        comments_fname = 'xl/comments%d.xml' % (sheetx + 1)
        if comments_fname in component_names:
            comments_stream = zf.open(comments_fname)
            x12sheet.process_comments_stream(comments_stream)
            del comments_stream
        sheet.tidy_dimensions()
    return bk
| gpl-2.0 |
from math import ceil
class InvalidPage(Exception):
    """Base class for errors raised when a requested page is invalid."""
    pass
class PageNotAnInteger(InvalidPage):
    """Raised when the requested page number cannot be converted to int."""
    pass
class EmptyPage(InvalidPage):
    """Raised when the requested page number is out of range."""
    pass
class Paginator(object):
    """Split ``object_list`` into pages of at most ``per_page`` items.

    orphans: if the final page would contain this many items or fewer,
    they are folded into the previous page instead.
    allow_empty_first_page: whether page 1 is valid when there are no
    objects at all.
    """
    def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True):
        self.object_list = object_list
        self.per_page = per_page
        self.orphans = orphans
        self.allow_empty_first_page = allow_empty_first_page
        # count/num_pages are computed lazily and cached here.
        self._num_pages = self._count = None

    def validate_number(self, number):
        """Validate the given 1-based page number and return it as an int.

        Raises PageNotAnInteger if it cannot be converted to an integer,
        and EmptyPage if it is out of range.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            # Bug fix: int(None) (and other non-numeric types) raises
            # TypeError, not ValueError; previously that leaked a raw
            # TypeError instead of the documented PageNotAnInteger.
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        if number > self.num_pages:
            # Page 1 is always acceptable for an empty list when
            # allow_empty_first_page is set.
            if number == 1 and self.allow_empty_first_page:
                pass
            else:
                raise EmptyPage('That page contains no results')
        return number

    def page(self, number):
        "Returns a Page object for the given 1-based page number."
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        # Fold trailing orphans into the last page.
        if top + self.orphans >= self.count:
            top = self.count
        return Page(self.object_list[bottom:top], number, self)

    def _get_count(self):
        "Returns the total number of objects, across all pages."
        if self._count is None:
            try:
                self._count = self.object_list.count()
            except (AttributeError, TypeError):
                # AttributeError if object_list has no count() method.
                # TypeError if object_list.count() requires arguments
                # (i.e. is of type list).
                self._count = len(self.object_list)
        return self._count
    count = property(_get_count)

    def _get_num_pages(self):
        "Returns the total number of pages."
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                # Orphans reduce the effective item count; at least one
                # page always exists in this branch.
                hits = max(1, self.count - self.orphans)
                self._num_pages = int(ceil(hits / float(self.per_page)))
        return self._num_pages
    num_pages = property(_get_num_pages)

    def _get_page_range(self):
        """
        Returns a 1-based range of pages for iterating through within
        a template for loop.
        """
        return range(1, self.num_pages + 1)
    page_range = property(_get_page_range)
QuerySetPaginator = Paginator # For backwards-compatibility.
class Page(object):
    """A single page of results produced by a Paginator."""

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def has_next(self):
        return self.paginator.num_pages > self.number

    def has_previous(self):
        return self.number > 1

    def has_other_pages(self):
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        return self.number + 1

    def previous_page_number(self):
        return self.number - 1

    def start_index(self):
        """Return the 1-based index (relative to the whole result set)
        of the first object on this page; 0 if there are no objects."""
        if not self.paginator.count:
            return 0
        return self.paginator.per_page * (self.number - 1) + 1

    def end_index(self):
        """Return the 1-based index (relative to the whole result set)
        of the last object on this page."""
        # The final page absorbs any orphans, so it may hold extras.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.paginator.per_page * self.number
| lgpl-3.0 |
#!/usr/bin/python
import sys
import os
import errno
import string
import re
from optparse import OptionParser
import lxml.etree as ET
# Namespace prefix -> URI map for the OVAL document model; used both to
# build qualified tag names and for ElementTree namespace registration.
xmlns = {
    "o": "http://oval.mitre.org/XMLSchema/oval-definitions-5",
    "xsi": "http://www.w3.org/2001/XMLSchema-instance",
    "oval": "http://oval.mitre.org/XMLSchema/oval-common-5",
    "unix": "http://oval.mitre.org/XMLSchema/oval-definitions-5#unix",
    "linux": "http://oval.mitre.org/XMLSchema/oval-definitions-5#linux",
    "ind": "http://oval.mitre.org/XMLSchema/oval-definitions-5#independent",
}
def parse_options():
    """Parse command-line arguments.

    Returns (options, args) where args is the list of input file names;
    prints usage and exits with status 1 when no input file was given.
    """
    usage = "usage: %prog [options] input_file [input_file . . .]"
    opt_parser = OptionParser(usage=usage, version="%prog ")
    opt_parser.add_option("-o", dest="out_dname", default="/tmp/checks",
                          help="name of output directory. If unspecified, default is a new directory \"/tmp/checks\"")
    (options, args) = opt_parser.parse_args()
    if not args:
        opt_parser.print_help()
        sys.exit(1)
    return (options, args)
# Find any *_ref attributes on descendants, and pull in the elements
# they point at.
def gather_refs(element, defn):
    """Recursively collect every element that *element* references via a
    test_ref/var_ref/state_ref/object_ref attribute, recording each
    newly seen one under *defn* in def_reflist_map."""
    referring = element.findall(".//*[@test_ref]")
    referring.extend(element.findall(".//*[@var_ref]"))
    referring.extend(element.findall(".//*[@state_ref]"))
    referring.extend(element.findall(".//*[@object_ref]"))
    for item in referring:
        for attr in item.attrib:
            if not attr.endswith("_ref"):
                continue
            referenced = id_element_map[item.get(attr)]
            if referenced not in def_reflist_map[defn]:
                def_reflist_map[defn].append(referenced)
                gather_refs(referenced, defn)
def gather_refs_for_defs(tree):
    # For every OVAL <definition> in the tree, build the list of all
    # elements it (transitively) references.
    # NOTE(review): defn_elements is iterated twice, so this relies on
    # getiterator() returning a reusable sequence (lxml behavior) --
    # confirm if the ET implementation ever changes.
    defn_elements = tree.getiterator("{" + xmlns["o"] + "}definition")
    # initialize dictionary, which maps definitions to a list of those things
    # it references
    for defn in defn_elements:
        def_reflist_map[defn] = []
    for defn in defn_elements:
        gather_refs(defn, defn)
def output_checks(dname):
    # Write one XML file per definition into directory dname: the
    # definition followed by everything it references, wrapped in a
    # <def-group> element. (Python 2 only: 'except OSError, e' syntax.)
    try:
        os.makedirs(dname)
    except OSError, e:
        # Directory already existing is fine; re-raise anything else.
        if e.errno != errno.EEXIST:
            raise
    # use namespace prefix-to-uri defined above, to provide abbreviations
    for prefix, uri in xmlns.iteritems():
        ET.register_namespace(prefix, uri)
    os.chdir(dname)
    for defn, reflist in def_reflist_map.iteritems():
        # create filename from id attribute, get rid of punctuation
        fname = defn.get("id")
        fname = fname.translate(string.maketrans("", ""),
                                string.punctuation) + ".xml"
        # output definition, and then all elements that the definition
        # references
        outstring = ET.tostring(defn)
        for ref in reflist:
            outstring = outstring + ET.tostring(ref)
        with open(fname, 'w+') as xml_file:
            # giant kludge: get rid of per-node namespace attributes
            outstring = re.sub(r"\s+xmlns[^\s]+ ", " ", outstring)
            xml_file.write("<def-group>\n" + outstring + "</def-group>")
    return
def gather_ids_for_elements(tree):
    """Index every element carrying an id attribute into id_element_map."""
    for node in tree.findall(".//*[@id]"):
        id_element_map[node.get("id")] = node
id_element_map = {}  # map of ids to elements (filled by gather_ids_for_elements)
def_reflist_map = {}  # map of definitions to lists of elements each references
def main():
    # Parse args, index and resolve references in every input OVAL file,
    # then write one per-definition check file into the output directory.
    (options, args) = parse_options()
    for fname in args:
        tree = ET.parse(fname)
        # ET.dump(tree)
        gather_ids_for_elements(tree)
        gather_refs_for_defs(tree)
    output_checks(options.out_dname)
    sys.exit(0)
if __name__ == "__main__":
main()
| apache-2.0 |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
qualities,
unified_strdate,
)
class MgoonIE(InfoExtractor):
    # Extractor for mgoon.com video pages; metadata and format URLs come
    # from the mpos.mgoon.com JSON player API.
    _VALID_URL = r'''(?x)https?://(?:www\.)?
    (?:(:?m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)|
    video\.mgoon\.com)/(?P<id>[0-9]+)'''
    _API_URL = 'http://mpos.mgoon.com/player/video?id={0:}'
    _TESTS = [
        {
            'url': 'http://m.mgoon.com/ch/hi6618/v/5582148',
            'md5': 'dd46bb66ab35cf6d51cc812fd82da79d',
            'info_dict': {
                'id': '5582148',
                'uploader_id': 'hi6618',
                'duration': 240.419,
                'upload_date': '20131220',
                'ext': 'mp4',
                'title': 'md5:543aa4c27a4931d371c3f433e8cebebc',
                'thumbnail': r're:^https?://.*\.jpg$',
            }
        },
        {
            'url': 'http://www.mgoon.com/play/view/5582148',
            'only_matching': True,
        },
        {
            'url': 'http://video.mgoon.com/5582148',
            'only_matching': True,
        },
    ]
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        data = self._download_json(self._API_URL.format(video_id), video_id)
        # The API reports errors in-band via errorInfo.code.
        if data.get('errorInfo', {}).get('code') != 'NONE':
            raise ExtractorError('%s encountered an error: %s' % (
                self.IE_NAME, data['errorInfo']['message']), expected=True)
        v_info = data['videoInfo']
        title = v_info.get('v_title')
        thumbnail = v_info.get('v_thumbnail')
        duration = v_info.get('v_duration')
        upload_date = unified_strdate(v_info.get('v_reg_date'))
        uploader_id = data.get('userInfo', {}).get('u_alias')
        # API reports duration in milliseconds; convert to seconds.
        if duration:
            duration /= 1000.0
        age_limit = None
        if data.get('accessInfo', {}).get('code') == 'VIDEO_STATUS_ADULT':
            age_limit = 18
        formats = []
        get_quality = qualities(['360p', '480p', '720p', '1080p'])
        for fmt in data['videoFiles']:
            formats.append({
                'format_id': fmt['label'],
                'quality': get_quality(fmt['label']),
                'url': fmt['url'],
                'ext': fmt['format'],
            })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'age_limit': age_limit,
        }
| gpl-2.0 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in sparse_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
# These ops are registered as having no gradient.
ops.NoGradient("SparseAddGrad")
ops.NoGradient("SparseConcat")
ops.NoGradient("SparseToDense")
@ops.RegisterGradient("SparseReorder")
def _SparseReorderGrad(op, unused_output_indices_grad, output_values_grad):
  """Gradients for the SparseReorder op.

  Args:
    op: the SparseReorder op
    unused_output_indices_grad: the incoming gradients of the output indices
    output_values_grad: the incoming gradients of the output values

  Returns:
    Gradient for each of the 3 input tensors:
      (input_indices, input_values, input_shape)
    The gradients for input_indices and input_shape is None.
  """
  input_indices = op.inputs[0]
  input_shape = op.inputs[2]

  # Tag each input entry with its position, reorder the tags exactly as
  # the forward op reordered the values, then invert that permutation to
  # route each output gradient back to its originating entry.
  num_entries = array_ops.shape(input_indices)[0]
  entry_positions = math_ops.range(num_entries)
  reordered = sparse_ops.sparse_reorder(
      ops.SparseTensor(input_indices, entry_positions, input_shape))
  inverse_perm = array_ops.invert_permutation(reordered.values)
  return (None, array_ops.gather(output_values_grad, inverse_perm), None)
@ops.RegisterGradient("SparseAdd")
def _SparseAddGrad(op, *grads):
  """The backward operator for the SparseAdd op.

  SparseAdd computes A + B where A, B and the sum are all SparseTensors.
  Given the upstream gradient w.r.t. the non-empty values of the sum,
  this produces the gradients w.r.t. the non-empty values of A and B.

  Args:
    op: the SparseAdd op
    *grads: the incoming gradients, one element per output of `op`

  Returns:
    Gradient for each of the 6 input tensors of SparseAdd:
      (a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh)
    The gradients for the indices, shapes, and the threshold are None.
  """
  # grads[0] is w.r.t. the output indices; only grads[1] (the gradient
  # w.r.t. the output values) matters.
  values_grad = grads[1]
  a_indices = op.inputs[0]
  b_indices = op.inputs[3]
  sum_indices = op.outputs[0]
  # `thresh` only affects which entries survive in the forward pass; the
  # gradient op recovers that selection from `sum_indices`.
  # pylint: disable=protected-access
  a_values_grad, b_values_grad = gen_sparse_ops._sparse_add_grad(
      values_grad, a_indices, b_indices, sum_indices)
  a_values_grad.set_shape(op.inputs[1].get_shape())
  b_values_grad.set_shape(op.inputs[4].get_shape())
  return (None, a_values_grad, None, None, b_values_grad, None, None)
@ops.RegisterGradient("SparseTensorDenseAdd")
def _SparseTensorDenseAddGrad(op, out_grad):
  """Gradient for SparseTensorDenseAdd: the sparse operand receives the
  dense gradient gathered at its indices; the dense operand receives the
  gradient unchanged."""
  sparse_indices = op.inputs[0]
  sparse_values_grad = array_ops.gather_nd(out_grad, sparse_indices)
  # Inputs were (sparse_indices, sparse_values, sparse_shape, dense).
  return (None, sparse_values_grad, None, out_grad)
@ops.RegisterGradient("SparseReduceSum")
def _SparseReduceSumGrad(op, out_grad):
  """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
  sp_indices = op.inputs[0]
  sp_shape = op.inputs[2]
  kept_dims_shape = math_ops.reduced_shape(sp_shape, op.inputs[3])
  grad_kept_dims = array_ops.reshape(out_grad, kept_dims_shape)
  # Each sparse input entry pulls its gradient from the output cell it
  # was reduced into, located at indices // scale.
  scale = sp_shape // math_ops.to_int64(kept_dims_shape)
  values_grad = array_ops.gather_nd(grad_kept_dims, sp_indices // scale)
  # Inputs were (sparse_indices, sparse_values, sparse_shape, reduction_axes).
  return (None, values_grad, None, None)
@ops.RegisterGradient("SparseTensorDenseMatMul")
def _SparseTensorDenseMatMulGrad(op, grad):
  """Gradients for the dense tensor in the SparseTensorDenseMatMul op.

  If either input is complex, no gradient is provided.

  Args:
    op: the SparseTensorDenseMatMul op
    grad: the incoming gradient

  Returns:
    Gradient for each of the 4 input tensors:
      (sparse_indices, sparse_values, sparse_shape, dense_tensor)
    The gradients for indices and shape are None.

  Raises:
    TypeError: When the two operands don't have the same type.
  """
  sp_t = ops.SparseTensor(*op.inputs[:3])
  adj_a = op.get_attr("adjoint_a")
  adj_b = op.get_attr("adjoint_b")

  a_type = sp_t.values.dtype.base_dtype
  b_type = op.inputs[3].dtype.base_dtype
  if a_type != b_type:
    raise TypeError("SparseTensorDenseMatMul op received operands with "
                    "different types: ", a_type, " and ", b_type)
  if a_type in (ops.dtypes.complex64, ops.dtypes.complex128):
    raise NotImplementedError("SparseTensorDenseMatMul op does not support "
                              "complex gradients.")

  # gradient w.r.t. dense
  b_grad = sparse_ops.sparse_tensor_dense_matmul(sp_t, grad,
                                                 adjoint_a=not adj_a)
  if adj_b:
    b_grad = array_ops.transpose(b_grad)

  # gradient w.r.t. sparse values
  # For each non-zero A[i, j], the value gradient is the dot product of
  # grad row i (or j, under adjoint_a) with the matching row of B; the
  # two gathers below assemble those rows, and reduce_sum forms the dot.
  a_indices = op.inputs[0]
  b = op.inputs[3]
  rows = a_indices[:, 0]
  cols = a_indices[:, 1]
  # TODO(zongheng, ebrevdo): add conjugates in the right places when complex
  # values are allowed.
  # TODO(zongheng): these gather calls could potentially duplicate rows/cols in
  # memory.  If there is a need, we should look into implementing this more
  # intelligently to avoid duplicating data.
  parts_a = array_ops.gather(grad, rows if not adj_a else cols)
  parts_b = array_ops.gather(b if not adj_b else array_ops.transpose(b),
                             cols if not adj_a else rows)
  a_values_grad = math_ops.reduce_sum(parts_a * parts_b, reduction_indices=1)

  # gradients w.r.t. (a_indices, a_values, a_shape, b)
  return (None, a_values_grad, None, b_grad)
@ops.RegisterGradient("SparseDenseCwiseAdd")
def _SparseDenseCwiseAddGrad(unused_op, unused_grad):
  """Registered placeholder: this gradient is not implemented."""
  raise NotImplementedError("Gradient for SparseDenseCwiseAdd is currently not"
                            " implemented yet.")
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  # Left-pad y's shape with 1s to x's rank, then compute the per-axis
  # broadcast factor; x index // scaling gives the matching y position.
  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(0, [array_ops.ones(num_added_dims,
                                                          ops.dtypes.int64),
                                           y_shape])

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  # Drop the padded leading dims so indices are valid into y.
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat(0, [[0], num_added_dims]),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    # d(x/y)/dy = -x / y^2, evaluated at the broadcast positions.
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      ops.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy)
@ops.RegisterGradient("SparseDenseCwiseMul")
def _SparseDenseCwiseMulGrad(op, grad):
  """Gradients for SparseDenseCwiseMul."""
  return _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul=True)
@ops.RegisterGradient("SparseDenseCwiseDiv")
def _SparseDenseCwiseDivGrad(op, grad):
  """Gradients for SparseDenseCwiseDiv."""
  return _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul=False)
@ops.RegisterGradient("SparseSoftmax")
def _SparseSoftmaxGrad(op, grad):
  """Gradients for SparseSoftmax.

  The calculation is the same as SoftmaxGrad:

    grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax

  where we now only operate on the non-zero values present in the SparseTensors.

  Args:
    op: the SparseSoftmax op.
    grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values.

  Returns:
    Gradients w.r.t. the input (sp_indices, sp_values, sp_shape).
  """
  indices, shape = op.inputs[0], op.inputs[2]
  out_vals = op.outputs[0]
  sp_output = ops.SparseTensor(indices, out_vals, shape)
  sp_grad = ops.SparseTensor(indices, grad, shape)
  # Elementwise product grad_softmax * softmax over the shared sparsity.
  sp_product = ops.SparseTensor(
      indices, sp_output.values * sp_grad.values, shape)

  # [..., B, 1], dense.
  sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keep_dims=True)
  # sparse [..., B, C] + dense [..., B, 1] with broadcast; outputs sparse.
  sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)

  grad_x = sp_sum.values * sp_output.values
  return [None, grad_x, None]
| apache-2.0 |
# (c) 2016, Adrian Likins <alikins@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pytest
from units.compat.mock import MagicMock
from units.mock.loader import DictDataLoader
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.executor import task_result
from ansible.playbook.included_file import IncludedFile
from ansible.errors import AnsibleParserError
@pytest.fixture
def mock_iterator():
    """Provide a mock play iterator with a mock _play attached."""
    iterator = MagicMock(name='MockIterator')
    iterator._play = MagicMock(name='MockPlay')
    return iterator
@pytest.fixture
def mock_variable_manager():
    # TODO: can we use a real VariableManager?
    variable_manager = MagicMock(name='MockVariableManager')
    variable_manager.get_vars.return_value = {}
    return variable_manager
def test_equals_ok():
    # Two IncludedFiles for the same file and the same task compare equal.
    shared_uuid = '111-111'
    parent = MagicMock(name='MockParent')
    parent._uuid = shared_uuid
    task = MagicMock(name='MockTask')
    task._uuid = shared_uuid
    task._parent = parent
    first = IncludedFile('a.yml', {}, {}, task)
    second = IncludedFile('a.yml', {}, {}, task)
    assert first == second
def test_equals_different_tasks():
    # Same file but different task uuids must compare unequal.
    parent = MagicMock(name='MockParent')
    parent._uuid = '111-111'
    first_task = MagicMock(name='MockTask')
    first_task._uuid = '11-11'
    first_task._parent = parent
    second_task = MagicMock(name='MockTask')
    second_task._uuid = '22-22'
    second_task._parent = parent
    assert IncludedFile('a.yml', {}, {}, first_task) != IncludedFile('a.yml', {}, {}, second_task)
def test_equals_different_parents():
    # Same task uuid under different parents must compare unequal.
    first_parent = MagicMock(name='MockParent')
    first_parent._uuid = '111-111'
    second_parent = MagicMock(name='MockParent')
    second_parent._uuid = '222-222'
    first_task = MagicMock(name='MockTask')
    first_task._uuid = '11-11'
    first_task._parent = first_parent
    second_task = MagicMock(name='MockTask')
    second_task._uuid = '11-11'
    second_task._parent = second_parent
    assert IncludedFile('a.yml', {}, {}, first_task) != IncludedFile('a.yml', {}, {}, second_task)
def test_included_file_instantiation():
    """Constructor stores filename/args/vars/task on private attributes."""
    inc_file = IncludedFile(filename='somefile.yml', args={}, vars={}, task=None)
    assert isinstance(inc_file, IncludedFile)
    assert inc_file._filename == 'somefile.yml'
    assert inc_file._args == {}
    assert inc_file._vars == {}
    assert inc_file._task is None
def test_process_include_results(mock_iterator, mock_variable_manager):
    """Two hosts hitting the same include via the same task merge into one IncludedFile."""
    hostname = "testhost1"
    hostname2 = "testhost2"
    parent_task_ds = {'debug': 'msg=foo'}
    parent_task = Task.load(parent_task_ds)
    parent_task._play = None
    task_ds = {'include': 'include_test.yml'}
    loaded_task = TaskInclude.load(task_ds, task_include=parent_task)
    return_data = {'include': 'include_test.yml'}
    # The task in the TaskResult has to be a TaskInclude so it has a .static attr
    result1 = task_result.TaskResult(host=hostname, task=loaded_task, return_data=return_data)
    result2 = task_result.TaskResult(host=hostname2, task=loaded_task, return_data=return_data)
    results = [result1, result2]
    fake_loader = DictDataLoader({'include_test.yml': ""})
    res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager)
    # Identical file + identical task: the two results collapse to a single
    # entry whose host list accumulates both hosts in result order.
    assert isinstance(res, list)
    assert len(res) == 1
    assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml')
    assert res[0]._hosts == ['testhost1', 'testhost2']
    assert res[0]._args == {}
    assert res[0]._vars == {}
def test_process_include_diff_files(mock_iterator, mock_variable_manager):
    """Results naming two different include files yield two IncludedFile entries."""
    hostname = "testhost1"
    hostname2 = "testhost2"
    parent_task_ds = {'debug': 'msg=foo'}
    parent_task = Task.load(parent_task_ds)
    parent_task._play = None
    task_ds = {'include': 'include_test.yml'}
    loaded_task = TaskInclude.load(task_ds, task_include=parent_task)
    loaded_task._play = None
    # Nested include: the child include's parent is the first include task.
    child_task_ds = {'include': 'other_include_test.yml'}
    loaded_child_task = TaskInclude.load(child_task_ds, task_include=loaded_task)
    loaded_child_task._play = None
    return_data = {'include': 'include_test.yml'}
    # The task in the TaskResult has to be a TaskInclude so it has a .static attr
    result1 = task_result.TaskResult(host=hostname, task=loaded_task, return_data=return_data)
    return_data = {'include': 'other_include_test.yml'}
    result2 = task_result.TaskResult(host=hostname2, task=loaded_child_task, return_data=return_data)
    results = [result1, result2]
    fake_loader = DictDataLoader({'include_test.yml': "",
                                  'other_include_test.yml': ""})
    res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager)
    # Different filenames must not be merged: one IncludedFile per file,
    # each carrying only the host that produced it.
    assert isinstance(res, list)
    assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml')
    assert res[1]._filename == os.path.join(os.getcwd(), 'other_include_test.yml')
    assert res[0]._hosts == ['testhost1']
    assert res[1]._hosts == ['testhost2']
    assert res[0]._args == {}
    assert res[1]._args == {}
    assert res[0]._vars == {}
    assert res[1]._vars == {}
def test_process_include_simulate_free(mock_iterator, mock_variable_manager):
    """Same include file loaded under two distinct parent tasks stays separate.

    Simulates the 'free' strategy, where each host runs its own copy of the
    include: the two TaskInclude objects name the same file but have
    different parents, so the results must NOT merge.
    """
    hostname = "testhost1"
    hostname2 = "testhost2"
    parent_task_ds = {'debug': 'msg=foo'}
    parent_task1 = Task.load(parent_task_ds)
    parent_task2 = Task.load(parent_task_ds)
    parent_task1._play = None
    parent_task2._play = None
    task_ds = {'include': 'include_test.yml'}
    loaded_task1 = TaskInclude.load(task_ds, task_include=parent_task1)
    loaded_task2 = TaskInclude.load(task_ds, task_include=parent_task2)
    return_data = {'include': 'include_test.yml'}
    # The task in the TaskResult has to be a TaskInclude so it has a .static attr
    result1 = task_result.TaskResult(host=hostname, task=loaded_task1, return_data=return_data)
    result2 = task_result.TaskResult(host=hostname2, task=loaded_task2, return_data=return_data)
    results = [result1, result2]
    fake_loader = DictDataLoader({'include_test.yml': ""})
    res = IncludedFile.process_include_results(results, mock_iterator, fake_loader, mock_variable_manager)
    # Two entries despite the identical filename, one host each.
    assert isinstance(res, list)
    assert len(res) == 2
    assert res[0]._filename == os.path.join(os.getcwd(), 'include_test.yml')
    assert res[1]._filename == os.path.join(os.getcwd(), 'include_test.yml')
    assert res[0]._hosts == ['testhost1']
    assert res[1]._hosts == ['testhost2']
    assert res[0]._args == {}
    assert res[1]._args == {}
    assert res[0]._vars == {}
    assert res[1]._vars == {}
def test_process_include_simulate_free_block_role_tasks(mock_iterator,
                                                        mock_variable_manager):
    """Test loading the same role returns different included files

    In the case of free, we may end up with included files from roles that
    have the same parent but are different tasks. Previously the comparison
    for equality did not check if the tasks were the same and only checked
    that the parents were the same. This led to some tasks being run
    incorrectly and some tasks being silently dropped."""
    fake_loader = DictDataLoader({
        'include_test.yml': "",
        '/etc/ansible/roles/foo_role/tasks/task1.yml': """
            - debug: msg=task1
        """,
        '/etc/ansible/roles/foo_role/tasks/task2.yml': """
            - debug: msg=task2
        """,
    })
    hostname = "testhost1"
    hostname2 = "testhost2"
    # Two include_role tasks: same role, different tasks_from files.
    role1_ds = {
        'name': 'task1 include',
        'include_role': {
            'name': 'foo_role',
            'tasks_from': 'task1.yml'
        }
    }
    role2_ds = {
        'name': 'task2 include',
        'include_role': {
            'name': 'foo_role',
            'tasks_from': 'task2.yml'
        }
    }
    # Both include_role tasks share the same parent block.
    parent_task_ds = {
        'block': [
            role1_ds,
            role2_ds
        ]
    }
    parent_block = Block.load(parent_task_ds, loader=fake_loader)
    parent_block._play = None
    # Shaped like the 'include_args' payload a TaskExecutor would return.
    include_role1_ds = {
        'include_args': {
            'name': 'foo_role',
            'tasks_from': 'task1.yml'
        }
    }
    include_role2_ds = {
        'include_args': {
            'name': 'foo_role',
            'tasks_from': 'task2.yml'
        }
    }
    include_role1 = IncludeRole.load(role1_ds,
                                     block=parent_block,
                                     loader=fake_loader)
    include_role2 = IncludeRole.load(role2_ds,
                                     block=parent_block,
                                     loader=fake_loader)
    result1 = task_result.TaskResult(host=hostname,
                                     task=include_role1,
                                     return_data=include_role1_ds)
    result2 = task_result.TaskResult(host=hostname2,
                                     task=include_role2,
                                     return_data=include_role2_ds)
    results = [result1, result2]
    res = IncludedFile.process_include_results(results,
                                               mock_iterator,
                                               fake_loader,
                                               mock_variable_manager)
    assert isinstance(res, list)
    # we should get two different includes
    assert len(res) == 2
    # Role includes record the role name, not a resolved file path.
    assert res[0]._filename == 'foo_role'
    assert res[1]._filename == 'foo_role'
    # with different tasks
    assert res[0]._task != res[1]._task
    assert res[0]._hosts == ['testhost1']
    assert res[1]._hosts == ['testhost2']
    assert res[0]._args == {}
    assert res[1]._args == {}
    assert res[0]._vars == {}
    assert res[1]._vars == {}
def test_empty_raw_params():
    """Every include/import form must reject an empty file name at load time."""
    parent_task = Task.load({'debug': 'msg=foo'})
    parent_task._play = None
    for action in ('include', 'include_tasks', 'import_tasks'):
        with pytest.raises(AnsibleParserError):
            TaskInclude.load({action: ''}, task_include=parent_task)
| gpl-3.0 |
drawks/ansible | contrib/inventory/vbox.py | 52 | 3266 | #!/usr/bin/env python
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
from subprocess import Popen, PIPE
import json
class SetEncoder(json.JSONEncoder):
    """JSON encoder that knows how to serialize ``set`` objects (as lists)."""

    def default(self, obj):
        # Anything that is not a set falls through to the base class,
        # which raises TypeError for unserializable objects.
        if not isinstance(obj, set):
            return json.JSONEncoder.default(self, obj)
        return list(obj)
# Name of the VirtualBox management CLI this inventory shells out to.
VBOX = "VBoxManage"


def get_hosts(host=None):
    """Build the Ansible inventory from ``VBoxManage`` output.

    With no argument, lists every VM (``VBoxManage list -l vms``) and returns
    a mapping of group name -> set of VM names, plus a ``_metadata`` entry
    holding per-host variables.  With *host*, parses
    ``VBoxManage showvminfo`` and returns just that host's variable dict.
    """
    returned = {}
    try:
        # universal_newlines=True makes stdout text (str) rather than bytes
        # under Python 3 -- the parsing below performs str operations
        # ('k == ""', startswith, 'Value' in ipinfo) that would otherwise
        # fail or silently misbehave on bytes.
        if host:
            p = Popen([VBOX, 'showvminfo', host], stdout=PIPE, universal_newlines=True)
        else:
            returned = {'all': set(), '_metadata': {}}
            p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE, universal_newlines=True)
    except Exception:
        # VBoxManage missing or not executable: nothing we can report.
        sys.exit(1)
    hostvars = {}
    prevkey = pref_k = ''
    for line in p.stdout.readlines():
        try:
            k, v = line.split(':', 1)
        except Exception:
            # Not a "key: value" line; skip it.
            continue
        if k == '':
            continue
        v = v.strip()
        if k.startswith('Name'):
            # A "Name:" line starts a new VM section.
            if v not in hostvars:
                curname = v
                hostvars[curname] = {}
                try:  # try to get network info
                    x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"],
                              stdout=PIPE, universal_newlines=True)
                    ipinfo = x.stdout.read()
                    if 'Value' in ipinfo:
                        a, ip = ipinfo.split(':', 1)
                        hostvars[curname]['ansible_ssh_host'] = ip.strip()
                except Exception:
                    # Best effort: guest additions may not expose an IP.
                    pass
            continue
        if not host:
            if k == 'Groups':
                # VirtualBox group paths are '/'-separated; each component
                # becomes an inventory group containing this VM.
                for group in v.split('/'):
                    if group:
                        if group not in returned:
                            returned[group] = set()
                        returned[group].add(curname)
                returned['all'].add(curname)
                continue
        pref_k = 'vbox_' + k.strip().replace(' ', '_')
        if k.startswith(' '):
            # Indented keys are sub-properties of the previous top-level key.
            if prevkey not in hostvars[curname]:
                hostvars[curname][prevkey] = {}
            hostvars[curname][prevkey][pref_k] = v
        else:
            if v != '':
                hostvars[curname][pref_k] = v
        prevkey = pref_k
    if not host:
        returned['_metadata']['hostvars'] = hostvars
    else:
        returned = hostvars[host]
    return returned
if __name__ == '__main__':
    # "--host <name>" requests a single host's variables; anything else
    # (including no arguments) produces the full inventory.
    hostname = None
    if len(sys.argv) > 1 and sys.argv[1] == "--host":
        hostname = sys.argv[2]
    inventory = get_hosts(hostname) if hostname else get_hosts()
    sys.stdout.write(json.dumps(inventory, indent=2, cls=SetEncoder))
| gpl-3.0 |
detiber/ansible | lib/ansible/modules/windows/win_domain_membership.py | 32 | 3374 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module maturity metadata consumed by the Ansible docs/build tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'core'}

# Module documentation: YAML embedded in a string, parsed by ansible-doc.
# (PEP 8: spaces around '=' added; string content unchanged.)
DOCUMENTATION = '''
module: win_domain_membership
short_description: Manage domain/workgroup membership for a Windows host
version_added: 2.3
description:
  - Manages domain membership or workgroup membership for a Windows host. Also supports hostname changes. This module may require
    subsequent use of the M(win_reboot) action if changes are made.
options:
  dns_domain_name:
    description:
      - when C(state) is C(domain), the DNS name of the domain to which the targeted Windows host should be joined
  domain_admin_user:
    description:
      - username of a domain admin for the target domain (required to join or leave the domain)
    required: true
  domain_admin_password:
    description:
      - password for the specified C(domain_admin_user)
  hostname:
    description:
      - the desired hostname for the Windows host
  state:
    description:
      - whether the target host should be a member of a domain or workgroup
    choices:
      - domain
      - workgroup
  workgroup_name:
    description:
      - when C(state) is C(workgroup), the name of the workgroup that the Windows host should be in
author:
  - Matt Davis (@nitzmahone)
'''

# Return-value documentation (YAML), also parsed by ansible-doc.
RETURN = '''
reboot_required:
    description: True if changes were made that require a reboot.
    returned: always
    type: boolean
    sample: true
'''

# Usage examples (YAML) shown by ansible-doc.
EXAMPLES = '''
# host should be a member of domain ansible.vagrant; module will ensure the hostname is mydomainclient
# and will use the passed credentials to join domain if necessary.
# Ansible connection should use local credentials if possible.
# If a reboot is required, the second task will trigger one and wait until the host is available.
- hosts: winclient
  gather_facts: no
  tasks:
  - win_domain_membership:
      dns_domain_name: ansible.vagrant
      hostname: mydomainclient
      domain_admin_user: testguy@ansible.vagrant
      domain_admin_password: password123!
      state: domain
    register: domain_state

  - win_reboot:
    when: domain_state.reboot_required

# Host should be in workgroup mywg- module will use the passed credentials to clean-unjoin domain if possible.
# Ansible connection should use local credentials if possible.
# The domain admin credentials can be sourced from a vault-encrypted variable
- hosts: winclient
  gather_facts: no
  tasks:
  - win_domain_membership:
      workgroup_name: mywg
      domain_admin_user: '{{ win_domain_admin_user }}'
      domain_admin_password: '{{ win_domain_admin_password }}'
      state: workgroup
'''
| gpl-3.0 |
beni55/cayley | svg/svg.py | 39 | 3624 | # Copyright 2014 The Cayley Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import svgwrite
import math
import random
# Canvas size (width, height) in SVG user units.
dwg = svgwrite.Drawing((210,130))
# Geometry: node circle radius, ring radius, and distance from the center
# to the two outer "fan" nodes.
node_radius = 15
big_radius = 50
fan_dist = 90
center = (105,65)
# Default edge/fan styling (overridden to neutral grey below).
edge_stroke = 2.5
edge_color = '#555555'
fan_color = '#aa0000'
# Color palette plus a neutral grey -- presumably brand colors; the single
# letters are used as shorthand throughout the rest of the script.
b = '#4285F4'
r = '#DB4437'
g = '#0F9D58'
y = '#F4B400'
n = '#999999'
# Fill colors for the six ring nodes, in ring order.
center_colors = [b, r, y, b, r, y]
# Override the defaults above: edges and fan nodes render neutral grey.
edge_color = n
fan_color = n
def center_list(center, big_r, n=6):
    """Return ``n`` points evenly spaced on a circle.

    Generalized from the original fixed six points; the default keeps the
    hexagonal ring behavior so existing callers are unaffected.

    Args:
        center: (x, y) tuple for the circle's center.
        big_r: radius of the circle.
        n: number of points to generate (default 6).

    Returns:
        List of (x, y) tuples starting at angle 0 (right of center) and
        proceeding counter-clockwise in SVG coordinates (y grows downward,
        hence the minus sign on the sine term).
    """
    x, y = center
    out = []
    for i in range(n):
        angle = 2 * math.pi * i / float(n)
        out.append((x + math.cos(angle) * big_r, y - math.sin(angle) * big_r))
    return out
cx, cy = center
# Six node centers around the ring.
ring_centers = center_list(center, big_radius)
# The two "fan" endpoint nodes sit on the horizontal axis outside the ring;
# they attach to the ring's leftmost (index 3) and rightmost (index 0) nodes.
outer_left = (cx - fan_dist, cy)
outer_right = (cx + fan_dist, cy)
left = ring_centers[3]
right = ring_centers[0]
# Edges are collected in creation order; the animation section below indexes
# into this list by position.
all_lines = []
l = dwg.add(dwg.line(outer_left, left))
l.stroke(edge_color, edge_stroke)
all_lines.append(l)
l = dwg.add(dwg.line(outer_right, right))
l.stroke(edge_color, edge_stroke)
all_lines.append(l)
for i, c in enumerate(ring_centers):
    for j, d in enumerate(ring_centers):
        # Visit each unordered pair exactly once; skip self-pairs.
        if i > j or i == j:
            continue
        # Filter out pairs by residue class mod 3 to select only the
        # desired chords of the hexagon.
        if (i % 3) == (j % 3):
            continue
        if (i % 3) == 1 and (j % 3) == 2:
            continue
        if (j % 3) == 1 and (i % 3) == 2:
            continue
        if i == 0 and j == 3:
            continue
        # NOTE(review): unreachable -- i > j pairs were skipped above, so
        # (i=3, j=0) can never occur here.
        if i == 3 and j == 0:
            continue
        l = dwg.add(dwg.line(c,d))
        l.stroke(edge_color, edge_stroke)
        all_lines.append(l)
# Draw the ring nodes on top of the edges, colored per center_colors.
circle_elems = []
for i, c in enumerate(ring_centers):
    elem = dwg.add(dwg.circle(c, node_radius, fill=center_colors[i]))
    circle_elems.append(elem)
left_circle = dwg.add(dwg.circle(outer_left, node_radius, fill=fan_color))
right_circle = dwg.add(dwg.circle(outer_right, node_radius, fill=fan_color))
# SVG animation elements accumulated by flash() below.
anims = []
def flash(element, orig_color, start, is_line=False):
    """Pulse *element* to the highlight color ``g`` and back.

    Adds two SMIL Animate elements to the drawing (and to the module-level
    ``anims`` list): a 1.0s fade to ``g`` beginning at *start* seconds, then
    a 1.2s fade back to *orig_color*. Lines animate their stroke; everything
    else animates its fill. Returns the second (fade-back) animation.
    """
    prop = "stroke" if is_line else "fill"

    fade_in = svgwrite.animate.Animate(prop, href=element)
    fade_in['from'] = orig_color
    fade_in['to'] = g
    fade_in['begin'] = "+%0.2fs" % start
    fade_in['dur'] = "1.0s"
    dwg.add(fade_in)
    anims.append(fade_in)

    fade_out = svgwrite.animate.Animate(prop, href=element)
    fade_out['from'] = g
    fade_out['to'] = orig_color
    fade_out['begin'] = "+%0.2fs" % (start + 1.0)
    fade_out['dur'] = "1.2s"
    dwg.add(fade_out)
    anims.append(fade_out)
    return fade_out
# Save the static (un-animated) drawing first.
dwg.saveas("cayley.svg")
# First traversal: pulse from the left fan node through a set of edges to
# the right fan node, staggering start times.
# NOTE(review): `first` is never used afterwards; only `final` matters for
# the loop-restart wiring below.
first = flash(left_circle, n, 0)
flash(all_lines[0], n, 0.5, True)
flash(all_lines[7], n, 1.0, True)
flash(all_lines[3], n, 1.5, True)
flash(all_lines[9], n, 1.0, True)
flash(all_lines[5], n, 1.5, True)
flash(all_lines[1], n, 2.0, True)
flash(right_circle, n, 2.5)
# Second traversal: same endpoints, different set of edges.
flash(left_circle, n, 3.5)
flash(all_lines[0], n, 4.0, True)
flash(all_lines[6], n, 4.5, True)
flash(all_lines[4], n, 5.0, True)
flash(all_lines[8], n, 4.5, True)
flash(all_lines[2], n, 5.0, True)
flash(all_lines[1], n, 5.5, True)
final = flash(right_circle, n, 6.0)
for anim in anims:
    # Append a second begin time keyed to the end of the final animation so
    # the whole sequence restarts (loops) when it finishes.
    anim["begin"] = anim["begin"] + "; " + final.get_id() + ".end" + anim["begin"]
dwg.saveas("cayley_active.svg")
| apache-2.0 |
manjunaths/tensorflow | tensorflow/contrib/layers/__init__.py | 9 | 4048 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network layers, regularizers, summaries, etc.
## Higher level ops for building neural network layers.
This package provides several ops that take care of creating variables that are
used internally in a consistent way and provide the building blocks for many
common machine learning algorithms.
@@avg_pool2d
@@batch_norm
@@convolution2d
@@conv2d_in_plane
@@convolution2d_in_plane
@@conv2d_transpose
@@convolution2d_transpose
@@dropout
@@flatten
@@fully_connected
@@layer_norm
@@linear
@@max_pool2d
@@one_hot_encoding
@@relu
@@relu6
@@repeat
@@safe_embedding_lookup_sparse
@@separable_conv2d
@@separable_convolution2d
@@softmax
@@stack
@@unit_norm
@@embed_sequence
Aliases for fully_connected which set a default activation function are
available: `relu`, `relu6` and `linear`.
`stack` operation is also available. It builds a stack of layers by applying
a layer repeatedly.
## Regularizers
Regularization can help prevent overfitting. These have the signature
`fn(weights)`. The loss is typically added to
`tf.GraphKeys.REGULARIZATION_LOSSES`.
@@apply_regularization
@@l1_regularizer
@@l2_regularizer
@@sum_regularizer
## Initializers
Initializers are used to initialize variables with sensible values given their
size, data type, and purpose.
@@xavier_initializer
@@xavier_initializer_conv2d
@@variance_scaling_initializer
## Optimization
Optimize weights given a loss.
@@optimize_loss
## Summaries
Helper functions to summarize specific variables or ops.
@@summarize_activation
@@summarize_tensor
@@summarize_tensors
@@summarize_collection
The layers module defines convenience functions `summarize_variables`,
`summarize_weights` and `summarize_biases`, which set the `collection` argument
of `summarize_collection` to `VARIABLES`, `WEIGHTS` and `BIASES`, respectively.
@@summarize_activations
## Feature columns
Feature columns provide a mechanism to map data to a model.
@@bucketized_column
@@check_feature_columns
@@create_feature_spec_for_parsing
@@crossed_column
@@embedding_column
@@scattered_embedding_column
@@input_from_feature_columns
@@joint_weighted_sum_from_feature_columns
@@make_place_holder_tensors_for_base_features
@@multi_class_target
@@one_hot_column
@@parse_feature_columns_from_examples
@@parse_feature_columns_from_sequence_examples
@@real_valued_column
@@shared_embedding_columns
@@sparse_column_with_hash_bucket
@@sparse_column_with_integerized_feature
@@sparse_column_with_keys
@@weighted_sparse_column
@@weighted_sum_from_feature_columns
@@infer_real_valued_columns
@@sequence_input_from_feature_columns
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.layers.python.layers import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Names that should remain public even though they have no @@ entry in the
# module docstring above (legacy aliases and constants).
_allowed_symbols = ['bias_add',
                    'conv2d',
                    'feature_column',
                    'legacy_fully_connected',
                    'legacy_linear',
                    'legacy_relu',
                    'OPTIMIZER_CLS_NAMES',
                    'regression_target',
                    'SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY',
                    'summaries']
# Strip every module attribute not mentioned in the docstring @@ list or in
# _allowed_symbols, keeping the public API surface deliberate.
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
Austin503/support-tools | googlecode-issues-exporter/bitbucket_issue_converter_test.py | 90 | 6338 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the BitBucket Services."""
# pylint: disable=missing-docstring,protected-access
import unittest
import bitbucket_issue_converter
import issues
from issues_test import DEFAULT_USERNAME
from issues_test import SINGLE_COMMENT
from issues_test import SINGLE_ISSUE
from issues_test import COMMENT_ONE
from issues_test import COMMENT_TWO
from issues_test import COMMENT_THREE
from issues_test import COMMENTS_DATA
from issues_test import NO_ISSUE_DATA
from issues_test import USER_MAP
# The BitBucket username.
BITBUCKET_USERNAME = DEFAULT_USERNAME
# The BitBucket repo name.
BITBUCKET_REPO = "repo"
class TestUserService(unittest.TestCase):
    """Tests for the UserService."""

    def setUp(self):
        # Fresh service per test keeps the cases independent.
        self._bitbucket_user_service = bitbucket_issue_converter.UserService()

    def testIsUser123(self):
        self.assertTrue(self._bitbucket_user_service.IsUser("username123"))

    def testIsUser321(self):
        self.assertTrue(self._bitbucket_user_service.IsUser("username321"))
class TestIssueService(unittest.TestCase):
    """Tests for the IssueService."""
    def setUp(self):
        self._bitbucket_issue_service = bitbucket_issue_converter.IssueService()
        # Show full diffs for the large dict comparisons below.
        self.maxDiff = None
    def testCreateIssue(self):
        # Expected BitBucket issue payload produced from SINGLE_ISSUE.
        issue_body = {
            "assignee": "default_username",
            "content": (
                "```\none\n```\n\nReported by `a_uthor` on last year\n"
                "- **Labels added**: added-label\n"
                "- **Labels removed**: removed-label\n"),
            "content_updated_on": "last month",
            "created_on": "last year",
            "id": 1,
            "kind": "bug",
            "priority": "minor",
            "reporter": None,
            "status": "resolved",
            "title": "issue_title",
            "updated_on": "last year",
        }
        issue_number = self._bitbucket_issue_service.CreateIssue(SINGLE_ISSUE)
        self.assertEqual(1, issue_number)
        actual = self._bitbucket_issue_service._bitbucket_issues[0]
        # The comment body gets rewritten to preserve the origin ID.
        issue_body["content"] = (
            "Originally reported on Google Code with ID 1\n" + issue_body["content"])
        self.assertEqual(issue_body, actual)
    def testCloseIssue(self):
        # no-op
        self._bitbucket_issue_service.CloseIssue(123)
    # TODO(chris): Add testcase for an issue comment with attachments.
    def testCreateComment(self):
        # Expected BitBucket comment payload produced from SINGLE_COMMENT.
        comment_body = {
            "content": (
                "```\none\n```\n\nReported by `a_uthor` on last year\n"
                "- **Labels added**: added-label\n"
                "- **Labels removed**: removed-label\n"),
            "created_on": "last year",
            "id": 1,
            "issue": 1,
            "updated_on": "last year",
            "user": "a_uthor",
        }
        self._bitbucket_issue_service.CreateComment(
            1, SINGLE_COMMENT)
        actual = self._bitbucket_issue_service._bitbucket_comments[0]
        self.assertEqual(comment_body, actual)
class TestIssueExporter(unittest.TestCase):
    """Tests for the IssueService."""
    def setUp(self):
        self._bitbucket_user_service = bitbucket_issue_converter.UserService()
        self._bitbucket_issue_service = bitbucket_issue_converter.IssueService()
        self.issue_exporter = issues.IssueExporter(
            self._bitbucket_issue_service, self._bitbucket_user_service,
            NO_ISSUE_DATA, BITBUCKET_REPO, USER_MAP)
        self.issue_exporter.Init()
    def testCreateIssue(self):
        issue_number = self.issue_exporter._CreateIssue(SINGLE_ISSUE)
        self.assertEqual(1, issue_number)
    def testCreateComments(self):
        self.assertEqual(0, self.issue_exporter._comment_number)
        self.issue_exporter._CreateComments(COMMENTS_DATA, 1234, SINGLE_ISSUE)
        self.assertEqual(4, self.issue_exporter._comment_number)
    def testStart(self):
        # Three Google Code issues in export format: one open, two closed,
        # with varying comment counts and labels.
        self.issue_exporter._issue_json_data = [
            {
                "id": "1",
                "title": "Title1",
                "state": "open",
                "status": "New",
                "comments": {
                    "items": [COMMENT_ONE, COMMENT_TWO, COMMENT_THREE],
                },
                "labels": ["Type-Issue", "Priority-High"],
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User1"
                          },
                "published": "last year",
                "updated": "last month",
            },
            {
                "id": "2",
                "title": "Title2",
                "state": "closed",
                "status": "Fixed",
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User2"
                          },
                "labels": [],
                "comments": {
                    "items": [COMMENT_ONE],
                },
                "published": "last month",
                "updated": "last week",
            },
            {
                "id": "3",
                "title": "Title3",
                "state": "closed",
                "status": "WontFix",
                "comments": {
                    "items": [COMMENT_ONE, COMMENT_TWO],
                },
                "labels": ["Type-Defect"],
                "owner": {"kind": "projecthosting#issuePerson",
                          "name": "User3"
                          },
                "published": "last week",
                "updated": "yesterday",
            }]
        self.issue_exporter.Init()
        self.issue_exporter.Start()
        self.assertEqual(3, self.issue_exporter._issue_total)
        self.assertEqual(3, self.issue_exporter._issue_number)
        # Comment counts are per issue and should match the numbers from the last
        # issue created, minus one for the first comment, which is really
        # the issue description.
        self.assertEqual(1, self.issue_exporter._comment_number)
        self.assertEqual(1, self.issue_exporter._comment_total)
# Run the suite directly; buffer=True suppresses stdout/stderr from passing
# tests so only failures print output.
if __name__ == "__main__":
    unittest.main(buffer=True)
| apache-2.0 |
madphysicist/numpy | numpy/typing/tests/data/pass/scalars.py | 2 | 2937 | import sys
import datetime as dt
import pytest
import numpy as np
# Construction
# Minimal classes implementing a single numeric-conversion protocol each;
# used below to exercise the numpy scalar constructors' stub signatures.
class D:
    """Implements ``__index__`` (usable wherever an integer index is required)."""
    def __index__(self) -> int:
        return 0
class C:
    """Implements ``__complex__`` for conversion via ``complex()``."""
    def __complex__(self) -> complex:
        return 3j
class B:
    """Implements ``__int__`` for conversion via ``int()``."""
    def __int__(self) -> int:
        return 4
class A:
    """Implements ``__float__`` for conversion via ``float()``."""
    def __float__(self) -> float:
        return 4.0
# Each statement below exists to exercise the numpy type stubs: the file is
# type-checked (and executed) but results are mostly discarded.
np.complex64(3j)
np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")
np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")
np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")
# ``__index__``-based construction requires Python >= 3.8 semantics here.
if sys.version_info >= (3, 8):
    np.uint64(D())
    np.float32(D())
    np.complex64(D())
np.bytes_(b"hello")
np.bytes_("hello", 'utf-8')
np.bytes_("hello", encoding='utf-8')
np.str_("hello")
np.str_(b"hello", 'utf-8')
np.str_(b"hello", encoding='utf-8')
# Array-ish semantics
np.int8().real
np.int16().imag
np.int32().data
np.int64().flags
np.uint8().itemsize * 2
np.uint16().ndim + 1
np.uint32().strides
np.uint64().shape
# Time structures
np.datetime64()
np.datetime64(0, "D")
np.datetime64(0, b"D")
np.datetime64(0, ('ms', 3))
np.datetime64("2019")
np.datetime64(b"2019")
np.datetime64("2019", "D")
np.datetime64(np.datetime64())
np.datetime64(dt.datetime(2000, 5, 3))
np.datetime64(None)
np.datetime64(None, "D")
np.timedelta64()
np.timedelta64(0)
np.timedelta64(0, "D")
np.timedelta64(0, ('ms', 3))
np.timedelta64(0, b"D")
np.timedelta64("3")
np.timedelta64(b"5")
np.timedelta64(np.timedelta64(2))
np.timedelta64(dt.timedelta(2))
np.timedelta64(None)
np.timedelta64(None, "D")
np.void(1)
np.void(np.int64(1))
np.void(True)
np.void(np.bool_(True))
np.void(b"test")
np.void(np.bytes_("test"))
# Protocols
i8 = np.int64()
u8 = np.uint64()
f8 = np.float64()
c16 = np.complex128()
b_ = np.bool_()
td = np.timedelta64()
U = np.str_("1")
S = np.bytes_("1")
AR = np.array(1, dtype=np.float64)
int(i8)
int(u8)
int(f8)
int(b_)
int(td)
int(U)
int(S)
int(AR)
# Converting a complex scalar to int/float drops the imaginary part and
# emits ComplexWarning.
with pytest.warns(np.ComplexWarning):
    int(c16)
float(i8)
float(u8)
float(f8)
float(b_)
float(td)
float(U)
float(S)
float(AR)
with pytest.warns(np.ComplexWarning):
    float(c16)
complex(i8)
complex(u8)
complex(f8)
complex(c16)
complex(b_)
complex(td)
complex(U)
complex(AR)
# Misc
c16.dtype
c16.real
c16.imag
c16.real.real
c16.real.imag
c16.ndim
c16.size
c16.itemsize
c16.shape
c16.strides
c16.squeeze()
c16.byteswap()
c16.transpose()
# Aliases
np.str0()
np.bool8()
np.bytes0()
np.string_()
np.object0()
np.void0(0)
np.byte()
np.short()
np.intc()
np.intp()
np.int0()
np.int_()
np.longlong()
np.ubyte()
np.ushort()
np.uintc()
np.uintp()
np.uint0()
np.uint()
np.ulonglong()
np.half()
np.single()
np.double()
np.float_()
np.longdouble()
np.longfloat()
np.csingle()
np.singlecomplex()
np.cdouble()
np.complex_()
np.cfloat()
np.clongdouble()
np.clongfloat()
np.longcomplex()
| bsd-3-clause |
Danielhiversen/home-assistant | homeassistant/components/fan/template.py | 3 | 13211 | """
Support for Template fans.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/fan.template/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.fan import (
SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH, SUPPORT_SET_SPEED, SUPPORT_OSCILLATE,
FanEntity, ATTR_SPEED, ATTR_OSCILLATING, ENTITY_ID_FORMAT,
SUPPORT_DIRECTION, DIRECTION_FORWARD, DIRECTION_REVERSE, ATTR_DIRECTION)
from homeassistant.const import (
CONF_FRIENDLY_NAME, CONF_VALUE_TEMPLATE, CONF_ENTITY_ID,
STATE_ON, STATE_OFF, MATCH_ALL, EVENT_HOMEASSISTANT_START,
STATE_UNKNOWN)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.script import Script
_LOGGER = logging.getLogger(__name__)
# Configuration keys accepted by the template fan platform.
CONF_FANS = 'fans'
CONF_SPEED_LIST = 'speeds'
CONF_SPEED_TEMPLATE = 'speed_template'
CONF_OSCILLATING_TEMPLATE = 'oscillating_template'
CONF_DIRECTION_TEMPLATE = 'direction_template'
CONF_ON_ACTION = 'turn_on'
CONF_OFF_ACTION = 'turn_off'
CONF_SET_SPEED_ACTION = 'set_speed'
CONF_SET_OSCILLATING_ACTION = 'set_oscillating'
CONF_SET_DIRECTION_ACTION = 'set_direction'
# Value sets the templates are validated against at runtime.
_VALID_STATES = [STATE_ON, STATE_OFF]
_VALID_OSC = [True, False]
_VALID_DIRECTIONS = [DIRECTION_FORWARD, DIRECTION_REVERSE]
# Per-fan configuration schema: state template and on/off actions are
# mandatory; speed/oscillation/direction support is optional.
FAN_SCHEMA = vol.Schema({
    vol.Optional(CONF_FRIENDLY_NAME): cv.string,
    vol.Required(CONF_VALUE_TEMPLATE): cv.template,
    vol.Optional(CONF_SPEED_TEMPLATE): cv.template,
    vol.Optional(CONF_OSCILLATING_TEMPLATE): cv.template,
    vol.Optional(CONF_DIRECTION_TEMPLATE): cv.template,
    vol.Required(CONF_ON_ACTION): cv.SCRIPT_SCHEMA,
    vol.Required(CONF_OFF_ACTION): cv.SCRIPT_SCHEMA,
    vol.Optional(CONF_SET_SPEED_ACTION): cv.SCRIPT_SCHEMA,
    vol.Optional(CONF_SET_OSCILLATING_ACTION): cv.SCRIPT_SCHEMA,
    vol.Optional(CONF_SET_DIRECTION_ACTION): cv.SCRIPT_SCHEMA,
    vol.Optional(
        CONF_SPEED_LIST,
        default=[SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
    ): cv.ensure_list,
    vol.Optional(CONF_ENTITY_ID): cv.entity_ids
})
# Platform schema: a mapping of slug -> per-fan config.
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
    vol.Required(CONF_FANS): vol.Schema({cv.slug: FAN_SCHEMA}),
})
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None
):
    """Set up the Template Fans.

    Builds one TemplateFan per configured slug, wiring its templates and
    action scripts, and works out which entity ids the templates depend on
    (for state-change tracking).
    """
    fans = []
    for device, device_config in config[CONF_FANS].items():
        friendly_name = device_config.get(CONF_FRIENDLY_NAME, device)
        state_template = device_config[CONF_VALUE_TEMPLATE]
        speed_template = device_config.get(CONF_SPEED_TEMPLATE)
        oscillating_template = device_config.get(
            CONF_OSCILLATING_TEMPLATE
        )
        direction_template = device_config.get(CONF_DIRECTION_TEMPLATE)
        on_action = device_config[CONF_ON_ACTION]
        off_action = device_config[CONF_OFF_ACTION]
        set_speed_action = device_config.get(CONF_SET_SPEED_ACTION)
        set_oscillating_action = device_config.get(CONF_SET_OSCILLATING_ACTION)
        set_direction_action = device_config.get(CONF_SET_DIRECTION_ACTION)
        speed_list = device_config[CONF_SPEED_LIST]
        # Accumulate the entity ids referenced by every template, unless the
        # user listed them explicitly (manual_entity_ids wins below).
        entity_ids = set()
        manual_entity_ids = device_config.get(CONF_ENTITY_ID)
        for template in (state_template, speed_template, oscillating_template,
                         direction_template):
            if template is None:
                continue
            # Every template still needs hass attached, even when entity
            # extraction is skipped.
            template.hass = hass
            if entity_ids == MATCH_ALL or manual_entity_ids is not None:
                continue
            template_entity_ids = template.extract_entities()
            # One un-analyzable template forces tracking of all entities.
            if template_entity_ids == MATCH_ALL:
                entity_ids = MATCH_ALL
            else:
                entity_ids |= set(template_entity_ids)
        if manual_entity_ids is not None:
            entity_ids = manual_entity_ids
        elif entity_ids != MATCH_ALL:
            entity_ids = list(entity_ids)
        fans.append(
            TemplateFan(
                hass, device, friendly_name,
                state_template, speed_template, oscillating_template,
                direction_template, on_action, off_action, set_speed_action,
                set_oscillating_action, set_direction_action, speed_list,
                entity_ids
            )
        )
    async_add_entities(fans)
class TemplateFan(FanEntity):
    """A template fan component.

    State, speed, oscillation and direction are each driven by an optional
    template; turn_on/turn_off and the set_* operations run user-supplied
    action scripts.
    """

    def __init__(self, hass, device_id, friendly_name,
                 state_template, speed_template, oscillating_template,
                 direction_template, on_action, off_action, set_speed_action,
                 set_oscillating_action, set_direction_action, speed_list,
                 entity_ids):
        """Initialize the fan."""
        self.hass = hass
        self.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, device_id, hass=hass)
        self._name = friendly_name
        self._template = state_template
        self._speed_template = speed_template
        self._oscillating_template = oscillating_template
        self._direction_template = direction_template
        self._supported_features = 0

        self._on_script = Script(hass, on_action)
        self._off_script = Script(hass, off_action)

        # Optional action scripts; None means the operation is a no-op.
        self._set_speed_script = None
        if set_speed_action:
            self._set_speed_script = Script(hass, set_speed_action)

        self._set_oscillating_script = None
        if set_oscillating_action:
            self._set_oscillating_script = Script(hass, set_oscillating_action)

        self._set_direction_script = None
        if set_direction_action:
            self._set_direction_script = Script(hass, set_direction_action)

        self._state = STATE_OFF
        self._speed = None
        self._oscillating = None
        self._direction = None

        self._template.hass = self.hass
        # Each optional template enables the corresponding feature flag.
        if self._speed_template:
            self._speed_template.hass = self.hass
            self._supported_features |= SUPPORT_SET_SPEED
        if self._oscillating_template:
            self._oscillating_template.hass = self.hass
            self._supported_features |= SUPPORT_OSCILLATE
        if self._direction_template:
            self._direction_template.hass = self.hass
            self._supported_features |= SUPPORT_DIRECTION

        self._entities = entity_ids

        # List of valid speeds
        self._speed_list = speed_list

    @property
    def name(self):
        """Return the display name of this fan."""
        return self._name

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return self._supported_features

    @property
    def speed_list(self) -> list:
        """Get the list of available speeds."""
        return self._speed_list

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state == STATE_ON

    @property
    def speed(self):
        """Return the current speed."""
        return self._speed

    @property
    def oscillating(self):
        """Return the oscillation state."""
        return self._oscillating

    @property
    def direction(self):
        """Return the current direction."""
        return self._direction

    @property
    def should_poll(self):
        """Return the polling state."""
        return False

    # pylint: disable=arguments-differ
    async def async_turn_on(self, speed: str = None) -> None:
        """Turn on the fan."""
        await self._on_script.async_run(context=self._context)
        self._state = STATE_ON

        if speed is not None:
            await self.async_set_speed(speed)

    # pylint: disable=arguments-differ
    async def async_turn_off(self) -> None:
        """Turn off the fan."""
        await self._off_script.async_run(context=self._context)
        self._state = STATE_OFF

    async def async_set_speed(self, speed: str) -> None:
        """Set the speed of the fan; invalid values are logged and ignored."""
        if self._set_speed_script is None:
            return

        if speed in self._speed_list:
            self._speed = speed
            await self._set_speed_script.async_run(
                {ATTR_SPEED: speed}, context=self._context)
        else:
            _LOGGER.error(
                'Received invalid speed: %s. Expected: %s.',
                speed, self._speed_list)

    async def async_oscillate(self, oscillating: bool) -> None:
        """Set oscillation of the fan; invalid values are logged and ignored."""
        if self._set_oscillating_script is None:
            return

        if oscillating in _VALID_OSC:
            self._oscillating = oscillating
            await self._set_oscillating_script.async_run(
                {ATTR_OSCILLATING: oscillating}, context=self._context)
        else:
            _LOGGER.error(
                'Received invalid oscillating value: %s. Expected: %s.',
                oscillating, ', '.join(_VALID_OSC))

    async def async_set_direction(self, direction: str) -> None:
        """Set the direction of the fan; invalid values are logged and ignored."""
        if self._set_direction_script is None:
            return

        if direction in _VALID_DIRECTIONS:
            self._direction = direction
            await self._set_direction_script.async_run(
                {ATTR_DIRECTION: direction}, context=self._context)
        else:
            _LOGGER.error(
                'Received invalid direction: %s. Expected: %s.',
                direction, ', '.join(_VALID_DIRECTIONS))

    async def async_added_to_hass(self):
        """Register callbacks."""
        @callback
        def template_fan_state_listener(entity, old_state, new_state):
            """Handle target device state changes."""
            self.async_schedule_update_ha_state(True)

        @callback
        def template_fan_startup(event):
            """Update template on startup."""
            # Subscribing is deferred until HA has fully started so that
            # the tracked entities exist.
            self.hass.helpers.event.async_track_state_change(
                self._entities, template_fan_state_listener)

            self.async_schedule_update_ha_state(True)

        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, template_fan_startup)

    async def async_update(self):
        """Update the state from the template."""
        # Update state
        try:
            state = self._template.async_render()
        except TemplateError as ex:
            _LOGGER.error(ex)
            state = None
            self._state = None

        # Validate state
        if state in _VALID_STATES:
            self._state = state
        elif state == STATE_UNKNOWN:
            self._state = None
        else:
            _LOGGER.error(
                'Received invalid fan is_on state: %s. Expected: %s.',
                state, ', '.join(_VALID_STATES))
            self._state = None

        # Update speed if 'speed_template' is configured
        if self._speed_template is not None:
            try:
                speed = self._speed_template.async_render()
            except TemplateError as ex:
                _LOGGER.error(ex)
                speed = None
                self._state = None

            # Validate speed
            if speed in self._speed_list:
                self._speed = speed
            elif speed == STATE_UNKNOWN:
                self._speed = None
            else:
                _LOGGER.error(
                    'Received invalid speed: %s. Expected: %s.',
                    speed, self._speed_list)
                self._speed = None

        # Update oscillating if 'oscillating_template' is configured
        if self._oscillating_template is not None:
            try:
                oscillating = self._oscillating_template.async_render()
            except TemplateError as ex:
                _LOGGER.error(ex)
                oscillating = None
                self._state = None

            # Validate osc (templates render booleans as the strings
            # 'True'/'False', so both forms are accepted)
            if oscillating == 'True' or oscillating is True:
                self._oscillating = True
            elif oscillating == 'False' or oscillating is False:
                self._oscillating = False
            elif oscillating == STATE_UNKNOWN:
                self._oscillating = None
            else:
                _LOGGER.error(
                    'Received invalid oscillating: %s. Expected: True/False.',
                    oscillating)
                self._oscillating = None

        # Update direction if 'direction_template' is configured
        if self._direction_template is not None:
            try:
                direction = self._direction_template.async_render()
            except TemplateError as ex:
                _LOGGER.error(ex)
                direction = None
                self._state = None

            # Validate direction
            if direction in _VALID_DIRECTIONS:
                self._direction = direction
            elif direction == STATE_UNKNOWN:
                self._direction = None
            else:
                _LOGGER.error(
                    'Received invalid direction: %s. Expected: %s.',
                    direction, ', '.join(_VALID_DIRECTIONS))
                self._direction = None
| mit |
davidmueller13/f2fs-backport | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct

# Read packed little-endian u32 values from stdin and emit them as a
# space-separated "index(hex)=value" string for the adsl_config sysfs
# attribute.  Exit with an error if the input length is not a multiple of 4.
index = 0
while True:
    chunk = sys.stdin.read(4)
    if not chunk:
        break
    if len(chunk) != 4:
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(chunk)))
        sys.exit(1)

    value = struct.unpack("<I", chunk)[0]
    if index:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(index, value))
    index += 1
sys.stdout.write("\n")
| gpl-2.0 |
peterbe/airmozilla | airmozilla/manage/forms.py | 1 | 9589 | import pytz
from django import forms
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.contrib.flatpages.models import FlatPage
from funfactory.urlresolvers import reverse
from airmozilla.base.forms import BaseModelForm
from airmozilla.main.models import (Approval, Category, Event, EventOldSlug,
Location, Participant, Tag, Template)
TIMEZONE_CHOICES = [(tz, tz.replace('_', ' ')) for tz in pytz.common_timezones]
class UserEditForm(BaseModelForm):
    """Admin form for editing a user's active/staff/superuser flags and
    group membership."""
    class Meta:
        model = User
        fields = ('is_active', 'is_staff', 'is_superuser', 'groups')

    def clean(self):
        """Cross-field validation of the permission flags."""
        cleaned_data = super(UserEditForm, self).clean()
        is_active = cleaned_data.get('is_active')
        is_staff = cleaned_data.get('is_staff')
        is_superuser = cleaned_data.get('is_superuser')
        groups = cleaned_data.get('groups')
        if is_superuser and not is_staff:
            raise forms.ValidationError('Superusers must be staff.')
        if is_staff and not is_active:
            raise forms.ValidationError('Staff must be active.')
        if is_staff and not is_superuser and not groups:
            raise forms.ValidationError(
                'Non-superuser staff must belong to a group.'
            )
        return cleaned_data
class GroupEditForm(BaseModelForm):
    """Admin form for editing an auth Group; renders permissions as
    checkboxes instead of the default multi-select."""
    def __init__(self, *args, **kwargs):
        super(GroupEditForm, self).__init__(*args, **kwargs)
        self.fields['name'].required = True
        # Re-create the permissions field with the same choices but a
        # checkbox widget.
        choices = self.fields['permissions'].choices
        self.fields['permissions'] = forms.MultipleChoiceField(
            choices=choices,
            widget=forms.CheckboxSelectMultiple,
            required=False
        )

    class Meta:
        model = Group
class UserFindForm(BaseModelForm):
    """Look up a user by email address (case-insensitive)."""
    class Meta:
        model = User
        fields = ('email',)

    def clean_email(self):
        email = self.cleaned_data['email']
        try:
            user = User.objects.get(email__iexact=email)
        except User.DoesNotExist:
            raise forms.ValidationError('User with this email not found.')
        # Return the canonical casing stored on the user record.
        return user.email
class EventRequestForm(BaseModelForm):
    """Form used to request a new event.

    ``tags`` and ``participants`` are entered as comma-separated text and
    converted to model instances in the corresponding ``clean_*`` methods.
    """
    tags = forms.CharField(required=False)
    participants = forms.CharField(required=False)
    timezone = forms.ChoiceField(
        choices=TIMEZONE_CHOICES,
        initial=settings.TIME_ZONE, label='Time zone'
    )

    def __init__(self, *args, **kwargs):
        super(EventRequestForm, self).__init__(*args, **kwargs)
        # Add "create new" shortcut links next to the related-object fields.
        self.fields['participants'].help_text = (
            '<a href="%s" class="btn" target="_blank">'
            '<i class="icon-plus-sign"></i>'
            'New Participant'
            '</a>' % reverse('manage:participant_new'))
        self.fields['location'].help_text = (
            '<a href="%s" class="btn" target="_blank">'
            '<i class="icon-plus-sign"></i>'
            'New location'
            '</a>' % reverse('manage:location_new'))
        self.fields['category'].help_text = (
            '<a href="%s" class="btn" target="_blank">'
            '<i class="icon-plus-sign"></i>'
            'New category'
            '</a>' % reverse('manage:category_new'))
        self.fields['placeholder_img'].label = 'Placeholder image'
        if 'instance' in kwargs:
            event = kwargs['instance']
            approvals = event.approval_set.all()
            self.initial['approvals'] = [app.group for app in approvals]
            if event.pk:
                # Pre-populate the comma-separated text inputs from the
                # existing many-to-many relations.
                tag_format = lambda objects: ','.join(map(unicode, objects))
                participants_formatted = tag_format(event.participants.all())
                tags_formatted = tag_format(event.tags.all())
                self.initial['tags'] = tags_formatted
                self.initial['participants'] = participants_formatted

    def clean_tags(self):
        """Convert the comma-separated tags string into Tag instances,
        creating any that don't exist yet."""
        tags = self.cleaned_data['tags']
        split_tags = [t.strip() for t in tags.split(',') if t.strip()]
        final_tags = []
        for tag_name in split_tags:
            t, __ = Tag.objects.get_or_create(name=tag_name)
            final_tags.append(t)
        return final_tags

    def clean_participants(self):
        """Convert the comma-separated participants string into existing
        Participant instances (raises DoesNotExist for unknown names)."""
        participants = self.cleaned_data['participants']
        split_participants = [p.strip() for p in participants.split(',')
                              if p.strip()]
        final_participants = []
        for participant_name in split_participants:
            p = Participant.objects.get(name=participant_name)
            final_participants.append(p)
        return final_participants

    def clean_slug(self):
        """Enforce unique slug across current slugs and old slugs."""
        slug = self.cleaned_data['slug']
        if (Event.objects.filter(slug=slug).exclude(pk=self.instance.id)
                or EventOldSlug.objects.filter(slug=slug)):
            raise forms.ValidationError('This slug is already in use.')
        return slug

    class Meta:
        model = Event
        widgets = {
            'description': forms.Textarea(attrs={'rows': 4}),
            'short_description': forms.Textarea(attrs={'rows': 2}),
            'call_info': forms.Textarea(attrs={'rows': 3}),
            # Fix: 'additional_links' was listed twice; the duplicate dict
            # key silently overrode the first and has been removed.
            'additional_links': forms.Textarea(attrs={'rows': 3}),
            'template_environment': forms.Textarea(attrs={'rows': 3}),
            'start_time': forms.DateTimeInput(format='%Y-%m-%d %H:%M'),
            'archive_time': forms.DateTimeInput(format='%Y-%m-%d %H:%M'),
        }
        exclude = ('featured', 'status', 'archive_time', 'slug')
        # Fields specified to enforce order
        fields = (
            'title', 'placeholder_img', 'description',
            'short_description', 'location', 'start_time', 'timezone',
            'participants', 'category', 'tags', 'call_info',
            'additional_links', 'public'
        )
class EventEditForm(EventRequestForm):
    """Full event edit form: adds approval-group selection on top of the
    request form and exposes slug/status/featured/template fields."""
    # Only groups holding the 'change_approval' permission may be chosen.
    approvals = forms.ModelMultipleChoiceField(
        queryset=Group.objects.filter(permissions__codename='change_approval'),
        required=False,
        widget=forms.CheckboxSelectMultiple()
    )

    class Meta(EventRequestForm.Meta):
        exclude = ('archive_time',)
        # Fields specified to enforce order
        fields = (
            'title', 'slug', 'status', 'public', 'featured', 'template',
            'template_environment', 'placeholder_img', 'location',
            'description', 'short_description', 'start_time', 'archive_time',
            'timezone', 'participants', 'category', 'tags', 'call_info',
            'additional_links', 'approvals'
        )
class EventExperiencedRequestForm(EventEditForm):
    """Event request form for experienced requesters: exposes status,
    template and approvals, but keeps slug and featured excluded."""
    class Meta(EventEditForm.Meta):
        exclude = ('featured', 'archive_time', 'slug')
        # Fields specified to enforce order.
        # Fix: 'public' appeared twice in this tuple (dead duplicate) and
        # stale commented-out widget overrides were removed.
        fields = (
            'title', 'status', 'public', 'template',
            'template_environment', 'placeholder_img', 'description',
            'short_description', 'location', 'start_time', 'timezone',
            'participants', 'category', 'tags', 'call_info',
            'additional_links', 'approvals'
        )
class EventArchiveForm(BaseModelForm):
    """Form used when archiving an event."""
    archive_time = forms.IntegerField()

    def __init__(self, *args, **kwargs):
        super(EventArchiveForm, self).__init__(*args, **kwargs)
        # Placeholder div that client-side JS turns into a slider widget.
        self.fields['archive_time'].help_text = (
            '<div id="archive_time_slider"></div>'
        )

    class Meta(EventRequestForm.Meta):
        exclude = ()
        fields = ('template', 'template_environment')
class EventFindForm(BaseModelForm):
    """Look up events by partial, case-insensitive title."""
    class Meta:
        model = Event
        fields = ('title',)

    def clean_title(self):
        title = self.cleaned_data['title']
        if not Event.objects.filter(title__icontains=title):
            raise forms.ValidationError('No event with this title found.')
        return title
class ParticipantEditForm(BaseModelForm):
    """Edit a participant; creator and clear_token are managed internally
    and therefore excluded from the form."""
    class Meta:
        model = Participant
        exclude = ('creator', 'clear_token')
class ParticipantFindForm(BaseModelForm):
    """Look up participants by partial, case-insensitive name."""
    class Meta:
        model = Participant
        fields = ('name',)

    def clean_name(self):
        name = self.cleaned_data['name']
        if not Participant.objects.filter(name__icontains=name):
            raise forms.ValidationError('No participant with this name found.')
        return name
class CategoryForm(BaseModelForm):
    """Plain model form for Category (all model fields)."""
    class Meta:
        model = Category
class TemplateEditForm(BaseModelForm):
    """Edit a template; content gets a tall textarea."""
    class Meta:
        model = Template
        widgets = {
            'content': forms.Textarea(attrs={'rows': 20})
        }
class LocationEditForm(BaseModelForm):
    """Edit a location, with an explicit timezone choice field."""
    timezone = forms.ChoiceField(choices=TIMEZONE_CHOICES)

    def __init__(self, *args, **kwargs):
        super(LocationEditForm, self).__init__(*args, **kwargs)
        # Default the timezone to the instance's value, or the site default
        # when creating a new location.
        if 'instance' in kwargs:
            initial = kwargs['instance'].timezone
        else:
            initial = settings.TIME_ZONE
        self.initial['timezone'] = initial

    class Meta:
        model = Location
class ApprovalForm(BaseModelForm):
    """Approval review form; only the reviewer's comment is editable."""
    class Meta:
        model = Approval
        fields = ('comment',)
        widgets = {
            'comment': forms.Textarea(attrs={'rows': 3})
        }
class FlatPageEditForm(BaseModelForm):
    """Edit a Django flatpage's URL, title and content."""
    class Meta:
        model = FlatPage
        fields = ('url', 'title', 'content')
| bsd-3-clause |
carlodri/moviepy | moviepy/video/compositing/concatenate.py | 14 | 4364 | import numpy as np
from moviepy.tools import deprecated_version_of
from moviepy.video.VideoClip import VideoClip, ColorClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
from moviepy.audio.AudioClip import CompositeAudioClip
from moviepy.video.compositing.on_color import on_color
def concatenate_videoclips(clips, method="chain", transition=None,
                           bg_color=None, ismask=False, padding=0):
    """ Concatenates several video clips

    Returns a video clip made by clip by concatenating several video clips.
    (Concatenated means that they will be played one after another).

    There are two methods:

    - method="chain": will produce a clip that simply outputs
      the frames of the succesive clips, without any correction if they are
      not of the same size of anything. If none of the clips have masks the
      resulting clip has no mask, else the mask is a concatenation of masks
      (using completely opaque for clips that don't have masks, obviously).
      If you have clips of different size and you want to write directly the
      result of the concatenation to a file, use the method "compose" instead.

    - method="compose", if the clips do not have the same
      resolution, the final resolution will be such that no clip has
      to be resized.
      As a consequence the final clip has the height of the highest
      clip and the width of the widest clip of the list. All the
      clips with smaller dimensions will appear centered. The border
      will be transparent if mask=True, else it will be of the
      color specified by ``bg_color``.

    If all clips with a fps attribute have the same fps, it becomes the fps of
    the result.

    Parameters
    -----------

    clips
      A list of video clips which must all have their ``duration``
      attributes set.

    method
      "chain" or "compose": see above.

    transition
      A clip that will be played between each two clips of the list.

    bg_color
      Only for method='compose'. Color of the background.
      Set to None for a transparent clip

    padding
      Only for method='compose'. Duration during two consecutive clips.
      Note that for negative padding, a clip will partly play at the same
      time as the clip it follows (negative padding is cool for clips who fade
      in on one another). A non-null padding automatically sets the method to
      `compose`.

    """
    if transition is not None:
        # Interleave the transition clip between consecutive clips.
        # (Fix: previously used the bare builtin `reduce`, which does not
        # exist on Python 3; this loop needs no import and is equivalent.)
        interleaved = []
        for clip in clips[:-1]:
            interleaved.extend([clip, transition])
        clips = interleaved + [clips[-1]]
        transition = None

    # Cumulative start time of each clip (tt[i] is where clip i begins).
    tt = np.cumsum([0] + [c.duration for c in clips])

    sizes = [v.size for v in clips]
    w = max([r[0] for r in sizes])
    h = max([r[1] for r in sizes])

    # Shift start times by the (possibly negative) padding, clamped at 0.
    tt = np.maximum(0, tt + padding * np.arange(len(tt)))

    if method == "chain":
        def make_frame(t):
            # Index of the last clip whose start time is <= t.
            i = max([i for i, e in enumerate(tt) if e <= t])
            return clips[i].get_frame(t - tt[i])

        result = VideoClip(ismask=ismask, make_frame=make_frame)
        if any([c.mask is not None for c in clips]):
            # Build a fully-opaque mask for clips without one.
            masks = [c.mask if (c.mask is not None) else
                     ColorClip([1, 1], col=1, ismask=True, duration=c.duration)
                     for c in clips]
            result.mask = concatenate_videoclips(masks, method="chain",
                                                 ismask=True)
            result.clips = clips

    elif method == "compose":
        result = CompositeVideoClip(
            [c.set_start(t).set_pos('center') for (c, t) in zip(clips, tt)],
            size=(w, h), bg_color=bg_color, ismask=ismask)

    result.tt = tt

    result.start_times = tt[:-1]
    result.start, result.duration, result.end = 0, tt[-1], tt[-1]

    audio_t = [(c.audio, t) for c, t in zip(clips, tt) if c.audio is not None]
    if len(audio_t) > 0:
        result.audio = CompositeAudioClip([a.set_start(t)
                                           for a, t in audio_t])

    # Propagate fps only when it is unambiguous.
    fps_list = list(set([c.fps for c in clips if hasattr(c, 'fps')]))
    if len(fps_list) == 1:
        result.fps = fps_list[0]

    return result
concatenate = deprecated_version_of(concatenate_videoclips, "concatenate_videoclips")
| mit |
andrewsmedina/django | tests/foreign_object/models.py | 8 | 5128 | import datetime
from django.db import models
from django.db.models.fields.related import ReverseSingleRelatedObjectDescriptor
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import get_language
class Country(models.Model):
    """A country, targeted by the composite foreign objects below."""
    # Table Column Fields
    name = models.CharField(max_length=50)

    def __unicode__(self):
        return self.name
class Person(models.Model):
    """A person linked to a country through a raw integer column plus a
    ForeignObject relation built on top of it."""
    # Table Column Fields
    name = models.CharField(max_length=128)
    person_country_id = models.IntegerField()

    # Relation Fields
    person_country = models.ForeignObject(
        Country, from_fields=['person_country_id'], to_fields=['id'])
    friends = models.ManyToManyField('self', through='Friendship', symmetrical=False)

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name
class Group(models.Model):
    """A group located in a country, with members through Membership."""
    # Table Column Fields
    name = models.CharField(max_length=128)
    group_country = models.ForeignKey(Country)
    members = models.ManyToManyField(Person, related_name='groups', through='Membership')

    class Meta:
        ordering = ('name',)

    def __unicode__(self):
        return self.name
class Membership(models.Model):
    """Through model for Group.members, joined on composite
    (country, person/group id) keys."""
    # Table Column Fields
    membership_country = models.ForeignKey(Country)
    date_joined = models.DateTimeField(default=datetime.datetime.now)
    invite_reason = models.CharField(max_length=64, null=True)
    person_id = models.IntegerField()
    group_id = models.IntegerField()

    # Relation Fields
    person = models.ForeignObject(Person,
        from_fields=['membership_country', 'person_id'],
        to_fields=['person_country_id', 'id'])
    group = models.ForeignObject(Group,
        from_fields=['membership_country', 'group_id'],
        to_fields=['group_country', 'id'])

    class Meta:
        ordering = ('date_joined', 'invite_reason')

    def __unicode__(self):
        return "%s is a member of %s" % (self.person.name, self.group.name)
class Friendship(models.Model):
    """Through model for Person.friends, using composite (country, id)
    foreign objects on both sides."""
    # Table Column Fields
    from_friend_country = models.ForeignKey(Country, related_name="from_friend_country")
    from_friend_id = models.IntegerField()
    to_friend_country_id = models.IntegerField()
    to_friend_id = models.IntegerField()

    # Relation Fields
    from_friend = models.ForeignObject(Person,
        from_fields=['from_friend_country', 'from_friend_id'],
        to_fields=['person_country_id', 'id'],
        related_name='from_friend')
    to_friend_country = models.ForeignObject(Country,
        from_fields=['to_friend_country_id'],
        to_fields=['id'],
        related_name='to_friend_country')
    to_friend = models.ForeignObject(Person,
        from_fields=['to_friend_country_id', 'to_friend_id'],
        to_fields=['person_country_id', 'id'],
        related_name='to_friend')
class ArticleTranslationDescriptor(ReverseSingleRelatedObjectDescriptor):
    """
    The set of articletranslation should not set any local fields.
    """
    def __set__(self, instance, value):
        if instance is None:
            raise AttributeError("%s must be accessed via instance" % self.field.name)
        setattr(instance, self.cache_name, value)
        # Also cache the reverse side when the relation is one-to-one.
        if value is not None and not self.field.rel.multiple:
            setattr(value, self.field.related.get_cache_name(), instance)
class ColConstraint(object):
    """Ad-hoc SQL equality constraint.

    Anything exposing an as_sql() method works in get_extra_restriction();
    this one renders ``<alias>.<col> = %s`` with ``value`` as the parameter.
    """
    def __init__(self, alias, col, value):
        self.alias = alias
        self.col = col
        self.value = value

    def as_sql(self, qn, connection):
        sql = '%s.%s = %%s' % (qn(self.alias), qn(self.col))
        return sql, [self.value]
class ActiveTranslationField(models.ForeignObject):
    """
    This field will allow querying and fetching the currently active translation
    for Article from ArticleTranslation.
    """
    requires_unique_target = False

    def get_extra_restriction(self, where_class, alias, related_alias):
        # Restrict the join to rows matching the currently active language.
        return ColConstraint(alias, 'lang', get_language())

    def get_extra_descriptor_filter(self):
        return {'lang': get_language()}

    def contribute_to_class(self, cls, name):
        super(ActiveTranslationField, self).contribute_to_class(cls, name)
        # Install the custom descriptor so assignment doesn't touch local
        # fields (see ArticleTranslationDescriptor).
        setattr(cls, self.name, ArticleTranslationDescriptor(self))
@python_2_unicode_compatible
class Article(models.Model):
    """An article whose title comes from its active-language translation."""
    active_translation = ActiveTranslationField(
        'ArticleTranslation',
        from_fields=['id'],
        to_fields=['article'],
        related_name='+',
        null=True)
    pub_date = models.DateField()

    def __str__(self):
        # Fall back gracefully when no translation exists for the active
        # language.
        try:
            return self.active_translation.title
        except ArticleTranslation.DoesNotExist:
            return '[No translation found]'
class ArticleTranslation(models.Model):
    """One translation of an Article in a given language."""
    article = models.ForeignKey(Article)
    # Fix: max_length must be an integer, not the string '2'.
    lang = models.CharField(max_length=2)
    title = models.CharField(max_length=100)
    body = models.TextField()
    abstract = models.CharField(max_length=400, null=True)

    class Meta:
        unique_together = ('article', 'lang')
        ordering = ('active_translation__title',)
| bsd-3-clause |
LockScreen/Backend | venv/lib/python2.7/site-packages/boto/swf/layer2.py | 130 | 13056 | """Object-oriented interface to SWF wrapping boto.swf.layer1.Layer1"""
import time
from functools import wraps
from boto.swf.layer1 import Layer1
from boto.swf.layer1_decisions import Layer1Decisions
# Module-level default credentials, optionally populated via
# set_default_credentials() and picked up by every SWFBase subclass.
DEFAULT_CREDENTIALS = {
    'aws_access_key_id': None,
    'aws_secret_access_key': None
}
def set_default_credentials(aws_access_key_id, aws_secret_access_key):
    """Store module-wide default AWS credentials.

    Subsequent SWFBase instances pick these up unless overridden per object.
    """
    DEFAULT_CREDENTIALS['aws_access_key_id'] = aws_access_key_id
    DEFAULT_CREDENTIALS['aws_secret_access_key'] = aws_secret_access_key
class SWFBase(object):
    """Common base for all SWF wrapper objects.

    Subclasses override the class attributes below; keyword arguments to
    __init__ override them again per instance.
    """
    name = None
    domain = None
    aws_access_key_id = None
    aws_secret_access_key = None
    region = None

    def __init__(self, **kwargs):
        # Set default credentials.
        for credkey in ('aws_access_key_id', 'aws_secret_access_key'):
            if DEFAULT_CREDENTIALS.get(credkey):
                setattr(self, credkey, DEFAULT_CREDENTIALS[credkey])

        # Override attributes with keyword args.
        for kwarg in kwargs:
            setattr(self, kwarg, kwargs[kwarg])

        # Low-level SWF client shared by all operations on this object.
        self._swf = Layer1(self.aws_access_key_id,
                           self.aws_secret_access_key,
                           region=self.region)

    def __repr__(self):
        rep_str = str(self.name)
        # Versioned types (workflow/activity) include their version.
        if hasattr(self, 'version'):
            rep_str += '-' + str(getattr(self, 'version'))
        return '<%s %r at 0x%x>' % (self.__class__.__name__, rep_str, id(self))
class Domain(SWFBase):
    """Simple Workflow Domain."""
    description = None
    # Workflow execution retention period, in days.
    retention = 30

    @wraps(Layer1.describe_domain)
    def describe(self):
        """DescribeDomain."""
        return self._swf.describe_domain(self.name)

    @wraps(Layer1.deprecate_domain)
    def deprecate(self):
        """DeprecateDomain"""
        self._swf.deprecate_domain(self.name)

    @wraps(Layer1.register_domain)
    def register(self):
        """RegisterDomain."""
        self._swf.register_domain(self.name, str(self.retention),
                                  self.description)

    @wraps(Layer1.list_activity_types)
    def activities(self, status='REGISTERED', **kwargs):
        """ListActivityTypes."""
        act_types = self._swf.list_activity_types(self.name, status, **kwargs)
        act_objects = []
        for act_args in act_types['typeInfos']:
            # Flatten the nested 'activityType' dict into the kwargs used
            # to construct an ActivityType wrapper.
            act_ident = act_args['activityType']
            del act_args['activityType']
            act_args.update(act_ident)
            act_args.update({
                'aws_access_key_id': self.aws_access_key_id,
                'aws_secret_access_key': self.aws_secret_access_key,
                'domain': self.name,
                'region': self.region,
            })
            act_objects.append(ActivityType(**act_args))
        return act_objects

    @wraps(Layer1.list_workflow_types)
    def workflows(self, status='REGISTERED', **kwargs):
        """ListWorkflowTypes."""
        wf_types = self._swf.list_workflow_types(self.name, status, **kwargs)
        wf_objects = []
        for wf_args in wf_types['typeInfos']:
            # Flatten the nested 'workflowType' dict, as in activities().
            wf_ident = wf_args['workflowType']
            del wf_args['workflowType']
            wf_args.update(wf_ident)
            wf_args.update({
                'aws_access_key_id': self.aws_access_key_id,
                'aws_secret_access_key': self.aws_secret_access_key,
                'domain': self.name,
                'region': self.region,
            })
            wf_objects.append(WorkflowType(**wf_args))
        return wf_objects

    def executions(self, closed=False, **kwargs):
        """List list open/closed executions.

        For a full list of available parameters refer to
        :py:func:`boto.swf.layer1.Layer1.list_closed_workflow_executions` and
        :py:func:`boto.swf.layer1.Layer1.list_open_workflow_executions`
        """
        if closed:
            executions = self._swf.list_closed_workflow_executions(self.name,
                                                                   **kwargs)
        else:
            if 'oldest_date' not in kwargs:
                # Last 24 hours.
                kwargs['oldest_date'] = time.time() - (3600 * 24)
            executions = self._swf.list_open_workflow_executions(self.name,
                                                                 **kwargs)
        exe_objects = []
        for exe_args in executions['executionInfos']:
            # Flatten the nested 'execution' and 'workflowType' dicts.
            for nested_key in ('execution', 'workflowType'):
                nested_dict = exe_args[nested_key]
                del exe_args[nested_key]
                exe_args.update(nested_dict)
            exe_args.update({
                'aws_access_key_id': self.aws_access_key_id,
                'aws_secret_access_key': self.aws_secret_access_key,
                'domain': self.name,
                'region': self.region,
            })
            exe_objects.append(WorkflowExecution(**exe_args))
        return exe_objects

    @wraps(Layer1.count_pending_activity_tasks)
    def count_pending_activity_tasks(self, task_list):
        """CountPendingActivityTasks."""
        return self._swf.count_pending_activity_tasks(self.name, task_list)

    @wraps(Layer1.count_pending_decision_tasks)
    def count_pending_decision_tasks(self, task_list):
        """CountPendingDecisionTasks."""
        return self._swf.count_pending_decision_tasks(self.name, task_list)
class Actor(SWFBase):
    """Common base for task pollers (activity workers and deciders)."""
    task_list = None
    # Token of the most recently polled task; used as the default for
    # respond/complete calls in subclasses.
    last_tasktoken = None
    domain = None

    def run(self):
        """To be overloaded by subclasses."""
        raise NotImplementedError()
class ActivityWorker(Actor):
    """Base class for SimpleWorkflow activity workers."""

    @wraps(Layer1.respond_activity_task_canceled)
    def cancel(self, task_token=None, details=None):
        """RespondActivityTaskCanceled."""
        # Default to the token of the most recently polled task.
        if task_token is None:
            task_token = self.last_tasktoken
        return self._swf.respond_activity_task_canceled(task_token, details)

    @wraps(Layer1.respond_activity_task_completed)
    def complete(self, task_token=None, result=None):
        """RespondActivityTaskCompleted."""
        if task_token is None:
            task_token = self.last_tasktoken
        return self._swf.respond_activity_task_completed(task_token, result)

    @wraps(Layer1.respond_activity_task_failed)
    def fail(self, task_token=None, details=None, reason=None):
        """RespondActivityTaskFailed."""
        if task_token is None:
            task_token = self.last_tasktoken
        return self._swf.respond_activity_task_failed(task_token, details,
                                                      reason)

    @wraps(Layer1.record_activity_task_heartbeat)
    def heartbeat(self, task_token=None, details=None):
        """RecordActivityTaskHeartbeat."""
        if task_token is None:
            task_token = self.last_tasktoken
        return self._swf.record_activity_task_heartbeat(task_token, details)

    @wraps(Layer1.poll_for_activity_task)
    def poll(self, **kwargs):
        """PollForActivityTask."""
        # An explicit task_list kwarg overrides the instance attribute.
        task_list = self.task_list
        if 'task_list' in kwargs:
            task_list = kwargs.get('task_list')
            del kwargs['task_list']
        task = self._swf.poll_for_activity_task(self.domain, task_list,
                                                **kwargs)
        # Remember the token so the respond_* helpers can omit it.
        self.last_tasktoken = task.get('taskToken')
        return task
class Decider(Actor):
    """Base class for SimpleWorkflow deciders."""

    @wraps(Layer1.respond_decision_task_completed)
    def complete(self, task_token=None, decisions=None, **kwargs):
        """RespondDecisionTaskCompleted."""
        if isinstance(decisions, Layer1Decisions):
            # Extract decision list from a Layer1Decisions instance.
            decisions = decisions._data
        if task_token is None:
            task_token = self.last_tasktoken
        return self._swf.respond_decision_task_completed(task_token, decisions,
                                                         **kwargs)

    @wraps(Layer1.poll_for_decision_task)
    def poll(self, **kwargs):
        """PollForDecisionTask."""
        # An explicit task_list kwarg overrides the instance attribute.
        task_list = self.task_list
        if 'task_list' in kwargs:
            task_list = kwargs.get('task_list')
            del kwargs['task_list']
        decision_task = self._swf.poll_for_decision_task(self.domain, task_list,
                                                         **kwargs)
        # Remember the token so complete() can omit it.
        self.last_tasktoken = decision_task.get('taskToken')
        return decision_task
class WorkflowType(SWFBase):
    """A versioned workflow type."""
    version = None
    task_list = None
    child_policy = 'TERMINATE'

    @wraps(Layer1.describe_workflow_type)
    def describe(self):
        """DescribeWorkflowType."""
        return self._swf.describe_workflow_type(self.domain, self.name,
                                                self.version)

    @wraps(Layer1.register_workflow_type)
    def register(self, **kwargs):
        """RegisterWorkflowType."""
        # Default timeouts (seconds, as strings per the SWF API) and child
        # policy; each can be overridden through kwargs.
        args = {
            'default_execution_start_to_close_timeout': '3600',
            'default_task_start_to_close_timeout': '300',
            'default_child_policy': 'TERMINATE',
        }
        args.update(kwargs)
        self._swf.register_workflow_type(self.domain, self.name, self.version,
                                         **args)

    @wraps(Layer1.deprecate_workflow_type)
    def deprecate(self):
        """DeprecateWorkflowType."""
        self._swf.deprecate_workflow_type(self.domain, self.name, self.version)

    @wraps(Layer1.start_workflow_execution)
    def start(self, **kwargs):
        """StartWorkflowExecution."""
        if 'workflow_id' in kwargs:
            workflow_id = kwargs['workflow_id']
            del kwargs['workflow_id']
        else:
            # Generate an id from name, version and the current time.
            workflow_id = '%s-%s-%i' % (self.name, self.version, time.time())
        # Fall back to this type's task_list/child_policy when not given.
        for def_attr in ('task_list', 'child_policy'):
            kwargs[def_attr] = kwargs.get(def_attr, getattr(self, def_attr))
        run_id = self._swf.start_workflow_execution(self.domain, workflow_id,
            self.name, self.version, **kwargs)['runId']
        return WorkflowExecution(name=self.name, version=self.version,
            runId=run_id, domain=self.domain, workflowId=workflow_id,
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key)
class WorkflowExecution(SWFBase):

    """An instance of a workflow."""

    workflowId = None
    runId = None

    @wraps(Layer1.signal_workflow_execution)
    def signal(self, signame, **kwargs):
        """SignalWorkflowExecution."""
        self._swf.signal_workflow_execution(
            self.domain, signame, self.workflowId, **kwargs)

    @wraps(Layer1.terminate_workflow_execution)
    def terminate(self, **kwargs):
        """TerminateWorkflowExecution (p. 103)."""
        return self._swf.terminate_workflow_execution(
            self.domain, self.workflowId, **kwargs)

    @wraps(Layer1.get_workflow_execution_history)
    def history(self, **kwargs):
        """GetWorkflowExecutionHistory."""
        # Only the event list of the response is of interest to callers.
        response = self._swf.get_workflow_execution_history(
            self.domain, self.runId, self.workflowId, **kwargs)
        return response['events']

    @wraps(Layer1.describe_workflow_execution)
    def describe(self):
        """DescribeWorkflowExecution."""
        return self._swf.describe_workflow_execution(
            self.domain, self.runId, self.workflowId)

    @wraps(Layer1.request_cancel_workflow_execution)
    def request_cancel(self):
        """RequestCancelWorkflowExecution."""
        return self._swf.request_cancel_workflow_execution(
            self.domain, self.workflowId, self.runId)
class ActivityType(SWFBase):

    """A versioned activity type."""

    version = None

    @wraps(Layer1.deprecate_activity_type)
    def deprecate(self):
        """DeprecateActivityType."""
        return self._swf.deprecate_activity_type(self.domain, self.name,
                                                 self.version)

    @wraps(Layer1.describe_activity_type)
    def describe(self):
        """DescribeActivityType."""
        return self._swf.describe_activity_type(self.domain, self.name,
                                                self.version)

    @wraps(Layer1.register_activity_type)
    def register(self, **kwargs):
        """RegisterActivityType."""
        # Explicit keyword arguments win over these service defaults.
        args = dict(default_task_heartbeat_timeout='600',
                    default_task_schedule_to_close_timeout='3900',
                    default_task_schedule_to_start_timeout='300',
                    default_task_start_to_close_timeout='3600')
        args.update(kwargs)
        self._swf.register_activity_type(self.domain, self.name, self.version,
                                         **args)
| mit |
yoer/hue | desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_encoding.py | 49 | 1231 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.utils import unittest
from django.utils.encoding import force_bytes, filepath_to_uri, python_2_unicode_compatible
from django.utils import six
class TestEncodingUtils(unittest.TestCase):
    def test_force_bytes_exception(self):
        """
        Test that force_bytes knows how to convert to bytes an exception
        containing non-ASCII characters in its args.
        """
        error_msg = "This is an exception, voilà"
        exc = ValueError(error_msg)
        result = force_bytes(exc)
        self.assertEqual(result, error_msg.encode('utf-8'))

    def test_filepath_to_uri(self):
        # Both text and UTF-8-encoded byte paths must produce the same
        # percent-encoded URI, with backslashes normalised to slashes.
        self.assertEqual(filepath_to_uri('upload\\чубака.mp4'),
            'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4')
        self.assertEqual(filepath_to_uri('upload\\чубака.mp4'.encode('utf-8')),
            'upload/%D1%87%D1%83%D0%B1%D0%B0%D0%BA%D0%B0.mp4')

    @unittest.skipIf(six.PY3, "tests a class not defining __str__ under Python 2")
    def test_decorated_class_without_str(self):
        # Under Python 2 the decorator aliases __str__ to __unicode__, so
        # decorating a class without __str__ must raise ValueError.
        with self.assertRaises(ValueError):
            @python_2_unicode_compatible
            class NoStr(object):
                pass
| apache-2.0 |
class Solution:
    # Value of each Roman numeral symbol, shared by both implementations
    # (previously duplicated in each method).
    ROMAN_NUM = {
        "I": 1,
        "V": 5,
        "X": 10,
        "L": 50,
        "C": 100,
        "D": 500,
        "M": 1000}

    # @param {string} s
    # @return {integer}
    def romanToInt(self, s):
        """Convert a Roman numeral string to an integer.

        A symbol written before a larger symbol is subtracted (IX == 9);
        otherwise symbol values are added.  Returns 0 for an empty string
        (the original crashed with IndexError on ``s[-1]``).
        """
        if not s:
            return 0
        result = 0
        for idx in range(len(s) - 1):
            # Subtractive notation: smaller value before a larger one.
            if self.ROMAN_NUM[s[idx]] < self.ROMAN_NUM[s[idx + 1]]:
                result -= self.ROMAN_NUM[s[idx]]
            else:
                result += self.ROMAN_NUM[s[idx]]
        # The last symbol is always added.
        result += self.ROMAN_NUM[s[-1]]
        return result

    def romanToInt_1st(self, s):
        """First attempt: scan right-to-left, subtracting any symbol that
        ranks below the one processed just before it."""
        ROMAN_SEQ = ["I", "V", "X", "L", "C", "D", "M"]
        result = 0
        prev = None
        for d in reversed(s):
            if prev and ROMAN_SEQ.index(d) < ROMAN_SEQ.index(prev):
                result -= self.ROMAN_NUM[d]
            else:
                result += self.ROMAN_NUM[d]
            prev = d
        return result
if __name__ == "__main__":
    sol = Solution()
    # roman: IX -> 9.  Parenthesized print works on both Python 2 and 3
    # (the original bare print statement was Python-2 only).
    print(sol.romanToInt("IX"))
| mit |
angr/cle | cle/backends/java/jar.py | 1 | 3125 | import logging
from zipfile import ZipFile
from .. import register_backend
from .soot import Soot
l = logging.getLogger(name=__name__)
class Jar(Soot):
    """
    Backend for lifting JARs to Soot.
    """

    is_default = True  # let CLE automatically use this backend

    def __init__(self, jar_path, binary_stream, entry_point=None,
                 entry_point_params=('java.lang.String[]',),
                 jni_libs=None, jni_libs_ld_path=None, **kwargs):
        """
        :param jar_path:          Path to JAR.

        The following parameters are optional

        :param entry_point:       Fully qualified name of method that should be used as the entry point.
                                  If not specified, we try to parse it from the manifest.
        :param jni_libs:          Name(s) of JNI libs to load (if any).
        :param jni_libs_ld_path:  Path(s) where to find libs defined by param jni_libs.
                                  Note: Directory of the JAR is added by default.
        """
        l.debug("Loading JAR from %s ...", jar_path)
        if not entry_point:
            # try to parse the main class from the manifest and use its
            # main() method as the entry point
            self.manifest = self.get_manifest(jar_path)
            main_class = self.manifest.get('Main-Class', None)
            if main_class:
                entry_point = main_class + "." + "main"

        # the actual lifting is done by the Soot superclass
        super().__init__(jar_path, binary_stream,
                         input_format='jar',
                         entry_point=entry_point,
                         entry_point_params=entry_point_params,
                         jni_libs=jni_libs,
                         jni_libs_ld_path=jni_libs_ld_path,
                         **kwargs)

    @staticmethod
    def is_compatible(stream):
        # check if stream is an archive
        if not Soot.is_zip_archive(stream):
            return False
        # get filelist
        with ZipFile(stream) as jar:
            filelist = jar.namelist()
        # check for manifest and if a least one java class
        # file is available
        if 'META-INF/MANIFEST.MF' not in filelist:
            return False
        class_files = [f for f in filelist if f.endswith('.class')]
        if len(class_files) == 0:
            return False
        return True

    def get_manifest(self, binary_path=None):
        """
        Load the MANIFEST.MF file

        :return: A dict of meta info
        :rtype:  dict
        """
        path = binary_path if binary_path else self.binary
        data = {}
        with ZipFile(path) as jar:
            with jar.open('META-INF/MANIFEST.MF', "r") as manifest:
                for line in manifest.readlines():
                    if b':' in line:
                        # Split on the *first* colon only: values such as
                        # URLs contain colons themselves, and the original
                        # split(b':') raised ValueError on them.
                        key, _, value = line.partition(b':')
                        data[key.strip().decode('utf-8')] = value.strip().decode('utf-8')
        return data


register_backend('jar', Jar)
| bsd-2-clause |
TRESCLOUD/odoo | addons/lunch/tests/__init__.py | 172 | 1107 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_lunch
# Test suites executed by the OpenERP test runner for this addon.
checks = [
    test_lunch,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
praekeltfoundation/ndoh-hub | eventstore/migrations/0021_covid19triage.py | 1 | 3634 | # Generated by Django 2.2.8 on 2020-04-02 14:45
import uuid
import django.contrib.postgres.fields.jsonb
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the Covid19Triage model, which
    # stores one COVID-19 triage/self-assessment submission per row
    # (contact number, location, age band, symptoms, exposure and the
    # computed risk level).

    dependencies = [("eventstore", "0020_auto_20200302_1458")]

    operations = [
        migrations.CreateModel(
            name="Covid19Triage",
            fields=[
                (
                    "id",
                    models.UUIDField(
                        default=uuid.uuid4,
                        editable=False,
                        primary_key=True,
                        serialize=False,
                    ),
                ),
                ("msisdn", models.CharField(max_length=255)),
                ("source", models.CharField(max_length=255)),
                (
                    "province",
                    models.CharField(
                        choices=[
                            ("ZA-GT", "Gauteng"),
                            ("ZA-NW", "North-West (South Africa)"),
                            ("ZA-LP", "Limpopo"),
                            ("ZA-WC", "Western Cape"),
                            ("ZA-MP", "Mpumalanga"),
                            ("ZA-EC", "Eastern Cape"),
                            ("ZA-NC", "Northern Cape"),
                            ("ZA-FS", "Free State"),
                            ("ZA-NL", "Kwazulu-Natal"),
                        ],
                        max_length=6,
                    ),
                ),
                ("city", models.CharField(max_length=255)),
                (
                    "age",
                    models.CharField(
                        choices=[
                            ("<18", "<18"),
                            ("18-40", "18-40"),
                            ("40-65", "40-65"),
                            (">65", ">65"),
                        ],
                        max_length=5,
                    ),
                ),
                ("fever", models.BooleanField()),
                ("cough", models.BooleanField()),
                ("sore_throat", models.BooleanField()),
                (
                    "exposure",
                    models.CharField(
                        choices=[
                            ("yes", "Yes"),
                            ("no", "No"),
                            ("not_sure", "Not sure"),
                        ],
                        max_length=9,
                    ),
                ),
                (
                    "tracing",
                    models.BooleanField(
                        help_text="Whether the NDoH can contact the user"
                    ),
                ),
                (
                    "risk",
                    models.CharField(
                        choices=[
                            ("low", "Low"),
                            ("moderate", "Moderate"),
                            ("high", "High"),
                            ("critical", "Critical"),
                        ],
                        max_length=8,
                    ),
                ),
                ("timestamp", models.DateTimeField(default=django.utils.timezone.now)),
                (
                    "created_by",
                    models.CharField(blank=True, default="", max_length=255),
                ),
                (
                    "data",
                    django.contrib.postgres.fields.jsonb.JSONField(
                        blank=True, default=dict, null=True
                    ),
                ),
            ],
        )
    ]
| bsd-3-clause |
7kbird/chrome | tools/telemetry/telemetry/core/backends/chrome/inspector_page.py | 6 | 5579 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import sys
import time
from telemetry.core import util
class InspectorPage(object):
  """Wraps the DevTools 'Page' domain for one inspector connection.

  Tracks in-flight navigations so callers can block until navigation
  completes, and supports installing a script that is evaluated on every
  page commit.  (Idiom fixes vs. the original: ``== None`` comparisons
  replaced with ``is None`` / ``is not None``, ``not x == y`` with
  ``x != y`` and ``not k in d`` with ``k not in d``.)
  """

  def __init__(self, inspector_backend, timeout=60):
    self._inspector_backend = inspector_backend
    self._inspector_backend.RegisterDomain(
        'Page',
        self._OnNotification,
        self._OnClose)

    self._navigation_pending = False
    self._navigation_url = ''  # Support for legacy backends.
    self._navigation_frame_id = ''
    self._navigated_frame_ids = None  # Holds frame ids while navigating.
    self._script_to_evaluate_on_commit = None
    # Turn on notifications. We need them to get the Page.frameNavigated event.
    self._EnablePageNotifications(timeout=timeout)

  def _OnNotification(self, msg):
    """Handles Page.* notifications; unblocks WaitForNavigate when done."""
    logging.debug('Notification: %s', json.dumps(msg, indent=2))
    if msg['method'] == 'Page.frameNavigated':
      url = msg['params']['frame']['url']
      if self._navigated_frame_ids is not None:
        # Modern backend: wait for the exact frame returned by Page.navigate.
        frame_id = msg['params']['frame']['id']
        if self._navigation_frame_id == frame_id:
          self._navigation_frame_id = ''
          self._navigated_frame_ids = None
          self._navigation_pending = False
        else:
          self._navigated_frame_ids.add(frame_id)
      elif self._navigation_url == url:
        # TODO(tonyg): Remove this when Chrome 38 goes stable.
        self._navigation_url = ''
        self._navigation_pending = False
      elif (url != 'chrome://newtab/' and url != 'about:blank'
            and 'parentId' not in msg['params']['frame']):
        # Marks the navigation as complete and unblocks the
        # WaitForNavigate call.
        self._navigation_pending = False

  def _OnClose(self):
    pass

  def _SetScriptToEvaluateOnCommit(self, source):
    """Installs |source| to run on every commit; None removes any script."""
    existing_source = (self._script_to_evaluate_on_commit and
                       self._script_to_evaluate_on_commit['source'])
    if source == existing_source:
      return
    if existing_source:
      request = {
          'method': 'Page.removeScriptToEvaluateOnLoad',
          'params': {
              'identifier': self._script_to_evaluate_on_commit['id'],
              }
          }
      self._inspector_backend.SyncRequest(request)
      self._script_to_evaluate_on_commit = None
    if source:
      request = {
          'method': 'Page.addScriptToEvaluateOnLoad',
          'params': {
              'scriptSource': source,
              }
          }
      res = self._inspector_backend.SyncRequest(request)
      self._script_to_evaluate_on_commit = {
          'id': res['result']['identifier'],
          'source': source
          }

  def _EnablePageNotifications(self, timeout=60):
    request = {
        'method': 'Page.enable'
        }
    res = self._inspector_backend.SyncRequest(request, timeout)
    assert len(res['result'].keys()) == 0

  def WaitForNavigate(self, timeout=60):
    """Waits for the navigation to complete.

    The current page is expect to be in a navigation. This function returns
    when the navigation is complete or when the timeout has been exceeded.
    """
    start_time = time.time()
    remaining_time = timeout
    self._navigation_pending = True
    try:
      while self._navigation_pending and remaining_time > 0:
        remaining_time = max(timeout - (time.time() - start_time), 0.0)
        self._inspector_backend.DispatchNotifications(remaining_time)
    except util.TimeoutException:
      # Since we pass remaining_time to DispatchNotifications, we need to
      # list the full timeout time in this message.
      raise util.TimeoutException('Timed out while waiting %ds for navigation. '
                                  'Error=%s' % (timeout, sys.exc_info()[1]))

  def Navigate(self, url, script_to_evaluate_on_commit=None, timeout=60):
    """Navigates to |url|.

    If |script_to_evaluate_on_commit| is given, the script source string will be
    evaluated when the navigation is committed. This is after the context of
    the page exists, but before any script on the page itself has executed.
    """
    self._SetScriptToEvaluateOnCommit(script_to_evaluate_on_commit)
    request = {
        'method': 'Page.navigate',
        'params': {
            'url': url,
            }
        }
    self._navigated_frame_ids = set()
    res = self._inspector_backend.SyncRequest(request, timeout)
    if 'frameId' in res['result']:
      # Modern backends are returning frameId from Page.navigate.
      # Use it here to unblock upon precise navigation.
      frame_id = res['result']['frameId']
      if frame_id in self._navigated_frame_ids:
        # The frame already navigated before the response arrived.
        self._navigated_frame_ids = None
        return
      self._navigation_frame_id = frame_id
    else:
      # TODO(tonyg): Remove this when Chrome 38 goes stable.
      self._navigated_frame_ids = None
      self._navigation_url = url
    self.WaitForNavigate(timeout)

  def GetCookieByName(self, name, timeout=60):
    """Returns the value of the cookie by the given |name|."""
    request = {
        'method': 'Page.getCookies'
        }
    res = self._inspector_backend.SyncRequest(request, timeout)
    cookies = res['result']['cookies']
    for cookie in cookies:
      if cookie['name'] == name:
        return cookie['value']
    return None

  def CollectGarbage(self, timeout=60):
    request = {
        'method': 'HeapProfiler.CollectGarbage'
        }
    self._inspector_backend.SyncRequest(request, timeout)
| bsd-3-clause |
miconof/CouchPotatoServer | libs/enzyme/flv.py | 180 | 6375 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
from exceptions import ParseError
import core
import logging
import struct
__all__ = ['Parser']
# get logging object
log = logging.getLogger(__name__)

# FLV tag types (first byte of each tag header)
FLV_TAG_TYPE_AUDIO = 0x08
FLV_TAG_TYPE_VIDEO = 0x09
FLV_TAG_TYPE_META = 0x12

# audio flags
FLV_AUDIO_CHANNEL_MASK = 0x01
FLV_AUDIO_SAMPLERATE_MASK = 0x0c
FLV_AUDIO_CODECID_MASK = 0xf0
FLV_AUDIO_SAMPLERATE_OFFSET = 2
FLV_AUDIO_CODECID_OFFSET = 4
FLV_AUDIO_CODECID = (0x0001, 0x0002, 0x0055, 0x0001)

# video flags
FLV_VIDEO_CODECID_MASK = 0x0f
FLV_VIDEO_CODECID = ('FLV1', 'MSS1', 'VP60') # wild guess

# AMF value-type markers used in the onMetaData script-data tag
FLV_DATA_TYPE_NUMBER = 0x00
FLV_DATA_TYPE_BOOL = 0x01
FLV_DATA_TYPE_STRING = 0x02
FLV_DATA_TYPE_OBJECT = 0x03
FLC_DATA_TYPE_CLIP = 0x04
FLV_DATA_TYPE_REFERENCE = 0x07
FLV_DATA_TYPE_ECMARRAY = 0x08
FLV_DATA_TYPE_ENDOBJECT = 0x09
FLV_DATA_TYPE_ARRAY = 0x0a
FLV_DATA_TYPE_DATE = 0x0b
FLV_DATA_TYPE_LONGSTRING = 0x0c

# mapping of FLV metadata keys to enzyme attribute names
FLVINFO = {
    'creator': 'copyright',
}
class FlashVideo(core.AVContainer):
    """
    Experimental parser for Flash videos. It requires certain flags to
    be set to report video resolutions and in most cases it does not
    provide that information.
    """
    table_mapping = { 'FLVINFO' : FLVINFO }

    def __init__(self, file):
        # Parses the FLV header and the first few tags, filling in audio,
        # video and metadata attributes on the AVContainer base.
        # NOTE: Python-2 code — tag bytes are read from a str and examined
        # with ord().
        core.AVContainer.__init__(self)
        self.mime = 'video/flv'
        self.type = 'Flash Video'
        # 13-byte file header: 'FLV', version, flags, data offset, and the
        # first previous-tag-size field.
        data = file.read(13)
        if len(data) < 13 or struct.unpack('>3sBBII', data)[0] != 'FLV':
            raise ParseError()

        # Scan at most 10 tags, stopping early once one audio and one video
        # stream have been discovered.
        for _ in range(10):
            if self.audio and self.video:
                break
            data = file.read(11)
            if len(data) < 11:
                break
            chunk = struct.unpack('>BH4BI', data)
            # 24-bit tag data size, reassembled from the unpacked fields.
            size = (chunk[1] << 8) + chunk[2]

            if chunk[0] == FLV_TAG_TYPE_AUDIO:
                # First payload byte packs channel count, sample rate and
                # codec id; only the first audio tag is inspected.
                flags = ord(file.read(1))
                if not self.audio:
                    a = core.AudioStream()
                    a.channels = (flags & FLV_AUDIO_CHANNEL_MASK) + 1
                    srate = (flags & FLV_AUDIO_SAMPLERATE_MASK)
                    a.samplerate = (44100 << (srate >> FLV_AUDIO_SAMPLERATE_OFFSET) >> 3)
                    codec = (flags & FLV_AUDIO_CODECID_MASK) >> FLV_AUDIO_CODECID_OFFSET
                    if codec < len(FLV_AUDIO_CODECID):
                        a.codec = FLV_AUDIO_CODECID[codec]
                    self.audio.append(a)

                file.seek(size - 1, 1)

            elif chunk[0] == FLV_TAG_TYPE_VIDEO:
                flags = ord(file.read(1))
                if not self.video:
                    v = core.VideoStream()
                    codec = (flags & FLV_VIDEO_CODECID_MASK) - 2
                    if codec < len(FLV_VIDEO_CODECID):
                        v.codec = FLV_VIDEO_CODECID[codec]
                    # width and height are in the meta packet, but I have
                    # no file with such a packet inside. So maybe we have
                    # to decode some parts of the video.
                    self.video.append(v)

                file.seek(size - 1, 1)

            elif chunk[0] == FLV_TAG_TYPE_META:
                log.info(u'metadata %r', str(chunk))
                metadata = file.read(size)
                try:
                    # Walk the AMF-encoded values; dict values carry the
                    # interesting per-file metadata.
                    while metadata:
                        length, value = self._parse_value(metadata)
                        if isinstance(value, dict):
                            log.info(u'metadata: %r', value)
                            if value.get('creator'):
                                self.copyright = value.get('creator')
                            if value.get('width'):
                                self.width = value.get('width')
                            if value.get('height'):
                                self.height = value.get('height')
                            if value.get('duration'):
                                self.length = value.get('duration')
                            self._appendtable('FLVINFO', value)
                        if not length:
                            # parse error
                            break
                        metadata = metadata[length:]
                except (IndexError, struct.error, TypeError):
                    pass

            else:
                log.info(u'unkown %r', str(chunk))
                file.seek(size, 1)

            # Skip the trailing previous-tag-size field.
            file.seek(4, 1)

    def _parse_value(self, data):
        """
        Parse the next metadata value.

        Returns (consumed_byte_count, value); (0, ...) signals a parse
        error or an unsupported AMF type marker.
        """
        if ord(data[0]) == FLV_DATA_TYPE_NUMBER:
            value = struct.unpack('>d', data[1:9])[0]
            return 9, value

        if ord(data[0]) == FLV_DATA_TYPE_BOOL:
            return 2, bool(data[1])

        if ord(data[0]) == FLV_DATA_TYPE_STRING:
            # 16-bit big-endian length followed by the raw bytes.
            length = (ord(data[1]) << 8) + ord(data[2])
            return length + 3, data[3:length + 3]

        if ord(data[0]) == FLV_DATA_TYPE_ECMARRAY:
            # 32-bit entry count, then key/value pairs; keys are length-
            # prefixed strings, values are parsed recursively.
            init_length = len(data)
            num = struct.unpack('>I', data[1:5])[0]
            data = data[5:]
            result = {}
            for _ in range(num):
                length = (ord(data[0]) << 8) + ord(data[1])
                key = data[2:length + 2]
                data = data[length + 2:]
                length, value = self._parse_value(data)
                if not length:
                    return 0, result
                result[key] = value
                data = data[length:]
            return init_length - len(data), result

        log.info(u'unknown code: %x. Stop metadata parser', ord(data[0]))
        return 0, None


Parser = FlashVideo
| gpl-3.0 |
g12mcgov/home-assistant | tests/components/test_device_sun_light_trigger.py | 16 | 3719 | """
tests.test_component_device_sun_light_trigger
~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests device sun light trigger component.
"""
# pylint: disable=too-many-public-methods,protected-access
import os
import unittest
import homeassistant.loader as loader
from homeassistant.const import CONF_PLATFORM, STATE_HOME, STATE_NOT_HOME
from homeassistant.components import (
device_tracker, light, sun, device_sun_light_trigger)
from tests.common import (
get_test_config_dir, get_test_home_assistant, ensure_sun_risen,
ensure_sun_set)
# Path of the CSV known-devices file created for this test module
# (assigned in setUpModule).
KNOWN_DEV_PATH = None


def setUpModule():   # pylint: disable=invalid-name
    """ Initializes a Home Assistant server. """
    global KNOWN_DEV_PATH

    KNOWN_DEV_PATH = os.path.join(get_test_config_dir(),
                                  device_tracker.CSV_DEVICES)

    # Seed two tracked devices so the trigger component has devices to
    # react to.
    with open(KNOWN_DEV_PATH, 'w') as fil:
        fil.write('device,name,track,picture\n')
        fil.write('DEV1,device 1,1,http://example.com/dev1.jpg\n')
        fil.write('DEV2,device 2,1,http://example.com/dev2.jpg\n')
def tearDownModule():   # pylint: disable=invalid-name
    """ Stops the Home Assistant server. """
    # NOTE(review): removes the YAML device file although setUpModule wrote
    # the CSV one — presumably device_tracker.setup() migrates CSV to YAML
    # during the tests; confirm against the device_tracker component.
    os.remove(os.path.join(get_test_config_dir(),
                           device_tracker.YAML_DEVICES))
class TestDeviceSunLightTrigger(unittest.TestCase):
    """ Test the device sun light trigger module. """

    def setUp(self):  # pylint: disable=invalid-name
        """Start a fresh hass with test platforms and one device home."""
        self.hass = get_test_home_assistant()

        self.scanner = loader.get_component(
            'device_tracker.test').get_scanner(None, None)

        self.scanner.reset()
        self.scanner.come_home('DEV1')

        loader.get_component('light.test').init()

        self.assertTrue(device_tracker.setup(self.hass, {
            device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}
        }))

        self.assertTrue(light.setup(self.hass, {
            light.DOMAIN: {CONF_PLATFORM: 'test'}
        }))

        self.assertTrue(sun.setup(
            self.hass, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}))

    def tearDown(self):  # pylint: disable=invalid-name
        """ Stop down stuff we started. """
        self.hass.stop()

    def test_lights_on_when_sun_sets(self):
        """ Test lights go on when there is someone home and the sun sets. """
        self.assertTrue(device_sun_light_trigger.setup(
            self.hass, {device_sun_light_trigger.DOMAIN: {}}))

        ensure_sun_risen(self.hass)

        light.turn_off(self.hass)

        self.hass.pool.block_till_done()

        ensure_sun_set(self.hass)
        self.hass.pool.block_till_done()

        self.assertTrue(light.is_on(self.hass))

    def test_lights_turn_off_when_everyone_leaves(self):
        """ Test lights turn off when everyone leaves the house. """
        # Lights start on; marking all devices as away must switch them off.
        light.turn_on(self.hass)

        self.hass.pool.block_till_done()

        self.assertTrue(device_sun_light_trigger.setup(
            self.hass, {device_sun_light_trigger.DOMAIN: {}}))

        self.hass.states.set(device_tracker.ENTITY_ID_ALL_DEVICES,
                             STATE_NOT_HOME)

        self.hass.pool.block_till_done()

        self.assertFalse(light.is_on(self.hass))

    def test_lights_turn_on_when_coming_home_after_sun_set(self):
        """ Test lights turn on when coming home after sun set. """
        # With the sun down and lights off, a device arriving home must
        # switch the lights on.
        light.turn_off(self.hass)
        ensure_sun_set(self.hass)

        self.hass.pool.block_till_done()

        self.assertTrue(device_sun_light_trigger.setup(
            self.hass, {device_sun_light_trigger.DOMAIN: {}}))

        self.hass.states.set(
            device_tracker.ENTITY_ID_FORMAT.format('device_2'), STATE_HOME)

        self.hass.pool.block_till_done()

        self.assertTrue(light.is_on(self.hass))
minrk/sympy | sympy/polys/tests/test_polyclasses.py | 4 | 11852 | """Tests for OO layer of several polynomial representations. """
from sympy.polys.polyclasses import (
DMP, init_normal_DMP,
DMF, init_normal_DMF,
ANP, init_normal_ANP,
)
from sympy.polys.domains import ZZ, QQ
from sympy.polys.specialpolys import f_4
from sympy.polys.polyerrors import (
ExactQuotientFailed,
)
from sympy.utilities.pytest import raises
def test_DMP___init__():
    """DMP construction strips leading zeros and normalises dict reps."""
    f = DMP([[0],[],[0,1,2],[3]], ZZ)

    assert f.rep == [[1,2],[3]]
    assert f.dom == ZZ
    assert f.lev == 1

    f = DMP([[1,2],[3]], ZZ, 1)

    assert f.rep == [[1,2],[3]]
    assert f.dom == ZZ
    assert f.lev == 1

    f = DMP({(1,1): 1, (0,0): 2}, ZZ, 1)

    assert f.rep == [[1,0],[2]]
    assert f.dom == ZZ
    assert f.lev == 1
def test_DMP___eq__():
    """Equality coerces across domains; different levels never compare equal."""
    assert DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ) == \
        DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ)

    assert DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ) == \
        DMP([[QQ(1),QQ(2)],[QQ(3)]], QQ)
    assert DMP([[QQ(1),QQ(2)],[QQ(3)]], QQ) == \
        DMP([[ZZ(1),ZZ(2)],[ZZ(3)]], ZZ)

    assert DMP([[[ZZ(1)]]], ZZ) != DMP([[ZZ(1)]], ZZ)
    assert DMP([[ZZ(1)]], ZZ) != DMP([[[ZZ(1)]]], ZZ)
def test_DMP___bool__():
    """A DMP is falsy iff it is the zero polynomial."""
    assert bool(DMP([[]], ZZ)) == False
    assert bool(DMP([[1]], ZZ)) == True
def test_DMP_to_dict():
    """to_dict maps monomials to coefficients; the sympy variant converts them."""
    f = DMP([[3],[],[2],[],[8]], ZZ)

    assert f.to_dict() == \
        {(4, 0): 3, (2, 0): 2, (0, 0): 8}
    assert f.to_sympy_dict() == \
        {(4, 0): ZZ.to_sympy(3), (2, 0): ZZ.to_sympy(2), (0, 0): ZZ.to_sympy(8)}
def test_DMP_properties():
    """Boolean predicates: zero, one, ground, square-free, monic, primitive."""
    assert DMP([[]], ZZ).is_zero == True
    assert DMP([[1]], ZZ).is_zero == False

    assert DMP([[1]], ZZ).is_one == True
    assert DMP([[2]], ZZ).is_one == False

    assert DMP([[1]], ZZ).is_ground == True
    assert DMP([[1],[2],[1]], ZZ).is_ground == False

    assert DMP([[1],[2,0],[1,0]], ZZ).is_sqf == True
    assert DMP([[1],[2,0],[1,0,0]], ZZ).is_sqf == False

    assert DMP([[1,2],[3]], ZZ).is_monic == True
    assert DMP([[2,2],[3]], ZZ).is_monic == False

    assert DMP([[1,2],[3]], ZZ).is_primitive == True
    assert DMP([[2,4],[6]], ZZ).is_primitive == False
def test_DMP_arithmetics():
    """Arithmetic methods and their operator equivalents, incl. (pseudo-)division."""
    f = DMP([[2],[2,0]], ZZ)

    assert f.mul_ground(2) == DMP([[4],[4,0]], ZZ)
    assert f.quo_ground(2) == DMP([[1],[1,0]], ZZ)

    raises(ExactQuotientFailed, 'f.exquo_ground(3)')

    f = DMP([[-5]], ZZ)
    g = DMP([[5]], ZZ)

    assert f.abs() == g
    assert abs(f) == g

    assert g.neg() == f
    assert -g == f

    h = DMP([[]], ZZ)

    assert f.add(g) == h
    assert f + g == h
    assert g + f == h
    assert f + 5 == h
    assert 5 + f == h

    h = DMP([[-10]], ZZ)

    assert f.sub(g) == h
    assert f - g == h
    assert g - f == -h
    assert f - 5 == h
    assert 5 - f == -h

    h = DMP([[-25]], ZZ)

    assert f.mul(g) == h
    assert f * g == h
    assert g * f == h
    assert f * 5 == h
    assert 5 * f == h

    h = DMP([[25]], ZZ)

    assert f.sqr() == h
    assert f.pow(2) == h
    assert f**2 == h

    raises(TypeError, "f.pow('x')")

    f = DMP([[1],[],[1,0,0]], ZZ)
    g = DMP([[2],[-2,0]], ZZ)

    q = DMP([[2],[2,0]], ZZ)
    r = DMP([[8,0,0]], ZZ)

    assert f.pdiv(g) == (q, r)
    assert f.pquo(g) == q
    assert f.prem(g) == r

    raises(ExactQuotientFailed, 'f.pexquo(g)')

    f = DMP([[1],[],[1,0,0]], ZZ)
    g = DMP([[1],[-1,0]], ZZ)

    q = DMP([[1],[1,0]], ZZ)
    r = DMP([[2,0,0]], ZZ)

    assert f.div(g) == (q, r)
    assert f.quo(g) == q
    assert f.rem(g) == r

    assert divmod(f, g) == (q, r)
    assert f // g == q
    assert f % g == r

    raises(ExactQuotientFailed, 'f.exquo(g)')
def test_DMP_functionality():
    """Higher-level operations: degree, norms, diff/eval, gcd, resultants,
    gcdex/invert, composition and decomposition."""
    f = DMP([[1],[2,0],[1,0,0]], ZZ)
    g = DMP([[1],[1,0]], ZZ)
    h = DMP([[1]], ZZ)

    assert f.degree() == 2
    assert f.degree_list() == (2, 2)
    assert f.total_degree() == 4

    assert f.LC() == ZZ(1)
    assert f.TC() == ZZ(0)
    assert f.nth(1, 1) == ZZ(2)

    raises(TypeError, "f.nth(0, 'x')")

    assert f.max_norm() == 2
    assert f.l1_norm() == 4

    u = DMP([[2],[2,0]], ZZ)

    assert f.diff(m=1, j=0) == u
    assert f.diff(m=1, j=1) == u

    raises(TypeError, "f.diff(m='x', j=0)")

    u = DMP([1,2,1], ZZ)
    v = DMP([1,2,1], ZZ)

    assert f.eval(a=1, j=0) == u
    assert f.eval(a=1, j=1) == v

    assert f.eval(1).eval(1) == ZZ(4)

    assert f.cofactors(g) == (g, g, h)
    assert f.gcd(g) == g
    assert f.lcm(g) == f

    u = DMP([[QQ(45),QQ(30),QQ(5)]], QQ)
    v = DMP([[QQ(1),QQ(2,3),QQ(1,9)]], QQ)

    assert u.monic() == v

    assert (4*f).content() == ZZ(4)
    assert (4*f).primitive() == (ZZ(4), f)

    f = DMP([[1],[2],[3],[4],[5],[6]], ZZ)

    assert f.trunc(3) == DMP([[1],[-1],[],[1],[-1],[]], ZZ)

    f = DMP(f_4, ZZ)

    assert f.sqf_part() == -f
    assert f.sqf_list() == (ZZ(-1), [(-f, 1)])

    f = DMP([[-1],[],[],[5]], ZZ)
    g = DMP([[3,1],[],[]], ZZ)
    h = DMP([[45,30,5]], ZZ)

    r = DMP([675,675,225,25], ZZ)

    assert f.subresultants(g) == [f, g, h]
    assert f.resultant(g) == r

    f = DMP([1,3,9,-13], ZZ)

    assert f.discriminant() == -11664

    f = DMP([QQ(2),QQ(0)], QQ)
    g = DMP([QQ(1),QQ(0),QQ(-16)], QQ)

    s = DMP([QQ(1,32),QQ(0)], QQ)
    t = DMP([QQ(-1,16)], QQ)
    h = DMP([QQ(1)], QQ)

    assert f.half_gcdex(g) == (s, h)
    assert f.gcdex(g) == (s, t, h)

    assert f.invert(g) == s

    f = DMP([[1],[2],[3]], QQ)

    # gcdex/invert are univariate-only operations.
    raises(ValueError, "f.half_gcdex(f)")
    raises(ValueError, "f.gcdex(f)")
    raises(ValueError, "f.invert(f)")

    f = DMP([1,0,20,0,150,0,500,0,625,-2,0,-10,9], ZZ)
    g = DMP([1,0,0,-2,9], ZZ)
    h = DMP([1,0,5,0], ZZ)

    assert g.compose(h) == f
    assert f.decompose() == [g, h]

    f = DMP([[1],[2],[3]], QQ)

    # decompose/sturm are univariate-only operations.
    raises(ValueError, "f.decompose()")
    raises(ValueError, "f.sturm()")
def test_DMP_exclude():
    """exclude() drops useless (constant) generators, returning their indices."""
    f = [[[[[[[[[[[[[[[[[[[[[[[[[[1]], [[]]]]]]]]]]]]]]]]]]]]]]]]]]
    J = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 24, 25]

    assert DMP(f, ZZ).exclude() == (J, DMP([1, 0], ZZ))
    assert DMP([[1], [1, 0]], ZZ).exclude() == ([], DMP([[1], [1, 0]], ZZ))
def test_DMF__init__():
    """DMF construction cancels/normalises fractions, signs and defaults the
    denominator to one; invalid or zero denominators raise."""
    f = DMF(([[0],[],[0,1,2],[3]], [[1,2,3]]), ZZ)

    assert f.num == [[1,2],[3]]
    assert f.den == [[1,2,3]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF(([[1,2],[3]], [[1,2,3]]), ZZ, 1)

    assert f.num == [[1,2],[3]]
    assert f.den == [[1,2,3]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF(([[-1],[-2]],[[3],[-4]]), ZZ)

    assert f.num == [[-1],[-2]]
    assert f.den == [[3],[-4]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF(([[1],[2]],[[-3],[4]]), ZZ)

    # The sign is normalised onto the numerator.
    assert f.num == [[-1],[-2]]
    assert f.den == [[3],[-4]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF(([[1],[2]],[[-3],[4]]), ZZ)

    assert f.num == [[-1],[-2]]
    assert f.den == [[3],[-4]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF(([[]],[[-3],[4]]), ZZ)

    # Zero numerator collapses the denominator to one.
    assert f.num == [[]]
    assert f.den == [[1]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF(17, ZZ, 1)

    assert f.num == [[17]]
    assert f.den == [[1]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF(([[1],[2]]), ZZ)

    assert f.num == [[1],[2]]
    assert f.den == [[1]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF([[0],[],[0,1,2],[3]], ZZ)

    assert f.num == [[1,2],[3]]
    assert f.den == [[1]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF({(1,1): 1, (0,0): 2}, ZZ, 1)

    assert f.num == [[1,0],[2]]
    assert f.den == [[1]]
    assert f.lev == 1
    assert f.dom == ZZ

    f = DMF(([[QQ(1)],[QQ(2)]], [[-QQ(3)],[QQ(4)]]), QQ)

    assert f.num == [[-QQ(1)],[-QQ(2)]]
    assert f.den == [[QQ(3)],[-QQ(4)]]
    assert f.lev == 1
    assert f.dom == QQ

    f = DMF(([[QQ(1,5)],[QQ(2,5)]], [[-QQ(3,7)],[QQ(4,7)]]), QQ)

    # Rational coefficients are cleared to a common integer multiple.
    assert f.num == [[-QQ(7)],[-QQ(14)]]
    assert f.den == [[QQ(15)],[-QQ(20)]]
    assert f.lev == 1
    assert f.dom == QQ

    raises(ValueError, "DMF(([1], [[1]]), ZZ)")
    raises(ZeroDivisionError, "DMF(([1], []), ZZ)")
def test_DMF__eq__():
    # TODO: DMF equality is not covered yet; the placeholder keeps the
    # test name reserved.
    pass
def test_DMF__bool__():
    """A DMF is falsy iff its numerator is zero."""
    assert bool(DMF([[]], ZZ)) == False
    assert bool(DMF([[1]], ZZ)) == True
def test_DMF_properties():
    """is_zero/is_one predicates on rational functions."""
    assert DMF([[]], ZZ).is_zero == True
    assert DMF([[]], ZZ).is_one == False

    assert DMF([[1]], ZZ).is_zero == False
    assert DMF([[1]], ZZ).is_one == True

    assert DMF(([[1]], [[2]]), ZZ).is_one == False
def test_DMF_arithmetics():
    """Fraction arithmetic methods and their operator equivalents."""
    f = DMF([[7],[-9]], ZZ)
    g = DMF([[-7],[9]], ZZ)

    assert f.neg() == -f == g

    f = DMF(([[1]], [[1],[]]), ZZ)
    g = DMF(([[1]], [[1,0]]), ZZ)

    h = DMF(([[1],[1,0]], [[1,0],[]]), ZZ)

    assert f.add(g) == f + g == h
    assert g.add(f) == g + f == h

    h = DMF(([[-1],[1,0]], [[1,0],[]]), ZZ)

    assert f.sub(g) == f - g == h

    h = DMF(([[1]], [[1,0],[]]), ZZ)

    assert f.mul(g) == f*g == h
    assert g.mul(f) == g*f == h

    h = DMF(([[1,0]], [[1],[]]), ZZ)

    assert f.quo(g) == f/g == h

    h = DMF(([[1]], [[1],[],[],[]]), ZZ)

    assert f.pow(3) == f**3 == h

    h = DMF(([[1]], [[1,0,0,0]]), ZZ)

    assert g.pow(3) == g**3 == h
def test_ANP___init__():
    """ANP construction accepts list, dict and scalar representations."""
    rep = [QQ(1),QQ(1)]
    mod = [QQ(1),QQ(0),QQ(1)]

    f = ANP(rep, mod, QQ)

    assert f.rep == [QQ(1),QQ(1)]
    assert f.mod == [QQ(1),QQ(0),QQ(1)]
    assert f.dom == QQ

    rep = {1: QQ(1), 0: QQ(1)}
    mod = {2: QQ(1), 0: QQ(1)}

    f = ANP(rep, mod, QQ)

    assert f.rep == [QQ(1),QQ(1)]
    assert f.mod == [QQ(1),QQ(0),QQ(1)]
    assert f.dom == QQ

    f = ANP(1, mod, QQ)

    assert f.rep == [QQ(1)]
    assert f.mod == [QQ(1),QQ(0),QQ(1)]
    assert f.dom == QQ
def test_ANP___eq__():
    """ANPs differing in either representation or modulus are unequal."""
    a = ANP([QQ(1), QQ(1)], [QQ(1),QQ(0),QQ(1)], QQ)
    b = ANP([QQ(1), QQ(1)], [QQ(1),QQ(0),QQ(2)], QQ)

    assert (a == a) == True
    assert (a != a) == False

    assert (a == b) == False
    assert (a != b) == True

    b = ANP([QQ(1), QQ(2)], [QQ(1),QQ(0),QQ(1)], QQ)

    assert (a == b) == False
    assert (a != b) == True
def test_ANP___bool__():
    """An ANP is falsy iff its representation is zero."""
    assert bool(ANP([], [QQ(1),QQ(0),QQ(1)], QQ)) == False
    assert bool(ANP([QQ(1)], [QQ(1),QQ(0),QQ(1)], QQ)) == True
def test_ANP_properties():
    """is_zero/is_one predicates on algebraic numbers."""
    mod = [QQ(1),QQ(0),QQ(1)]

    assert ANP([QQ(0)], mod, QQ).is_zero == True
    assert ANP([QQ(1)], mod, QQ).is_zero == False

    assert ANP([QQ(1)], mod, QQ).is_one == True
    assert ANP([QQ(2)], mod, QQ).is_one == False
def test_ANP_arithmetics():
    """Exercise ANP arithmetic modulo x**3 - 2.

    NOTE: c (the expected value) is rebound before each group of asserts,
    so statement order matters.
    """
    mod = [QQ(1),QQ(0),QQ(0),QQ(-2)]
    a = ANP([QQ(2),QQ(-1),QQ(1)], mod, QQ)
    b = ANP([QQ(1),QQ(2)], mod, QQ)
    c = ANP([QQ(-2), QQ(1), QQ(-1)], mod, QQ)
    assert a.neg() == -a == c
    c = ANP([QQ(2), QQ(0), QQ(3)], mod, QQ)
    assert a.add(b) == a+b == c
    assert b.add(a) == b+a == c
    c = ANP([QQ(2), QQ(-2), QQ(-1)], mod, QQ)
    assert a.sub(b) == a-b == c
    c = ANP([QQ(-2), QQ(2), QQ(1)], mod, QQ)
    assert b.sub(a) == b-a == c
    c = ANP([QQ(3), QQ(-1), QQ(6)], mod, QQ)
    assert a.mul(b) == a*b == c
    assert b.mul(a) == b*a == c
    # c is the multiplicative inverse of a in QQ[x]/(x**3 - 2).
    c = ANP([QQ(-1,43), QQ(9,43), QQ(5,43)], mod, QQ)
    assert a.pow(0) == a**(0) == ANP(1, mod, QQ)
    assert a.pow(1) == a**(1) == a
    assert a.pow(-1) == a**(-1) == c
    assert a.quo(a) == a.mul(a.pow(-1)) == a*a**(-1) == ANP(1, mod, QQ)
def test___hash__():
    # Issue 2472
    # Make sure int vs. long doesn't affect hashing with Python ground types.
    # NOTE: the 1l/1L long-integer literals are deliberate and make this
    # test Python 2 only -- the int/long distinction is exactly what is
    # being tested here.
    assert DMP([[1, 2], [3]], ZZ) == DMP([[1l, 2l], [3l]], ZZ)
    assert hash(DMP([[1, 2], [3]], ZZ)) == hash(DMP([[1l, 2l], [3l]], ZZ))
    assert DMF(([[1, 2], [3]], [[1]]), ZZ) == DMF(([[1L, 2L], [3L]], [[1L]]), ZZ)
    assert hash(DMF(([[1, 2], [3]], [[1]]), ZZ)) == hash(DMF(([[1L, 2L], [3L]], [[1L]]), ZZ))
    assert ANP([1, 1], [1, 0, 1], ZZ) == ANP([1l, 1l], [1l, 0l, 1l], ZZ)
    assert hash(ANP([1, 1], [1, 0, 1], ZZ)) == hash(ANP([1l, 1l], [1l, 0l, 1l], ZZ))
| bsd-3-clause |
workhorsy/identify_playstation1_games | iso9660.py | 2 | 11915 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2015, Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Copyright (C) 2013-2014 Barnaby Gale
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies of the Software and its documentation and acknowledgment shall be
# given in the documentation and software packages that this Software was
# used.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
import urllib
import struct
import datetime
PY2 = (sys.version_info[0] == 2)
if PY2:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
else:
from io import BytesIO
SECTOR_SIZE = 2048
class ISO9660IOError(IOError):
    """Raised when a path cannot be found in the ISO image.

    The offending path (a byte string in normal use) is available both
    as ``exc.path`` and via the standard exception ``args``.
    """
    def __init__(self, path):
        # Pass the path to the base class so that exc.args, repr() and
        # pickling carry the payload (the original left args empty).
        IOError.__init__(self, path)
        self.path = path
    def __str__(self):
        return "Path not found: {0}".format(self.path)
class ISO9660(object):
    """Minimal read-only ISO 9660 (CD-ROM) filesystem reader.

    *url* may be a local file path or an ``http`` URL (read with HTTP
    Range requests).  All path and file names handled by this class are
    byte strings.
    """
    def __init__(self, url):
        self._buff = None   # file-like object holding the current sector(s)
        self._root = None   # root directory record (taken from the PVD)
        self._pvd = {}      # primary volume descriptor fields
        self._paths = []    # parsed path table entries
        self._url = url
        # A subclass may already have installed its own sector reader.
        if not hasattr(self, '_get_sector'):
            self._get_sector = self._get_sector_url if url.startswith('http') else self._get_sector_file
        ### Volume Descriptors
        # Descriptors start at sector 0x10; type 1 is the primary volume
        # descriptor, type 255 terminates the descriptor set.
        sector = 0x10
        while True:
            self._get_sector(sector, SECTOR_SIZE)
            sector += 1
            ty = self._unpack('B')
            if ty == 1:
                self._unpack_pvd()
            elif ty == 255:
                break
            else:
                continue
        ### Path table
        # Each record: 8 bytes of fixed fields plus a name padded to an
        # even length.
        l0 = self._pvd['path_table_size']
        self._get_sector(self._pvd['path_table_l_loc'], l0)
        while l0 > 0:
            p = {}
            l1 = self._unpack('B')  # length of the directory identifier
            l2 = self._unpack('B')  # extended attribute record length
            p['ex_loc'] = self._unpack('<I')
            p['parent'] = self._unpack('<H')
            p['name'] = self._unpack_string(l1)
            if p['name'] == b'\x00':
                p['name'] = b''     # root entry uses a NUL name
            if l1%2 == 1:
                self._unpack('B')   # skip the padding byte
            self._paths.append(p)
            l0 -= 8 + l1 + (l1 % 2)
        assert l0 == 0
    ##
    ## Generator listing available files/folders
    ##
    def tree(self, get_files = True):
        """Yield every path in the image as a byte string.

        With ``get_files`` True, files and directories are produced by
        walking directory records; otherwise only directories are
        listed, straight from the path table.
        """
        if get_files:
            gen = self._tree_node(self._root)
        else:
            gen = self._tree_path(b'', 1)
        yield b'/'
        for i in gen:
            yield i
    def _tree_path(self, name, index):
        # Walk the flat path table, yielding directories whose parent is
        # the entry at *index* (path table indices are 1-based).
        spacer = lambda s: name + b"/" + s
        for i, c in enumerate(self._paths):
            if c['parent'] == index and i != 0:
                yield spacer(c['name'])
                for d in self._tree_path(spacer(c['name']), i+1):
                    yield d
    def _tree_node(self, node):
        # Recursively walk directory records; flag bit 0x2 marks a
        # directory.
        spacer = lambda s: node['name'] + b"/" + s
        for c in list(self._unpack_dir_children(node)):
            yield spacer(c['name'])
            if c['flags'] & 2:
                for d in self._tree_node(c):
                    yield spacer(d)
    ##
    ## Retrieve file contents as a string
    ##
    def get_file(self, path):
        """Return the contents of *path* (a byte string) as bytes.

        Raises ISO9660IOError if the path does not exist.
        """
        path = path.upper().strip(b'/').split(b'/')
        path, filename = path[:-1], path[-1]
        if len(path)==0:
            parent_dir = self._root
        else:
            # Prefer the fast path-table lookup; fall back to walking the
            # directory records from the root.
            try:
                parent_dir = self._dir_record_by_table(path)
            except ISO9660IOError:
                parent_dir = self._dir_record_by_root(path)
        f = self._search_dir_children(parent_dir, filename)
        self._get_sector(f['ex_loc'], f['ex_len'])
        return self._unpack_raw(f['ex_len'])
    ##
    ## Methods for retrieving partial contents
    ##
    def _get_sector_url(self, sector, length):
        """Fetch *length* bytes at *sector* over HTTP via a Range request.

        NOTE(review): Python 2 only -- ``urllib.FancyURLopener`` and the
        ``bytes.format`` call below do not exist on Python 3; confirm
        before using HTTP URLs under Python 3.
        """
        start = sector * SECTOR_SIZE
        if self._buff:
            self._buff.close()
        opener = urllib.FancyURLopener()
        # Treat "206 Partial Content" as success, not an error.
        opener.http_error_206 = lambda *a, **k: None
        opener.addheader(b"Range", b"bytes={0}-{1}".format(start, start+length-1))
        self._buff = opener.open(self._url)
    def _get_sector_file(self, sector, length):
        """Read *length* bytes at *sector* from the local ISO file.

        NOTE(review): on Python 2 this module imports StringIO, not
        BytesIO, so BytesIO is unbound there -- verify the Python 2 path.
        """
        with open(self._url, 'rb') as f:
            f.seek(sector*SECTOR_SIZE)
            self._buff = BytesIO(f.read(length))
    ##
    ## Return the record for final directory in a path
    ##
    def _dir_record_by_table(self, path):
        # Scan the path table backwards, matching path components from
        # the leaf upwards until the root (parent index 1) is reached.
        for e in self._paths[::-1]:
            search = list(path)
            f = e
            while f['name'] == search[-1]:
                search.pop()
                f = self._paths[f['parent']-1]
                if f['parent'] == 1:
                    e['ex_len'] = SECTOR_SIZE #TODO: use the real extent length
                    return e
        raise ISO9660IOError(path)
    def _dir_record_by_root(self, path):
        # Resolve *path* one component at a time starting from the root
        # directory record.
        current = self._root
        remaining = list(path)
        while remaining:
            current = self._search_dir_children(current, remaining[0])
            remaining.pop(0)
        return current
    ##
    ## Unpack the Primary Volume Descriptor
    ##
    def _unpack_pvd(self):
        # Field order follows ECMA-119; the buffer is consumed strictly
        # sequentially, so the read order below must not change.
        self._pvd['type_code'] = self._unpack_string(5)
        self._pvd['standard_identifier'] = self._unpack('B')
        self._unpack_raw(1)  #discard 1 byte
        self._pvd['system_identifier'] = self._unpack_string(32)
        self._pvd['volume_identifier'] = self._unpack_string(32)
        self._unpack_raw(8)  #discard 8 bytes
        self._pvd['volume_space_size'] = self._unpack_both('i')
        self._unpack_raw(32) #discard 32 bytes
        self._pvd['volume_set_size'] = self._unpack_both('h')
        self._pvd['volume_seq_num'] = self._unpack_both('h')
        self._pvd['logical_block_size'] = self._unpack_both('h')
        self._pvd['path_table_size'] = self._unpack_both('i')
        self._pvd['path_table_l_loc'] = self._unpack('<i')
        self._pvd['path_table_opt_l_loc'] = self._unpack('<i')
        self._pvd['path_table_m_loc'] = self._unpack('>i')
        self._pvd['path_table_opt_m_loc'] = self._unpack('>i')
        _, self._root = self._unpack_record()  #root directory record
        self._pvd['volume_set_identifer'] = self._unpack_string(128)
        self._pvd['publisher_identifier'] = self._unpack_string(128)
        self._pvd['data_preparer_identifier'] = self._unpack_string(128)
        self._pvd['application_identifier'] = self._unpack_string(128)
        self._pvd['copyright_file_identifier'] = self._unpack_string(38)
        self._pvd['abstract_file_identifier'] = self._unpack_string(36)
        self._pvd['bibliographic_file_identifier'] = self._unpack_string(37)
        self._pvd['volume_datetime_created'] = self._unpack_vd_datetime()
        self._pvd['volume_datetime_modified'] = self._unpack_vd_datetime()
        self._pvd['volume_datetime_expires'] = self._unpack_vd_datetime()
        self._pvd['volume_datetime_effective'] = self._unpack_vd_datetime()
        self._pvd['file_structure_version'] = self._unpack('B')
    ##
    ## Unpack a directory record (a listing of a file or folder)
    ##
    def _unpack_record(self, read=0):
        """Read one directory record; returns (bytes_consumed, record).

        Returns (read+1, None) on a zero-length record, which marks the
        end of a directory listing within a sector.
        """
        l0 = self._unpack('B')  # total record length
        if l0 == 0:
            return read+1, None
        l1 = self._unpack('B')  # extended attribute record length
        d = dict()
        d['ex_loc'] = self._unpack_both('I')
        d['ex_len'] = self._unpack_both('I')
        d['datetime'] = self._unpack_dir_datetime()
        d['flags'] = self._unpack('B')
        d['interleave_unit_size'] = self._unpack('B')
        d['interleave_gap_size'] = self._unpack('B')
        d['volume_sequence'] = self._unpack_both('h')
        l2 = self._unpack('B')  # file identifier length
        # Strip the ";1" version suffix from the identifier.
        d['name'] = self._unpack_string(l2).split(b';')[0]
        if d['name'] == b'\x00':
            d['name'] = b''
        if l2 % 2 == 0:
            self._unpack('B')   # skip the padding byte
        # Consume any system-use bytes so the buffer stays aligned with
        # the start of the next record.
        t = 34 + l2 - (l2 % 2)
        e = l0-t
        if e>0:
            extra = self._unpack_raw(e)
        return read+l0, d
    #Assuming d is a directory record, this generator yields its children
    def _unpack_dir_children(self, d):
        sector = d['ex_loc']
        read = 0
        self._get_sector(sector, 2048)
        # The first two records are the directory itself and its parent.
        read, r_self = self._unpack_record(read)
        read, r_parent = self._unpack_record(read)
        while read < r_self['ex_len']: #Iterate over files in the directory
            if read % 2048 == 0:
                sector += 1
                self._get_sector(sector, 2048)
            read, data = self._unpack_record(read)
            if data == None: #end of directory listing in this sector
                # Skip the zero padding up to the next sector boundary.
                to_read = 2048 - (read % 2048)
                self._unpack_raw(to_read)
                read += to_read
            else:
                yield data
    #Search for one child amongst the children
    def _search_dir_children(self, d, term):
        for e in self._unpack_dir_children(d):
            if e['name'] == term:
                return e
        raise ISO9660IOError(term)
    ##
    ## Datatypes
    ##
    def _unpack_raw(self, l):
        """Read *l* raw bytes from the current buffer."""
        return self._buff.read(l)
    #both-endian
    def _unpack_both(self, st):
        """Read a both-endian field and check the two copies agree."""
        a = self._unpack('<'+st)
        b = self._unpack('>'+st)
        assert a == b
        return a
    def _unpack_string(self, l):
        """Read *l* bytes and strip the space padding."""
        return self._buff.read(l).rstrip(b' ')
    def _unpack(self, st):
        """struct-unpack from the buffer; default byte order is '<'.

        A single-field format returns the scalar, not a 1-tuple.
        """
        if st[0] not in ('<','>'):
            st = '<' + st
        d = struct.unpack(st, self._buff.read(struct.calcsize(st)))
        if len(st) == 2:
            return d[0]
        else:
            return d
    def _unpack_vd_datetime(self):
        # 17-byte "dec-datetime" field; returned raw for now.
        return self._unpack_raw(17) #TODO
    def _unpack_dir_datetime(self):
        """Decode the 7-byte directory-record timestamp to 'Y-m-d H:M:S'."""
        epoch = datetime.datetime(1970, 1, 1)
        date = self._unpack_raw(7)
        t = []
        # First six bytes are unsigned: year-since-1900, month, day,
        # hour, minute, second.
        date_sub = date[:-1]
        for i in range(len(date_sub)):
            n = date_sub[i : i + 1]
            t.append(struct.unpack('<B', n)[0])
        # Last byte is a *signed* GMT offset.
        t.append(struct.unpack('<b', date[-1 : ])[0])
        t[0] += 1900
        t_offset = t.pop(-1) * 15 * 60.  # Offset from GMT in 15min intervals, converted to secs
        t_timestamp = (datetime.datetime(*t) - epoch).total_seconds() - t_offset
        t_datetime = datetime.datetime.fromtimestamp(t_timestamp)
        t_readable = t_datetime.strftime('%Y-%m-%d %H:%M:%S')
        return t_readable
if __name__ == '__main__':
    # CLI: dump a single file from the image, or list its whole tree.
    import sys
    if len(sys.argv) < 2:
        print("usage: python iso9660.py isourl [path]")
    else:
        iso_path = sys.argv[1]
        ret_path = sys.argv[2] if len(sys.argv) > 2 else None
        cd = ISO9660(iso_path)
        if ret_path:
            # get_file() works on byte paths and returns bytes.  On
            # Python 3, sys.argv gives str and sys.stdout rejects bytes,
            # so encode the argument and write to the raw stdout buffer
            # (on Python 2 both branches are no-ops).
            if not isinstance(ret_path, bytes):
                ret_path = ret_path.encode('utf-8')
            out = getattr(sys.stdout, 'buffer', sys.stdout)
            out.write(cd.get_file(ret_path))
        else:
            for path in cd.tree():
                print(path)
| mit |
blacklin/kbengine | kbe/res/scripts/common/Lib/asyncio/windows_events.py | 63 | 23240 | """Selector and proactor event loops for Windows."""
import _winapi
import errno
import math
import socket
import struct
import weakref
from . import events
from . import base_subprocess
from . import futures
from . import proactor_events
from . import selector_events
from . import tasks
from . import windows_utils
from . import _overlapped
from .coroutines import coroutine
from .log import logger
__all__ = ['SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor',
'DefaultEventLoopPolicy',
]
NULL = 0
INFINITE = 0xffffffff
ERROR_CONNECTION_REFUSED = 1225
ERROR_CONNECTION_ABORTED = 1236
class _OverlappedFuture(futures.Future):
    """Subclass of Future which represents an overlapped operation.

    Cancelling it will immediately cancel the overlapped operation.
    """
    def __init__(self, ov, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            # Drop this constructor's frame from the saved traceback.
            del self._source_traceback[-1]
        self._ov = ov
    def _repr_info(self):
        info = super()._repr_info()
        if self._ov is not None:
            state = 'pending' if self._ov.pending else 'completed'
            info.insert(1, 'overlapped=<%s, %#x>' % (state, self._ov.address))
        return info
    def _cancel_overlapped(self):
        # Cancel the underlying overlapped I/O; OS-level failures are
        # reported through the loop's exception handler, not raised.
        if self._ov is None:
            return
        try:
            self._ov.cancel()
        except OSError as exc:
            context = {
                'message': 'Cancelling an overlapped future failed',
                'exception': exc,
                'future': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self._ov = None
    def cancel(self):
        self._cancel_overlapped()
        return super().cancel()
    def set_exception(self, exception):
        super().set_exception(exception)
        self._cancel_overlapped()
    def set_result(self, result):
        super().set_result(result)
        # The operation completed: drop the reference to the OVERLAPPED
        # object without cancelling it.
        self._ov = None
class _WaitHandleFuture(futures.Future):
    """Subclass of Future which represents a wait handle."""
    def __init__(self, iocp, ov, handle, wait_handle, *, loop=None):
        super().__init__(loop=loop)
        if self._source_traceback:
            # Drop this constructor's frame from the saved traceback.
            del self._source_traceback[-1]
        # iocp and ov are only used by cancel() to notify IocpProactor
        # that the wait was cancelled
        self._iocp = iocp
        self._ov = ov
        self._handle = handle
        self._wait_handle = wait_handle
    def _poll(self):
        # non-blocking wait: use a timeout of 0 millisecond
        return (_winapi.WaitForSingleObject(self._handle, 0) ==
                _winapi.WAIT_OBJECT_0)
    def _repr_info(self):
        info = super()._repr_info()
        info.insert(1, 'handle=%#x' % self._handle)
        if self._wait_handle:
            state = 'signaled' if self._poll() else 'waiting'
            info.insert(1, 'wait_handle=<%s, %#x>'
                           % (state, self._wait_handle))
        return info
    def _unregister_wait(self):
        # Unregister the wait callback; failures are reported through the
        # loop's exception handler, never raised to the caller.
        if self._wait_handle is None:
            return
        try:
            _overlapped.UnregisterWait(self._wait_handle)
        except OSError as exc:
            # ERROR_IO_PENDING is not an error, the wait was unregistered
            if exc.winerror != _overlapped.ERROR_IO_PENDING:
                context = {
                    'message': 'Failed to unregister the wait handle',
                    'exception': exc,
                    'future': self,
                }
                if self._source_traceback:
                    context['source_traceback'] = self._source_traceback
                self._loop.call_exception_handler(context)
        self._wait_handle = None
        self._iocp = None
        self._ov = None
    def cancel(self):
        result = super().cancel()
        if self._ov is not None:
            # signal the cancellation to the overlapped object
            _overlapped.PostQueuedCompletionStatus(self._iocp, True,
                                                   0, self._ov.address)
        self._unregister_wait()
        return result
    def set_exception(self, exception):
        super().set_exception(exception)
        self._unregister_wait()
    def set_result(self, result):
        super().set_result(result)
        self._unregister_wait()
class PipeServer(object):
    """Class representing a pipe server.

    This is much like a bound, listening socket.
    """
    def __init__(self, address):
        self._address = address
        self._free_instances = weakref.WeakSet()
        # initialize the pipe attribute before calling _server_pipe_handle()
        # because this function can raise an exception and the destructor calls
        # the close() method
        self._pipe = None
        self._accept_pipe_future = None
        self._pipe = self._server_pipe_handle(True)
    def _get_unconnected_pipe(self):
        # Create new instance and return previous one.  This ensures
        # that (until the server is closed) there is always at least
        # one pipe handle for address.  Therefore if a client attempt
        # to connect it will not fail with FileNotFoundError.
        tmp, self._pipe = self._pipe, self._server_pipe_handle(False)
        return tmp
    def _server_pipe_handle(self, first):
        # Return a wrapper for a new pipe handle.  Only the *first*
        # instance may claim FILE_FLAG_FIRST_PIPE_INSTANCE.
        if self._address is None:
            return None
        flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
        if first:
            flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
        h = _winapi.CreateNamedPipe(
            self._address, flags,
            _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
            _winapi.PIPE_WAIT,
            _winapi.PIPE_UNLIMITED_INSTANCES,
            windows_utils.BUFSIZE, windows_utils.BUFSIZE,
            _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)
        pipe = windows_utils.PipeHandle(h)
        self._free_instances.add(pipe)
        return pipe
    def close(self):
        """Cancel the pending accept and close all unconnected pipes."""
        if self._accept_pipe_future is not None:
            self._accept_pipe_future.cancel()
            self._accept_pipe_future = None
        # Close all instances which have not been connected to by a client.
        if self._address is not None:
            for pipe in self._free_instances:
                pipe.close()
            self._pipe = None
            self._address = None
            self._free_instances.clear()
    __del__ = close
class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop):
    """Windows version of selector event loop."""
    def _socketpair(self):
        # Windows has no socket.socketpair(); use the helper emulation.
        return windows_utils.socketpair()
class ProactorEventLoop(proactor_events.BaseProactorEventLoop):
    """Windows version of proactor event loop using IOCP."""
    def __init__(self, proactor=None):
        if proactor is None:
            proactor = IocpProactor()
        super().__init__(proactor)
    def _socketpair(self):
        # Windows has no socket.socketpair(); use the helper emulation.
        return windows_utils.socketpair()
    @coroutine
    def create_pipe_connection(self, protocol_factory, address):
        """Connect to a named pipe; returns (transport, protocol)."""
        f = self._proactor.connect_pipe(address)
        pipe = yield from f
        protocol = protocol_factory()
        trans = self._make_duplex_pipe_transport(pipe, protocol,
                                                 extra={'addr': address})
        return trans, protocol
    @coroutine
    def start_serving_pipe(self, protocol_factory, address):
        """Start serving on a named pipe; returns a list with one PipeServer."""
        server = PipeServer(address)
        def loop_accept_pipe(f=None):
            # Done-callback for each accept; re-arms itself for the next
            # client unless an error occurred.
            pipe = None
            try:
                if f:
                    pipe = f.result()
                    server._free_instances.discard(pipe)
                    protocol = protocol_factory()
                    self._make_duplex_pipe_transport(
                        pipe, protocol, extra={'addr': address})
                pipe = server._get_unconnected_pipe()
                if pipe is None:
                    # Server was closed.
                    return
                f = self._proactor.accept_pipe(pipe)
            except OSError as exc:
                if pipe and pipe.fileno() != -1:
                    self.call_exception_handler({
                        'message': 'Pipe accept failed',
                        'exception': exc,
                        'pipe': pipe,
                    })
                    pipe.close()
                elif self._debug:
                    logger.warning("Accept pipe failed on pipe %r",
                                   pipe, exc_info=True)
            except futures.CancelledError:
                if pipe:
                    pipe.close()
            else:
                # Only runs when no exception occurred: remember the
                # future so close() can cancel it, and re-register.
                server._accept_pipe_future = f
                f.add_done_callback(loop_accept_pipe)
        self.call_soon(loop_accept_pipe)
        return [server]
    @coroutine
    def _make_subprocess_transport(self, protocol, args, shell,
                                   stdin, stdout, stderr, bufsize,
                                   extra=None, **kwargs):
        # Spawn the child via the Windows-specific transport below.
        transp = _WindowsSubprocessTransport(self, protocol, args, shell,
                                             stdin, stdout, stderr, bufsize,
                                             extra=extra, **kwargs)
        yield from transp._post_init()
        return transp
class IocpProactor:
    """Proactor implementation using IOCP."""
    def __init__(self, concurrency=0xffffffff):
        self._loop = None
        # Futures that completed and are waiting to be handed out by
        # select().
        self._results = []
        self._iocp = _overlapped.CreateIoCompletionPort(
            _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency)
        # Maps ov.address -> (future, ov, obj, callback); keeps the
        # OVERLAPPED objects alive until completion is reported.
        self._cache = {}
        self._registered = weakref.WeakSet()
        self._stopped_serving = weakref.WeakSet()
    def __repr__(self):
        return ('<%s overlapped#=%s result#=%s>'
                % (self.__class__.__name__, len(self._cache),
                   len(self._results)))
    def set_loop(self, loop):
        self._loop = loop
    def select(self, timeout=None):
        """Poll the completion port and return the list of ready futures."""
        if not self._results:
            self._poll(timeout)
        tmp = self._results
        self._results = []
        return tmp
    def recv(self, conn, nbytes, flags=0):
        """Start an overlapped receive on a socket or pipe handle."""
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        if isinstance(conn, socket.socket):
            ov.WSARecv(conn.fileno(), nbytes, flags)
        else:
            ov.ReadFile(conn.fileno(), nbytes)
        def finish_recv(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                # Map "network name deleted" to the portable reset error.
                if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
                    raise ConnectionResetError(*exc.args)
                else:
                    raise
        return self._register(ov, conn, finish_recv)
    def send(self, conn, buf, flags=0):
        """Start an overlapped send on a socket or pipe handle."""
        self._register_with_iocp(conn)
        ov = _overlapped.Overlapped(NULL)
        if isinstance(conn, socket.socket):
            ov.WSASend(conn.fileno(), buf, flags)
        else:
            ov.WriteFile(conn.fileno(), buf)
        def finish_send(trans, key, ov):
            try:
                return ov.getresult()
            except OSError as exc:
                # Map "network name deleted" to the portable reset error.
                if exc.winerror == _overlapped.ERROR_NETNAME_DELETED:
                    raise ConnectionResetError(*exc.args)
                else:
                    raise
        return self._register(ov, conn, finish_send)
    def accept(self, listener):
        """Start an overlapped AcceptEx on a listening socket."""
        self._register_with_iocp(listener)
        conn = self._get_accept_socket(listener.family)
        ov = _overlapped.Overlapped(NULL)
        ov.AcceptEx(listener.fileno(), conn.fileno())
        def finish_accept(trans, key, ov):
            ov.getresult()
            # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work.
            buf = struct.pack('@P', listener.fileno())
            conn.setsockopt(socket.SOL_SOCKET,
                            _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf)
            conn.settimeout(listener.gettimeout())
            return conn, conn.getpeername()
        @coroutine
        def accept_coro(future, conn):
            # Coroutine closing the accept socket if the future is cancelled
            try:
                yield from future
            except futures.CancelledError:
                conn.close()
                raise
        future = self._register(ov, listener, finish_accept)
        coro = accept_coro(future, conn)
        # NOTE(review): "async" became a reserved keyword in Python 3.7;
        # this module targets earlier 3.x versions only.
        tasks.async(coro, loop=self._loop)
        return future
    def connect(self, conn, address):
        """Start an overlapped ConnectEx on a socket."""
        self._register_with_iocp(conn)
        # The socket needs to be locally bound before we call ConnectEx().
        try:
            _overlapped.BindLocal(conn.fileno(), conn.family)
        except OSError as e:
            if e.winerror != errno.WSAEINVAL:
                raise
            # Probably already locally bound; check using getsockname().
            if conn.getsockname()[1] == 0:
                raise
        ov = _overlapped.Overlapped(NULL)
        ov.ConnectEx(conn.fileno(), address)
        def finish_connect(trans, key, ov):
            ov.getresult()
            # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work.
            conn.setsockopt(socket.SOL_SOCKET,
                            _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0)
            return conn
        return self._register(ov, conn, finish_connect)
    def accept_pipe(self, pipe):
        """Start an overlapped ConnectNamedPipe on a server pipe handle."""
        self._register_with_iocp(pipe)
        ov = _overlapped.Overlapped(NULL)
        ov.ConnectNamedPipe(pipe.fileno())
        def finish_accept_pipe(trans, key, ov):
            ov.getresult()
            return pipe
        # FIXME: Tulip issue 196: why to we neeed register=False?
        # See also the comment in the _register() method
        return self._register(ov, pipe, finish_accept_pipe,
                              register=False)
    def connect_pipe(self, address):
        """Connect to a named pipe (runs in Windows' thread pool)."""
        ov = _overlapped.Overlapped(NULL)
        ov.WaitNamedPipeAndConnect(address, self._iocp, ov.address)
        def finish_connect_pipe(err, handle, ov):
            # err, handle were arguments passed to PostQueuedCompletionStatus()
            # in a function run in a thread pool.
            if err == _overlapped.ERROR_SEM_TIMEOUT:
                # Connection did not succeed within time limit.
                msg = _overlapped.FormatMessage(err)
                raise ConnectionRefusedError(0, msg, None, err)
            elif err != 0:
                msg = _overlapped.FormatMessage(err)
                raise OSError(0, msg, None, err)
            else:
                return windows_utils.PipeHandle(handle)
        return self._register(ov, None, finish_connect_pipe, wait_for_post=True)
    def wait_for_handle(self, handle, timeout=None):
        """Return a future that completes when *handle* is signalled."""
        if timeout is None:
            ms = _winapi.INFINITE
        else:
            # RegisterWaitForSingleObject() has a resolution of 1 millisecond,
            # round away from zero to wait *at least* timeout seconds.
            ms = math.ceil(timeout * 1e3)
        # We only create ov so we can use ov.address as a key for the cache.
        ov = _overlapped.Overlapped(NULL)
        wh = _overlapped.RegisterWaitWithQueue(
            handle, self._iocp, ov.address, ms)
        f = _WaitHandleFuture(self._iocp, ov, handle, wh, loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]
        def finish_wait_for_handle(trans, key, ov):
            # Note that this second wait means that we should only use
            # this with handles types where a successful wait has no
            # effect.  So events or processes are all right, but locks
            # or semaphores are not.  Also note if the handle is
            # signalled and then quickly reset, then we may return
            # False even though we have not timed out.
            return f._poll()
        if f._poll():
            # Already signalled: resolve the future right away instead of
            # waiting for the completion-port notification.
            try:
                result = f._poll()
            except OSError as exc:
                f.set_exception(exc)
            else:
                f.set_result(result)
        self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle)
        return f
    def _register_with_iocp(self, obj):
        # To get notifications of finished ops on this objects sent to the
        # completion port, were must register the handle.
        if obj not in self._registered:
            self._registered.add(obj)
            _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0)
            # XXX We could also use SetFileCompletionNotificationModes()
            # to avoid sending notifications to completion port of ops
            # that succeed immediately.
    def _register(self, ov, obj, callback,
                  wait_for_post=False, register=True):
        # Return a future which will be set with the result of the
        # operation when it completes.  The future's value is actually
        # the value returned by callback().
        f = _OverlappedFuture(ov, loop=self._loop)
        if f._source_traceback:
            del f._source_traceback[-1]
        if not ov.pending and not wait_for_post:
            # The operation has completed, so no need to postpone the
            # work.  We cannot take this short cut if we need the
            # NumberOfBytes, CompletionKey values returned by
            # PostQueuedCompletionStatus().
            try:
                value = callback(None, None, ov)
            except OSError as e:
                f.set_exception(e)
            else:
                f.set_result(value)
            # Even if GetOverlappedResult() was called, we have to wait for the
            # notification of the completion in GetQueuedCompletionStatus().
            # Register the overlapped operation to keep a reference to the
            # OVERLAPPED object, otherwise the memory is freed and Windows may
            # read uninitialized memory.
            #
            # For an unknown reason, ConnectNamedPipe() behaves differently:
            # the completion is not notified by GetOverlappedResult() if we
            # already called GetOverlappedResult().  For this specific case, we
            # don't expect notification (register is set to False).
        else:
            register = True
        if register:
            # Register the overlapped operation for later.  Note that
            # we only store obj to prevent it from being garbage
            # collected too early.
            self._cache[ov.address] = (f, ov, obj, callback)
        return f
    def _get_accept_socket(self, family):
        # Fresh non-blocking socket handed to AcceptEx().
        s = socket.socket(family)
        s.settimeout(0)
        return s
    def _poll(self, timeout=None):
        """Drain the completion port, dispatching completed operations."""
        if timeout is None:
            ms = INFINITE
        elif timeout < 0:
            raise ValueError("negative timeout")
        else:
            # GetQueuedCompletionStatus() has a resolution of 1 millisecond,
            # round away from zero to wait *at least* timeout seconds.
            ms = math.ceil(timeout * 1e3)
            if ms >= INFINITE:
                raise ValueError("timeout too big")
        while True:
            status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms)
            if status is None:
                return
            # Only block for the first event; subsequent iterations just
            # drain whatever is already queued.
            ms = 0
            err, transferred, key, address = status
            try:
                f, ov, obj, callback = self._cache.pop(address)
            except KeyError:
                if self._loop.get_debug():
                    self._loop.call_exception_handler({
                        'message': ('GetQueuedCompletionStatus() returned an '
                                    'unexpected event'),
                        'status': ('err=%s transferred=%s key=%#x address=%#x'
                                   % (err, transferred, key, address)),
                    })
                # key is either zero, or it is used to return a pipe
                # handle which should be closed to avoid a leak.
                if key not in (0, _overlapped.INVALID_HANDLE_VALUE):
                    _winapi.CloseHandle(key)
                continue
            if obj in self._stopped_serving:
                f.cancel()
            # Don't call the callback if _register() already read the result or
            # if the overlapped has been cancelled
            elif not f.done():
                try:
                    value = callback(transferred, key, ov)
                except OSError as e:
                    f.set_exception(e)
                    self._results.append(f)
                else:
                    f.set_result(value)
                    self._results.append(f)
    def _stop_serving(self, obj):
        # obj is a socket or pipe handle.  It will be closed in
        # BaseProactorEventLoop._stop_serving() which will make any
        # pending operations fail quickly.
        self._stopped_serving.add(obj)
    def close(self):
        """Cancel outstanding operations and close the completion port."""
        # Cancel remaining registered operations.
        for address, (fut, ov, obj, callback) in list(self._cache.items()):
            if obj is None:
                # The operation was started with connect_pipe() which
                # queues a task to Windows' thread pool.  This cannot
                # be cancelled, so just forget it.
                del self._cache[address]
            # FIXME: Tulip issue 196: remove this case, it should not happen
            elif fut.done() and not fut.cancelled():
                del self._cache[address]
            else:
                try:
                    fut.cancel()
                except OSError as exc:
                    if self._loop is not None:
                        context = {
                            'message': 'Cancelling a future failed',
                            'exception': exc,
                            'future': fut,
                        }
                        if fut._source_traceback:
                            context['source_traceback'] = fut._source_traceback
                        self._loop.call_exception_handler(context)
        while self._cache:
            if not self._poll(1):
                # NOTE(review): _poll() always returns None, so this
                # message logs on every iteration -- presumably it was
                # meant to fire only on timeout; confirm upstream.
                logger.debug('taking long time to close proactor')
        self._results = []
        if self._iocp is not None:
            _winapi.CloseHandle(self._iocp)
            self._iocp = None
    def __del__(self):
        self.close()
class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport):
    """Subprocess transport that watches the child via its process handle."""
    def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
        self._proc = windows_utils.Popen(
            args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
            bufsize=bufsize, **kwargs)
        def callback(f):
            # Invoked when the process handle is signalled, i.e. the
            # child exited.
            returncode = self._proc.poll()
            self._process_exited(returncode)
        f = self._loop._proactor.wait_for_handle(int(self._proc._handle))
        f.add_done_callback(callback)
# The selector-based loop is the public (and default) event loop on
# Windows; ProactorEventLoop must be selected explicitly by applications.
SelectorEventLoop = _WindowsSelectorEventLoop
class _WindowsDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
    """Default policy: new event loops are SelectorEventLoop instances."""
    _loop_factory = SelectorEventLoop
DefaultEventLoopPolicy = _WindowsDefaultEventLoopPolicy
| lgpl-3.0 |
jt6562/XX-Net | python27/1.0/lib/atexit.py | 73 | 1770 | """
atexit.py - allow programmer to define multiple exit functions to be executed
upon normal program termination.
One public function, register, is defined.
"""
__all__ = ["register"]
import sys
_exithandlers = []
def _run_exitfuncs():
    """run any registered exit functions

    _exithandlers is traversed in reverse order so functions are executed
    last in, first out.
    """
    exc_info = None
    while _exithandlers:
        func, targs, kargs = _exithandlers.pop()
        try:
            func(*targs, **kargs)
        except SystemExit:
            # Remember SystemExit but keep running remaining handlers.
            exc_info = sys.exc_info()
        except:
            import traceback
            print >> sys.stderr, "Error in atexit._run_exitfuncs:"
            traceback.print_exc()
            exc_info = sys.exc_info()
    if exc_info is not None:
        # Python 2 three-argument raise: re-raise the last exception with
        # its original traceback.
        raise exc_info[0], exc_info[1], exc_info[2]
def register(func, *targs, **kargs):
    """Register *func* to be executed upon normal program termination.

    Any positional (*targs*) and keyword (*kargs*) arguments are
    forwarded to *func* when it is called.  *func* is returned, so this
    can also be used as a decorator.
    """
    entry = (func, targs, kargs)
    _exithandlers.append(entry)
    return func
# Chain any previously-installed sys.exitfunc, then take over the hook so
# our handler list runs at interpreter exit (Python 2 mechanism).
if hasattr(sys, "exitfunc"):
    # Assume it's another registered exit function - append it to our list
    register(sys.exitfunc)
sys.exitfunc = _run_exitfuncs
if __name__ == "__main__":
    # Self-test (Python 2 print statements).  Handlers run last-in,
    # first-out, so the expected output order at exit is:
    # x3("no kwd args"), x3(5, "bar"), x2(12), x1.
    def x1():
        print "running x1"
    def x2(n):
        print "running x2(%r)" % (n,)
    def x3(n, kwd=None):
        print "running x3(%r, kwd=%r)" % (n, kwd)
    register(x1)
    register(x2, 12)
    register(x3, 5, "bar")
    register(x3, "no kwd args")
| bsd-2-clause |
tylertian/Openstack | openstack F/nova/nova/tests/hyperv/hypervutils.py | 7 | 8846 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Hyper-V classes to be used in testing.
"""
import sys
import time
from nova import exception
from nova.virt.hyperv import constants
from nova.virt.hyperv import volumeutils
from xml.etree import ElementTree
# Check needed for unit testing on Unix
if sys.platform == 'win32':
import wmi
class HyperVUtils(object):
    """Test helper wrapping the Hyper-V WMI providers.

    All WMI namespace connections are opened lazily so this module can be
    imported on non-Windows hosts (the ``wmi`` module itself is only
    imported when ``sys.platform == 'win32'``).
    """

    def __init__(self):
        self.__conn = None
        self.__conn_v2 = None
        self.__conn_cimv2 = None
        self.__conn_wmi = None
        self._volumeutils = volumeutils.VolumeUtils()

    @property
    def _conn(self):
        # root/virtualization: the (v1) Hyper-V WMI provider.
        if self.__conn is None:
            self.__conn = wmi.WMI(moniker='//./root/virtualization')
        return self.__conn

    @property
    def _conn_v2(self):
        # root/virtualization/v2: the newer Hyper-V namespace.
        if self.__conn_v2 is None:
            self.__conn_v2 = wmi.WMI(moniker='//./root/virtualization/v2')
        return self.__conn_v2

    @property
    def _conn_cimv2(self):
        # root/cimv2: generic CIM classes (files, OS information, ...).
        if self.__conn_cimv2 is None:
            self.__conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')
        return self.__conn_cimv2

    @property
    def _conn_wmi(self):
        # root/wmi: low-level hardware classes (e.g. iSCSI initiator).
        if self.__conn_wmi is None:
            self.__conn_wmi = wmi.WMI(moniker='//./root/wmi')
        return self.__conn_wmi

    def create_vhd(self, path):
        """Create a small (3 MB) dynamic VHD at ``path``.

        Raises Exception if the synchronous call or the async WMI job fails.
        """
        image_service = self._conn.query(
            "Select * from Msvm_ImageManagementService")[0]
        (job, ret_val) = image_service.CreateDynamicVirtualHardDisk(
            Path=path, MaxInternalSize=3 * 1024 * 1024)
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._check_job_status(job)
        else:
            success = (ret_val == 0)
        if not success:
            raise Exception('Failed to create Dynamic disk %s with error %d'
                            % (path, ret_val))

    def _check_job_status(self, jobpath):
        """Poll WMI job state for completion"""
        # WMI job paths come back with backslashes; the moniker syntax
        # expects forward slashes.
        job_wmi_path = jobpath.replace('\\', '/')
        job = wmi.WMI(moniker=job_wmi_path)
        while job.JobState == constants.WMI_JOB_STATE_RUNNING:
            time.sleep(0.1)
            job = wmi.WMI(moniker=job_wmi_path)
        return job.JobState == constants.WMI_JOB_STATE_COMPLETED

    def _get_vm(self, vm_name, conn=None):
        """Return the Msvm_ComputerSystem named ``vm_name``.

        Raises InstanceNotFound when no VM with that name exists.
        """
        if conn is None:
            conn = self._conn
        vml = conn.Msvm_ComputerSystem(ElementName=vm_name)
        if not len(vml):
            raise exception.InstanceNotFound(instance=vm_name)
        return vml[0]

    def remote_vm_exists(self, server, vm_name):
        """True if ``vm_name`` exists on the remote host ``server``."""
        conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
        return self._vm_exists(conn, vm_name)

    def vm_exists(self, vm_name):
        """True if ``vm_name`` exists on the local host."""
        return self._vm_exists(self._conn, vm_name)

    def _vm_exists(self, conn, vm_name):
        return len(conn.Msvm_ComputerSystem(ElementName=vm_name)) > 0

    def _get_vm_summary(self, vm_name):
        # Request codes 100 (EnabledState) and 105 (UpTime); the summary
        # object for the first (only) settings path is returned.
        vm = self._get_vm(vm_name)
        vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
        vmsettings = vm.associators(
            wmi_association_class='Msvm_SettingsDefineState',
            wmi_result_class='Msvm_VirtualSystemSettingData')
        settings_paths = [v.path_() for v in vmsettings]
        return vs_man_svc.GetSummaryInformation([100, 105],
                                                settings_paths)[1][0]

    def get_vm_uptime(self, vm_name):
        return self._get_vm_summary(vm_name).UpTime

    def get_vm_state(self, vm_name):
        return self._get_vm_summary(vm_name).EnabledState

    def set_vm_state(self, vm_name, req_state):
        self._set_vm_state(self._conn, vm_name, req_state)

    def _set_vm_state(self, conn, vm_name, req_state):
        """Request a VM state change and wait for it to take effect."""
        vm = self._get_vm(vm_name, conn)
        (job, ret_val) = vm.RequestStateChange(req_state)
        success = False
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._check_job_status(job)
        elif ret_val == 0:
            success = True
        elif ret_val == 32775:
            #Invalid state for current operation. Typically means it is
            #already in the state requested
            success = True
        if not success:
            raise Exception(_("Failed to change vm state of %(vm_name)s"
                              " to %(req_state)s") % locals())

    def get_vm_disks(self, vm_name):
        return self._get_vm_disks(self._conn, vm_name)

    def _get_vm_disks(self, conn, vm_name):
        """Return ([vhd file paths], [physical drive paths]) for a VM."""
        vm = self._get_vm(vm_name, conn)
        vmsettings = vm.associators(
            wmi_result_class='Msvm_VirtualSystemSettingData')
        rasds = vmsettings[0].associators(
            wmi_result_class='MSVM_ResourceAllocationSettingData')
        disks = [r for r in rasds
                 if r.ResourceSubType == 'Microsoft Virtual Hard Disk']
        disk_files = []
        for disk in disks:
            # Connection holds the VHD file path(s) backing the drive.
            disk_files.extend([c for c in disk.Connection])
        volumes = [r for r in rasds
                   if r.ResourceSubType == 'Microsoft Physical Disk Drive']
        volume_drives = []
        for volume in volumes:
            hostResources = volume.HostResource
            drive_path = hostResources[0]
            volume_drives.append(drive_path)
        return (disk_files, volume_drives)

    def remove_remote_vm(self, server, vm_name):
        conn = wmi.WMI(moniker='//' + server + '/root/virtualization')
        conn_cimv2 = wmi.WMI(moniker='//' + server + '/root/cimv2')
        self._remove_vm(vm_name, conn, conn_cimv2)

    def remove_vm(self, vm_name):
        self._remove_vm(vm_name, self._conn, self._conn_cimv2)

    def _remove_vm(self, vm_name, conn, conn_cimv2):
        """Stop and destroy a VM, then delete its backing VHD files."""
        vm = self._get_vm(vm_name, conn)
        vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
        #Stop the VM first.
        self._set_vm_state(conn, vm_name, 3)
        (disk_files, volume_drives) = self._get_vm_disks(conn, vm_name)
        (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_())
        # Fix: 'success' was only assigned for the two expected return
        # codes, so any other error code raised NameError here instead of
        # the intended exception below.
        success = False
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._check_job_status(job)
        elif ret_val == 0:
            success = True
        if not success:
            raise Exception(_('Failed to destroy vm %s') % vm_name)
        #Delete associated vhd disk files.
        for disk in disk_files:
            vhd_file = conn_cimv2.query(
                "Select * from CIM_DataFile where Name = '" +
                disk.replace("'", "''") + "'")[0]
            vhd_file.Delete()

    def _get_target_iqn(self, volume_id):
        # OpenStack volume IQN naming convention.
        return 'iqn.2010-10.org.openstack:volume-' + volume_id

    def logout_iscsi_volume_sessions(self, volume_id):
        target_iqn = self._get_target_iqn(volume_id)
        self._volumeutils.logout_storage_target(self._conn_wmi, target_iqn)

    def iscsi_volume_sessions_exist(self, volume_id):
        target_iqn = self._get_target_iqn(volume_id)
        return len(self._conn_wmi.query(
            "SELECT * FROM MSiSCSIInitiator_SessionClass \
                WHERE TargetName='" + target_iqn + "'")) > 0

    def get_vm_count(self):
        # Exclude the host's own "Microsoft Hosting Computer System" entry.
        return len(self._conn.query(
            "Select * from Msvm_ComputerSystem where Description "
            "<> 'Microsoft Hosting Computer System'"))

    def get_vm_snapshots_count(self, vm_name):
        # SettingType = 5 selects snapshot settings data.
        return len(self._conn.query(
            "Select * from Msvm_VirtualSystemSettingData where \
                SettingType = 5 and SystemName = '" + vm_name + "'"))

    def get_vhd_parent_path(self, vhd_path):
        """Return the parent path of a differencing VHD, or None."""
        image_man_svc = self._conn.Msvm_ImageManagementService()[0]
        (vhd_info, job_path, ret_val) = \
            image_man_svc.GetVirtualHardDiskInfo(vhd_path)
        if ret_val == constants.WMI_JOB_STATUS_STARTED:
            success = self._check_job_status(job_path)
        else:
            success = (ret_val == 0)
        if not success:
            raise Exception(_("Failed to get info for disk %s") %
                            (vhd_path))
        base_disk_path = None
        # vhd_info is a CIM XML blob; ParentPath is set for differencing
        # disks only.
        et = ElementTree.fromstring(vhd_info)
        for item in et.findall("PROPERTY"):
            if item.attrib["NAME"] == "ParentPath":
                base_disk_path = item.find("VALUE").text
                break
        return base_disk_path
| apache-2.0 |
MediaKraken/MediaKraken_Deployment | source/testing/test_subprogram/test_subprogram_broadcast.py | 1 | 1712 | """
Copyright (C) 2016 Quinn D Granfor <spootdev@gmail.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
version 2, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License version 2 for more details.
You should have received a copy of the GNU General Public License
version 2 along with this program; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
"""
import os
import signal
import subprocess
import sys
from common import common_logging_elasticsearch_httpx
sys.path.append('.')
from common import common_network_mediakraken
class TestSubprogramBroadcast:
    """
    Test broadcast: launch subprogram_broadcast.py for the lifetime of the
    class and verify that the MediaKraken server can be discovered.
    """
    # pytest refuses to collect test classes that define __init__, so keep
    # the subprocess handle as a class attribute instead (setup_class
    # already stored it on the class anyway, as it is a classmethod).
    proc_broadcast = None

    @classmethod
    def setup_class(cls):
        # fire up broadcast server
        cls.proc_broadcast = subprocess.Popen(['python3', './subprogram_broadcast.py'],
                                              shell=False)
        common_logging_elasticsearch_httpx.com_es_httpx_post(message_type='info', message_text=
        {'stuff': "PID: %s" % cls.proc_broadcast.pid})

    @classmethod
    def teardown_class(cls):
        # stop the broadcast server started in setup_class
        os.kill(cls.proc_broadcast.pid, signal.SIGTERM)

    def test_sub_broadcast(self):
        """
        Test function
        """
        common_network_mediakraken.com_net_mediakraken_find_server()
| gpl-3.0 |
kyunghyuncho/GroundHog | groundhog/trainer/SGD_adadelta.py | 9 | 8035 | """
Stochastic Gradient Descent.
TODO: write more documentation
"""
__docformat__ = 'restructedtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import time
import logging
import theano
import theano.tensor as TT
from theano.sandbox.scan import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog.utils import print_time, print_mem, const
logger = logging.getLogger(__name__)
class SGD(object):
    """Stochastic gradient descent trainer using the Adadelta update rule.

    Keeps per-parameter running averages of squared gradients (``gnorm2``)
    and squared updates (``dnorm2``) and scales each step by
    sqrt(d2 + eps) / sqrt(g2 + eps), per Zeiler's Adadelta.
    """
    def __init__(self,
                 model,
                 state,
                 data):
        """
        Parameters:
            :param model:
                Class describing the model used. It should provide the
                 computational graph to evaluate the model, and have a
                 similar structure to classes on the models folder
            :param state:
                Dictionary containing the current state of your job. This
                includes configuration of the job, specifically the seed,
                the startign damping factor, batch size, etc. See main.py
                for details
            :param data:
                Class describing the dataset used by the model
        """
        # Adadelta hyper-parameters: decay rate rho and numerical epsilon.
        if 'adarho' not in state:
            state['adarho'] = 0.96
        if 'adaeps' not in state:
            state['adaeps'] = 1e-6
        #####################################
        # Step 0. Constructs shared variables
        #####################################
        bs = state['bs']
        self.model = model
        self.rng = numpy.random.RandomState(state['seed'])
        srng = RandomStreams(self.rng.randint(213))
        # self.gs: shared storage for the (possibly clipped) gradients.
        self.gs = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                             dtype=theano.config.floatX),
                                name=p.name)
                   for p in model.params]
        # self.gnorm2: running average of squared gradients (E[g^2]).
        self.gnorm2 = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                             dtype=theano.config.floatX),
                                name=p.name+'_g2')
                   for p in model.params]
        # self.dnorm2: running average of squared parameter updates (E[dx^2]).
        self.dnorm2 = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape,
                                             dtype=theano.config.floatX),
                                name=p.name+'_d2')
                   for p in model.params]
        self.step = 0
        self.bs = bs
        self.state = state
        self.data = data
        self.step_timer = time.time()
        # Shared buffers holding the current minibatch on the device; the
        # (2,)*ndim placeholder shape is replaced on first set_value.
        self.gdata = [theano.shared(numpy.zeros( (2,)*x.ndim,
                                                dtype=x.dtype),
                                    name=x.name) for x in model.inputs]
        if 'profile' not in self.state:
            self.state['profile'] = 0
        ###################################
        # Step 1. Compile training function
        ###################################
        logger.debug('Constructing grad function')
        loc_data = self.gdata
        self.prop_exprs = [x[1] for x in model.properties]
        self.prop_names = [x[0] for x in model.properties]
        self.update_rules = [x[1] for x in model.updates]
        # Clone the graph, replacing symbolic inputs by the shared buffers.
        rval = theano.clone(model.param_grads + self.update_rules + \
                            self.prop_exprs + [model.train_cost],
                            replace=zip(model.inputs, loc_data))
        nparams = len(model.params)
        nouts = len(self.prop_exprs)
        nrules = len(self.update_rules)
        # rval layout: gradients, then update rules, then properties + cost.
        gs = rval[:nparams]
        rules = rval[nparams:nparams + nrules]
        outs = rval[nparams + nrules:]
        # Global gradient norm over the non-excluded parameters.
        norm_gs = TT.sqrt(sum(TT.sum(x**2)
            for x,p in zip(gs, self.model.params) if p not in self.model.exclude_params_for_norm))
        if 'cutoff' in state and state['cutoff'] > 0:
            # Gradient clipping: rescale to 'cutoff' when the global norm
            # exceeds it; fall back to a small pull toward the current
            # parameter value when the norm is NaN/Inf.
            c = numpy.float32(state['cutoff'])
            if state['cutoff_rescale_length']:
                c = c * TT.cast(loc_data[0].shape[0], 'float32')
            notfinite = TT.or_(TT.isnan(norm_gs), TT.isinf(norm_gs))
            _gs = []
            for g,p in zip(gs,self.model.params):
                if p not in self.model.exclude_params_for_norm:
                    tmpg = TT.switch(TT.ge(norm_gs, c), g*c/norm_gs, g)
                    _gs.append(
                       TT.switch(notfinite, numpy.float32(.1)*p, tmpg))
                else:
                    _gs.append(g)
            gs = _gs
        store_gs = [(s,g) for s,g in zip(self.gs, gs)]
        updates = store_gs + [(s[0], r) for s,r in zip(model.updates, rules)]
        rho = self.state['adarho']
        eps = self.state['adaeps']
        # grad2
        gnorm2_up = [rho * gn2 + (1. - rho) * (g ** 2.) for gn2,g in zip(self.gnorm2, gs)]
        updates = updates + zip(self.gnorm2, gnorm2_up)
        logger.debug('Compiling grad function')
        st = time.time()
        # train_fn: computes gradients/properties and accumulates E[g^2].
        self.train_fn = theano.function(
            [], outs, name='train_function',
            updates = updates,
            givens = zip(model.inputs, loc_data))
        logger.debug('took {}'.format(time.time() - st))
        self.lr = numpy.float32(1.)
        # Adadelta step: dx = -(sqrt(E[dx^2]+eps)/sqrt(E[g^2]+eps)) * g.
        new_params = [p - (TT.sqrt(dn2 + eps) / TT.sqrt(gn2 + eps)) * g
                for p, g, gn2, dn2 in
                zip(model.params, self.gs, self.gnorm2, self.dnorm2)]
        updates = zip(model.params, new_params)
        # d2
        d2_up = [(dn2, rho * dn2 + (1. - rho) *
                    (((TT.sqrt(dn2 + eps) / TT.sqrt(gn2 + eps)) * g) ** 2.))
            for dn2, gn2, g in zip(self.dnorm2, self.gnorm2, self.gs)]
        updates = updates + d2_up
        # update_fn: applies the parameter step and accumulates E[dx^2].
        self.update_fn = theano.function(
            [], [], name='update_function',
            allow_input_downcast=True,
            updates = updates)
        self.old_cost = 1e20
        self.schedules = model.get_schedules()
        self.return_names = self.prop_names + \
                ['cost',
                        'error',
                        'time_step',
                        'whole_time', 'lr']
        self.prev_batch = None
    def __call__(self):
        """Run one training step; return a dict of cost/timing/properties."""
        batch = self.data.next()
        assert batch
        # Perturb the data (! and the model)
        if isinstance(batch, dict):
            batch = self.model.perturb(**batch)
        else:
            batch = self.model.perturb(*batch)
        # Load the dataset into GPU
        # Note: not the most efficient approach in general, as it involves
        # each batch is copied individually on gpu
        if isinstance(batch, dict):
            for gdata in self.gdata:
                gdata.set_value(batch[gdata.name], borrow=True)
        else:
            for gdata, data in zip(self.gdata, batch):
                gdata.set_value(data, borrow=True)
        # Run the trianing function
        g_st = time.time()
        rvals = self.train_fn()
        # Schedules may adjust trainer state based on the latest cost.
        for schedule in self.schedules:
            schedule(self, rvals[-1])
        self.update_fn()
        g_ed = time.time()
        self.state['lr'] = float(self.lr)
        cost = rvals[-1]
        self.old_cost = cost
        whole_time = time.time() - self.step_timer
        if self.step % self.state['trainFreq'] == 0:
            # Periodic progress report (Python 2 print statement).
            msg = '.. iter %4d cost %.3f'
            vals = [self.step, cost]
            for dx, prop in enumerate(self.prop_names):
                msg += ' '+prop+' %.2e'
                vals += [float(numpy.array(rvals[dx]))]
            msg += ' step time %s whole time %s lr %.2e'
            vals += [print_time(g_ed - g_st),
                     print_time(time.time() - self.step_timer),
                     float(self.lr)]
            print msg % tuple(vals)
        self.step += 1
        ret = dict([('cost', float(cost)),
                       ('error', float(cost)),
                        ('lr', float(self.lr)),
                        ('time_step', float(g_ed - g_st)),
                        ('whole_time', float(whole_time))]+zip(self.prop_names, rvals))
        return ret
| bsd-3-clause |
tomspur/shedskin | examples/c64/tape.py | 7 | 4626 | #!/usr/bin/env python2
# tape emulation
from symbols import *
import loaders
import loaders.t64
import loaders.prg
tape_loader = loaders.t64.Loader() # public. set this from the main emu GUI.
T_EOF = 0x40
WRITE_LEADER = 0x0A
WRITE_BLOCK = 0x08
READ_BLOCK = 0x0E
# SCAN_KEY = 0x0C
err = 0
def get_tape_buffer(memory):
    # Little-endian 16-bit pointer at $B2/$B3: start of the cassette buffer.
    low_byte = memory.read_memory(0xB2, 1)
    high_byte = memory.read_memory(0xB3, 1)
    return low_byte | (high_byte << 8)
def get_file_name_length(memory):
    # Single byte at $B7: length of the current file name.
    return memory.read_memory(0xB7, 1)
def get_file_name_address(memory):
    """Return the 16-bit file name pointer stored at $BB/$BC (low byte first)."""
    # Pass the byte width explicitly on both reads, matching the sibling
    # helpers (the $BC read previously relied on read_memory's default).
    return(memory.read_memory(0xBB, 1) | (memory.read_memory(0xBC, 1) << 8))
def get_stop_location(memory):
    """Return the 16-bit end-of-load address stored at $AE/$AF (low byte first)."""
    # Explicit width on the high-byte read, for consistency with the other
    # zero-page word readers in this module.
    return(memory.read_memory(0xAE, 1) | (memory.read_memory(0xAF, 1) << 8))
#def get_status(memory):
# return(memory.read_memory(0x90, 1))
def set_status(memory, value):
    # ST ($90): the KERNAL I/O status byte.
    return memory.write_memory(0x90, value, 1)
#def get_VERCKK(memory):
# return(memory.read_memory(0x93, 1))
def set_VERCKK(memory, value):
    # VERCK ($93): 0 = LOAD, nonzero = VERIFY.
    return memory.write_memory(0x93, value, 1)
def set_IRQTMP(memory, value):
    # IRQTMP ($2A0): saved IRQ vector; if it equals [$315] the KERNAL
    # considers tape I/O finished.
    return memory.write_memory(0x2A0, value, 1)
def get_start_location(memory):
    """Return the 16-bit load start address stored at $C1/$C2 (low byte first)."""
    # Explicit width on the high-byte read, for consistency with the other
    # zero-page word readers in this module.
    return(memory.read_memory(0xC1, 1) | (memory.read_memory(0xC2, 1) << 8))
# Byte offsets into the 192-byte cassette buffer header layout:
# [0] header type, [1..2] start address, [3..4] stop address, [5..20] name.
OFFSET_TYPE = 0
OFFSET_NAME = 5
OFFSET_START_ADDR = 1
OFFSET_STOP_ADDR = 3
def setup_tape_header(type, file_name, start_addr, stop_addr, memory):
    # Write a tape header record into the cassette buffer: header type,
    # load start/stop addresses (2 bytes each) and the 16-char file name.
    # NOTE: 'type' shadows the builtin; kept for ShedSkin compatibility.
    buffer_addr = get_tape_buffer(memory)
    # Pad/truncate the name to exactly 16 characters (Python 2 str/bytes
    # semantics: b" " is a str here, so str concatenation works).
    file_name = (file_name + (b" "*16))[:16]
    for i in range(16):
        memory.write_memory(buffer_addr + OFFSET_NAME + i, ord(file_name[i]), 1)
    memory.write_memory(buffer_addr + OFFSET_TYPE, type, 1)
    memory.write_memory(buffer_addr + OFFSET_START_ADDR, start_addr, 2)
    memory.write_memory(buffer_addr + OFFSET_STOP_ADDR, stop_addr, 2)
def get_file_name(memory):
    # Read the requested file name out of emulated memory: the pointer is
    # at $BB/$BC, the length at $B7, capped at 16 characters.
    file_name_length = 0 # ShedSkin
    file_name_addr = 0 # ShedSkin
    file_name_addr = get_file_name_address(memory)
    file_name_length = get_file_name_length(memory)
    # TODO replace invalid (not in "0-9A-Za-z._") by "."
    # NOTE(review): Python 2 only — b"" is str there, so joining chr()
    # results works; under Python 3 this would raise TypeError.
    return b"".join([chr(memory.read_memory(file_name_addr + i, 1)) for i in range(min(16, file_name_length))])
def find_header(CPU, memory): # read a block from tape
    """ trap 0xF72F, originally was [0x20, 0x41, 0xF8].

    Look up the next header on the emulated tape and copy it into the
    cassette buffer; sets ST and the carry flag to report success/failure.
    """
    # ShedSkin type seeds must come BEFORE the real assignments; previously
    # the 'file_name = ""' seed clobbered the name read from memory, so
    # load_header() was always called with an empty string.
    start_addr = 0 # ShedSkin
    stop_addr = 0 # ShedSkin
    file_name = "" # ShedSkin
    type_ = 3 # 1 relocatable program; 2 SEQ data block; 3 non-relocatable program; 4 SEQ file header; 5 End-of-tape
    #type_, file_name, start_addr, stop_addr
    file_name = get_file_name(memory)
    header = tape_loader.load_header(file_name)
    if header is None:
        print("umm... no file on tape?")
        set_status(memory, 0x30) # Checksum error and pass2 error.
        err = -1
    else:
        # Mirror the header into the cassette buffer for the KERNAL.
        setup_tape_header(type_, header.file_name, header.start_addr, header.end_addr, memory)
        set_status(memory, 0)
        err = 0
    set_VERCKK(memory, 0) # FIXME do we have to set this?
    set_IRQTMP(memory, 0)
    # Carry clear = success, carry set = error, as the trapped ROM expects.
    if err == 0:
        CPU.CLC()
    else:
        CPU.SEC()
    CPU.clear_Z() # FIXME
    CPU.set_PC(0xF732)
def write_header(CPU, memory):
    """ trap 0xF7BE, originally was [0x20, 0x6B, 0xF8]. """
    # Writing tape headers is not emulated; just resume after the
    # trapped subroutine call.
    CPU.set_PC(0xF7C1)
def transfer(CPU, memory):
    """ trap 0xF8A1, originally was [0x20, 0xBD, 0xFC].

    Tape block transfer: the X register selects the operation. Writes are
    ignored; READ_BLOCK copies the file's bytes straight into memory.
    """
    err = 0
    st = 0
    mode = CPU.read_register(S_X)
    if mode == WRITE_LEADER or mode == WRITE_BLOCK:
        pass  # writing to tape is not emulated
    elif mode == READ_BLOCK:
        begin = get_start_location(memory)
        finish = get_stop_location(memory)
        length = finish - begin # !!!!!
        name = get_file_name(memory)
        data = "" # ShedSkin
        data = tape_loader.load_data(name)
        for offset in range(length):
            memory.write_memory(begin + offset, ord(data[offset]), 1)
        st |= T_EOF
    else:
        err = -1
    set_IRQTMP(memory, 0)
    set_status(memory, st) # get_status(memory) | st)
    # Carry reports success to the trapped ROM routine.
    if err == 0:
        CPU.CLC()
    else:
        CPU.SEC()
    CPU.set_PC(0xFC93)
def get_hooks():
    # PCs trapped by the emulator core: transfer, write_header, find_header.
    return [0xF8A1, 0xF7BE, 0xF72F]
"""{0xF8A1: transfer,
0xF7BE: write_header,
0xF72F: find_header}.keys()"""
def call_hook(CPU, memory, PC):
    #PC = CPU.read_register(S_PC)
    # Dispatch a trapped PC to its handler; unknown PCs are silently
    # ignored (returns None either way).
    if PC == 0xF72F:
        return find_header(CPU, memory)
    elif PC == 0xF7BE:
        return write_header(CPU, memory)
    elif PC == 0xF8A1:
        return transfer(CPU, memory)
    return None
def set_image_name(name, format):
    # Replace the module-level tape loader with one parsed from the given
    # image file; PRG images get the PRG loader, everything else T64.
    global tape_loader
    loader_class = loaders.prg.Loader if format == "PRG" else loaders.t64.Loader
    tape_loader = loader_class().parse(open(name, "rb"), name)
| gpl-3.0 |
ZxlAaron/mypros | python/pyspark/ml/util.py | 10 | 8851 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import uuid
if sys.version > '3':
basestring = str
unicode = str
from pyspark import SparkContext, since
from pyspark.ml.common import inherit_doc
def _jvm():
    """
    Returns the JVM view associated with SparkContext. Must be called
    after SparkContext is initialized.

    Raises AttributeError when no SparkContext has been started yet.
    """
    jvm = SparkContext._jvm
    if not jvm:
        raise AttributeError("Cannot load _jvm from SparkContext. Is SparkContext initialized?")
    return jvm
class Identifiable(object):
    """
    Object with a unique ID.
    """
    def __init__(self):
        #: A unique id for the object.
        self.uid = self._randomUID()
    def __repr__(self):
        # The uid doubles as the printable representation.
        return self.uid
    @classmethod
    def _randomUID(cls):
        """
        Generate a unique unicode id for the object. The default implementation
        concatenates the class name, "_", and the trailing hex characters of a
        random UUID (``uuid4().hex[12:]`` keeps the last 20 of the 32 digits).
        """
        return unicode(cls.__name__ + "_" + uuid.uuid4().hex[12:])
@inherit_doc
class MLWriter(object):
    """
    Utility class that can save ML instances.

    Abstract interface: subclasses such as :py:class:`JavaMLWriter`
    override every method; calling any of them on this base class raises
    ``NotImplementedError``.

    .. versionadded:: 2.0.0
    """
    def save(self, path):
        """Save the ML instance to the input path."""
        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
    def overwrite(self):
        """Overwrites if the output path already exists."""
        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
    def context(self, sqlContext):
        """
        Sets the SQL context to use for saving.
        .. note:: Deprecated in 2.1 and will be removed in 2.2, use session instead.
        """
        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
    def session(self, sparkSession):
        """Sets the Spark Session to use for saving."""
        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
@inherit_doc
class JavaMLWriter(MLWriter):
    """
    (Private) Specialization of :py:class:`MLWriter` for :py:class:`JavaParams` types

    Delegates all operations to the peer JVM writer object.
    """
    def __init__(self, instance):
        super(JavaMLWriter, self).__init__()
        _java_obj = instance._to_java()
        self._jwrite = _java_obj.write()

    def save(self, path):
        """Save the ML instance to the input path."""
        if not isinstance(path, basestring):
            raise TypeError("path should be a basestring, got type %s" % type(path))
        self._jwrite.save(path)

    def overwrite(self):
        """Overwrites if the output path already exists."""
        self._jwrite.overwrite()
        return self

    def context(self, sqlContext):
        """
        Sets the SQL context to use for saving.
        .. note:: Deprecated in 2.1 and will be removed in 2.2, use session instead.
        """
        # Fix: 'warnings' is never imported at module level, so this
        # deprecation shim raised NameError; import it locally instead.
        import warnings
        warnings.warn("Deprecated in 2.1 and will be removed in 2.2, use session instead.")
        self._jwrite.context(sqlContext._ssql_ctx)
        return self

    def session(self, sparkSession):
        """Sets the Spark Session to use for saving."""
        self._jwrite.session(sparkSession._jsparkSession)
        return self
@inherit_doc
class MLWritable(object):
    """
    Mixin for ML instances that provide :py:class:`MLWriter`.

    Subclasses must override :py:meth:`write`; :py:meth:`save` is a
    convenience wrapper around it.

    .. versionadded:: 2.0.0
    """
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        raise NotImplementedError("MLWritable is not yet implemented for type: %r" % type(self))
    def save(self, path):
        """Save this ML instance to the given path, a shortcut of `write().save(path)`."""
        self.write().save(path)
@inherit_doc
class JavaMLWritable(MLWritable):
    """
    (Private) Mixin for ML instances that provide :py:class:`JavaMLWriter`.
    """
    def write(self):
        """Returns an MLWriter instance for this ML instance."""
        # Wrap this instance's JVM counterpart in a JavaMLWriter.
        return JavaMLWriter(self)
@inherit_doc
class MLReader(object):
    """
    Utility class that can load ML instances.

    Abstract interface: subclasses such as :py:class:`JavaMLReader`
    override every method; calling any of them on this base class raises
    ``NotImplementedError``.

    .. versionadded:: 2.0.0
    """
    def load(self, path):
        """Load the ML instance from the input path."""
        raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
    def context(self, sqlContext):
        """
        Sets the SQL context to use for loading.
        .. note:: Deprecated in 2.1 and will be removed in 2.2, use session instead.
        """
        raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
    def session(self, sparkSession):
        """Sets the Spark Session to use for loading."""
        raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
@inherit_doc
class JavaMLReader(MLReader):
    """
    (Private) Specialization of :py:class:`MLReader` for :py:class:`JavaParams` types

    Delegates loading to the peer JVM reader and converts the result back
    into its Python wrapper via ``_from_java``.
    """
    def __init__(self, clazz):
        self._clazz = clazz
        self._jread = self._load_java_obj(clazz).read()

    def load(self, path):
        """Load the ML instance from the input path."""
        if not isinstance(path, basestring):
            raise TypeError("path should be a basestring, got type %s" % type(path))
        java_obj = self._jread.load(path)
        if not hasattr(self._clazz, "_from_java"):
            raise NotImplementedError("This Java ML type cannot be loaded into Python currently: %r"
                                      % self._clazz)
        return self._clazz._from_java(java_obj)

    def context(self, sqlContext):
        """
        Sets the SQL context to use for loading.
        .. note:: Deprecated in 2.1 and will be removed in 2.2, use session instead.
        """
        # Fix: 'warnings' is never imported at module level, so this
        # deprecation shim raised NameError; import it locally instead.
        import warnings
        warnings.warn("Deprecated in 2.1 and will be removed in 2.2, use session instead.")
        self._jread.context(sqlContext._ssql_ctx)
        return self

    def session(self, sparkSession):
        """Sets the Spark Session to use for loading."""
        self._jread.session(sparkSession._jsparkSession)
        return self

    @classmethod
    def _java_loader_class(cls, clazz):
        """
        Returns the full class name of the Java ML instance. The default
        implementation replaces "pyspark" by "org.apache.spark" in
        the Python full class name.
        """
        java_package = clazz.__module__.replace("pyspark", "org.apache.spark")
        if clazz.__name__ in ("Pipeline", "PipelineModel"):
            # Remove the last package name "pipeline" for Pipeline and PipelineModel.
            java_package = ".".join(java_package.split(".")[0:-1])
        return java_package + "." + clazz.__name__

    @classmethod
    def _load_java_obj(cls, clazz):
        """Load the peer Java object of the ML instance."""
        java_class = cls._java_loader_class(clazz)
        java_obj = _jvm()
        # Walk the dotted name one attribute at a time through py4j.
        for name in java_class.split("."):
            java_obj = getattr(java_obj, name)
        return java_obj
@inherit_doc
class MLReadable(object):
    """
    Mixin for instances that provide :py:class:`MLReader`.

    Subclasses must override :py:meth:`read`; :py:meth:`load` is a
    convenience wrapper around it.

    .. versionadded:: 2.0.0
    """
    @classmethod
    def read(cls):
        """Returns an MLReader instance for this class."""
        raise NotImplementedError("MLReadable.read() not implemented for type: %r" % cls)
    @classmethod
    def load(cls, path):
        """Reads an ML instance from the input path, a shortcut of `read().load(path)`."""
        return cls.read().load(path)
@inherit_doc
class JavaMLReadable(MLReadable):
    """
    (Private) Mixin for instances that provide JavaMLReader.
    """
    @classmethod
    def read(cls):
        """Returns an MLReader instance for this class."""
        # The reader needs the class itself to locate the peer Java type.
        return JavaMLReader(cls)
@inherit_doc
class JavaPredictionModel():
    """
    (Private) Java Model for prediction tasks (regression and classification).
    To be mixed in with class:`pyspark.ml.JavaModel`
    """
    @property
    @since("2.1.0")
    def numFeatures(self):
        """
        Returns the number of features the model was trained on. If unknown, returns -1
        """
        # Delegates to the wrapped JVM model; the JavaModel mixin is
        # expected to provide _call_java.
        return self._call_java("numFeatures")
| apache-2.0 |
mydongistiny/external_chromium_org | third_party/jinja2/_compat.py | 638 | 4042 | # -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
# True when running under Python 2; drives the compat shims below.
PY2 = sys.version_info[0] == 2
# True on the PyPy interpreter (it exposes pypy_translation_info).
PYPY = hasattr(sys, 'pypy_translation_info')
# No-op used where a py2 decorator/converter is unnecessary on py3.
_identity = lambda x: x
# Python 2/3 unification: expose one set of names (text_type, iteritems,
# reraise, pickle, BytesIO/StringIO, ...) regardless of interpreter.
if not PY2:
    unichr = chr
    range_type = range
    text_type = str
    string_types = (str,)
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
    import pickle
    from io import BytesIO, StringIO
    # "Native" strings are unicode on py3.
    NativeStringIO = StringIO
    def reraise(tp, value, tb=None):
        # Re-raise preserving (or substituting) the given traceback.
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    ifilter = filter
    imap = map
    izip = zip
    intern = sys.intern
    implements_iterator = _identity
    implements_to_string = _identity
    encode_filename = _identity
    get_next = lambda x: x.__next__
else:
    unichr = unichr
    text_type = unicode
    range_type = xrange
    string_types = (str, unicode)
    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()
    import cPickle as pickle
    from cStringIO import StringIO as BytesIO, StringIO
    # "Native" strings are bytes on py2.
    NativeStringIO = BytesIO
    # py2-only raise syntax; wrapped in exec so py3 can still parse this file.
    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
    from itertools import imap, izip, ifilter
    intern = intern
    def implements_iterator(cls):
        # Class decorator: map the py3 __next__ protocol onto py2's next().
        cls.next = cls.__next__
        del cls.__next__
        return cls
    def implements_to_string(cls):
        # Class decorator: derive py2 __str__ (bytes) from __unicode__.
        cls.__unicode__ = cls.__str__
        cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
        return cls
    get_next = lambda x: x.next
    def encode_filename(filename):
        # py2 filesystem APIs want byte strings.
        if isinstance(filename, unicode):
            return filename.encode('utf-8')
        return filename
# Builtin next() appeared in 2.6; provide a fallback for older py2.
try:
    next = next
except NameError:
    def next(it):
        return it.next()
def with_metaclass(meta, *bases):
    """Create a base class that applies ``meta`` when subclassed.

    Works on both py2 and py3 despite their different metaclass syntax.
    """
    # This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instanciation that replaces
    # itself with the actual metaclass.  Because of internal type checks
    # we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ comes back from type etc.).
    #
    # This has the advantage over six.with_metaclass in that it does not
    # introduce dummy classes into the final MRO.
    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__
        def __new__(cls, name, this_bases, d):
            # First call (building 'temporary_class'): make a plain type.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            # Subclass definition: build the real class with the real
            # metaclass and the originally requested bases.
            return meta(name, bases, d)
    return metaclass('temporary_class', None, {})
# Mapping ABC moved around between py2 and py3; fall back to the legacy
# UserDict-based tuple for isinstance() checks on old interpreters.
try:
    from collections import Mapping as mapping_types
except ImportError:
    import UserDict
    mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)
# common types. These do exist in the special types module too which however
# does not exist in IronPython out of the box. Also that way we don't have
# to deal with implementation specific stuff here
class _C(object):
    def method(self): pass
def _func():
    yield None
function_type = type(_func)
generator_type = type(_func())
method_type = type(_C().method)
code_type = type(_C.method.__code__)
# Deliberately raise and catch to grab a live traceback object, from which
# the traceback and frame types are extracted.
try:
    raise TypeError()
except TypeError:
    _tb = sys.exc_info()[2]
    traceback_type = type(_tb)
    frame_type = type(_tb.tb_frame)
# URL quoting moved from urllib (py2) to urllib.parse (py3).
try:
    from urllib.parse import quote_from_bytes as url_quote
except ImportError:
    from urllib import quote as url_quote
# Lock allocation: thread (py2) -> threading -> dummy_thread fallback for
# interpreters built without thread support.
try:
    from thread import allocate_lock
except ImportError:
    try:
        from threading import Lock as allocate_lock
    except ImportError:
        from dummy_thread import allocate_lock
eunchong/build | scripts/slave/recipe_modules/chromium_tests/chromium_perf.py | 1 | 5954 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from . import steps
# Maps platform -> {target_bits: builder name}; filled in by _AddBuildSpec
# and consulted by _AddTestSpec to find a tester's parent builder.
_builders = collections.defaultdict(dict)
# Master spec consumed by the recipe; 'builders' is populated below.
SPEC = {
  'builders': {},
  'settings': {
    'build_gs_bucket': 'chrome-perf',
  },
}
def _BaseSpec(bot_type, chromium_apply_config, disable_tests,
gclient_config, platform, target_bits):
return {
'bot_type': bot_type,
'chromium_apply_config' : chromium_apply_config,
'chromium_config': 'chromium_official',
'chromium_config_kwargs': {
'BUILD_CONFIG': 'Release',
'TARGET_BITS': target_bits,
},
'disable_tests': disable_tests,
'gclient_config': gclient_config,
'testing': {
'platform': 'linux' if platform == 'android' else platform,
},
}
def _BuildSpec(platform, target_bits):
  """Return the spec for a perf builder on |platform| at |target_bits|."""
  spec = _BaseSpec(
      bot_type='builder',
      chromium_apply_config=['mb', 'chromium_perf', 'goma_hermetic_fallback'],
      disable_tests=True,
      gclient_config='chromium',
      platform=platform,
      target_bits=target_bits)
  if platform == 'android':
    # Android builds target ARM and need the android overlays on top.
    spec['chromium_apply_config'].append('android')
    spec['chromium_config_kwargs']['TARGET_ARCH'] = 'arm'
    spec['gclient_apply_config'] = ['android', 'perf']
    return spec
  spec['compile_targets'] = ['chromium_builder_perf']
  spec['gclient_apply_config'] = ['chrome_internal']
  if platform == 'win':
    # NOTE: a one-element set, matching the original behavior.
    spec['tests'] = {steps.SizesStep(results_url=None, perf_id=None)}
  return spec
def _TestSpec(parent_builder, perf_id, platform, target_bits, max_battery_temp,
              shard_index, num_host_shards, num_device_shards,
              known_devices_file):
  """Return the spec for one perf tester host shard.

  Android testers drive devices from a Linux host and skip the regular
  test-spec machinery; desktop testers read chromium.perf.json.
  """
  is_android = platform == 'android'
  spec = _BaseSpec(
      bot_type='tester',
      chromium_apply_config=[],
      disable_tests=is_android,
      gclient_config='perf',
      platform=platform,
      target_bits=target_bits)
  spec.update({
      'parent_buildername': parent_builder,
      'perf-id': perf_id,
      'results-url': 'https://chromeperf.appspot.com',
      'tests': [
          steps.DynamicPerfTests(perf_id, platform, target_bits,
                                 max_battery_temp, num_device_shards,
                                 num_host_shards, shard_index,
                                 known_devices_file),
      ],
  })
  if is_android:
    spec['android_config'] = 'perf'
    spec['chromium_config_kwargs']['TARGET_PLATFORM'] = 'android'
    spec['gclient_apply_config'] = ['android']
  else:
    spec['test_generators'] = [steps.generate_script]
    spec['test_spec_file'] = 'chromium.perf.json'
  return spec
def _AddBuildSpec(name, platform, target_bits=64):
  """Register a builder spec and record it as |platform|'s builder."""
  SPEC['builders'][name] = _BuildSpec(platform, target_bits)
  platform_builders = _builders[platform]
  # Exactly one builder per (platform, bitness) pair.
  assert target_bits not in platform_builders
  platform_builders[target_bits] = name
def _AddTestSpec(name, perf_id, platform, target_bits=64,
                 max_battery_temp=350, num_host_shards=1, num_device_shards=1,
                 known_devices_file='.known_devices'):
  # Fans one logical tester out into num_host_shards bot entries named
  # '<name> (1)' .. '<name> (N)', each attached to the builder previously
  # registered for this platform/bitness via _AddBuildSpec.
  parent_builder = _builders[platform][target_bits]
  for shard_index in xrange(num_host_shards):  # py2 recipe code: xrange
    builder_name = '%s (%d)' % (name, shard_index + 1)
    SPEC['builders'][builder_name] = _TestSpec(
        parent_builder, perf_id, platform, target_bits, max_battery_temp,
        shard_index, num_host_shards, num_device_shards, known_devices_file)
# Builders (one per platform/bitness; testers below attach to these).
_AddBuildSpec('Android Builder', 'android', target_bits=32)
_AddBuildSpec('Android arm64 Builder', 'android')
_AddBuildSpec('Win Builder', 'win', target_bits=32)
_AddBuildSpec('Win x64 Builder', 'win')
_AddBuildSpec('Mac Builder', 'mac')
_AddBuildSpec('Linux Builder', 'linux')

# Android device testers: 7 device shards spread over 3 host shards each.
_AddTestSpec('Android Galaxy S5 Perf', 'android-galaxy-s5', 'android',
             target_bits=32, num_device_shards=7, num_host_shards=3)
_AddTestSpec('Android Nexus5 Perf', 'android-nexus5', 'android',
             target_bits=32, num_device_shards=7, num_host_shards=3)
_AddTestSpec('Android Nexus5X Perf', 'android-nexus5X', 'android',
             target_bits=32, num_device_shards=7, num_host_shards=3)
_AddTestSpec('Android Nexus6 Perf', 'android-nexus6', 'android',
             target_bits=32, num_device_shards=7, num_host_shards=3)
_AddTestSpec('Android Nexus7v2 Perf', 'android-nexus7v2', 'android',
             target_bits=32, num_device_shards=7, num_host_shards=3)
_AddTestSpec('Android Nexus9 Perf', 'android-nexus9', 'android',
             num_device_shards=7, num_host_shards=3)
_AddTestSpec('Android One Perf', 'android-one', 'android',
             target_bits=32, num_device_shards=7, num_host_shards=3)

# Desktop testers.
_AddTestSpec('Win Zenbook Perf', 'win-zenbook', 'win',
             num_host_shards=5)
_AddTestSpec('Win 10 Perf', 'chromium-rel-win10', 'win',
             num_host_shards=5)
_AddTestSpec('Win 8 Perf', 'chromium-rel-win8-dual', 'win',
             num_host_shards=5)
_AddTestSpec('Win 7 Perf', 'chromium-rel-win7-dual', 'win',
             target_bits=32, num_host_shards=5)
_AddTestSpec('Win 7 x64 Perf', 'chromium-rel-win7-x64-dual', 'win',
             num_host_shards=5)
_AddTestSpec('Win 7 ATI GPU Perf', 'chromium-rel-win7-gpu-ati', 'win',
             num_host_shards=5)
_AddTestSpec('Win 7 Intel GPU Perf', 'chromium-rel-win7-gpu-intel', 'win',
             num_host_shards=5)
_AddTestSpec('Win 7 Nvidia GPU Perf', 'chromium-rel-win7-gpu-nvidia', 'win',
             num_host_shards=5)
_AddTestSpec('Win 7 Low-End Perf', 'chromium-rel-win7-single', 'win',
             target_bits=32, num_host_shards=2)
_AddTestSpec('Mac 10.11 Perf', 'chromium-rel-mac11', 'mac',
             num_host_shards=5)
_AddTestSpec('Mac 10.10 Perf', 'chromium-rel-mac10', 'mac',
             num_host_shards=5)
_AddTestSpec('Mac Retina Perf', 'chromium-rel-mac-retina', 'mac',
             num_host_shards=5)
_AddTestSpec('Mac HDD Perf', 'chromium-rel-mac-hdd', 'mac',
             num_host_shards=5)
_AddTestSpec('Linux Perf', 'linux-release', 'linux',
             num_host_shards=5)
| bsd-3-clause |
noironetworks/group-based-policy | gbpservice/neutron/services/grouppolicy/drivers/nsp_manager.py | 1 | 7884 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
import sqlalchemy as sa
from gbpservice.neutron.db import api as db_api
class ServicePolicyPTGIpAddressMapping(model_base.BASEV2):
    """Service Policy to IP Address mapping DB.

    Composite primary key (service_policy_id, policy_target_group); each
    row records the IP address reserved for a PTG under a service policy.
    """
    __tablename__ = 'gpm_service_policy_ipaddress_mappings'
    service_policy_id = sa.Column(
        sa.String(36), sa.ForeignKey('gp_network_service_policies.id'),
        nullable=False, primary_key=True)
    policy_target_group = sa.Column(
        sa.String(36), sa.ForeignKey('gp_policy_target_groups.id'),
        nullable=False, primary_key=True)
    # NOTE: no ondelete cascade here, unlike the FIP mapping tables below.
    ipaddress = sa.Column(sa.String(36))
class ServicePolicyPTGFipMapping(model_base.BASEV2):
    """Service Policy to FIP Address mapping DB.

    Composite primary key over (service_policy_id, policy_target_group_id,
    floatingip_id); all three FKs cascade on delete.
    """
    __tablename__ = 'gpm_service_policy_fip_mappings'
    service_policy_id = sa.Column(
        sa.String(36), sa.ForeignKey('gp_network_service_policies.id',
                                     ondelete='CASCADE'),
        nullable=False, primary_key=True)
    policy_target_group_id = sa.Column(
        sa.String(36), sa.ForeignKey('gp_policy_target_groups.id',
                                     ondelete='CASCADE'),
        nullable=False, primary_key=True)
    floatingip_id = sa.Column(sa.String(36),
                              sa.ForeignKey('floatingips.id',
                                            ondelete='CASCADE'),
                              nullable=False,
                              primary_key=True)
class PolicyTargetFloatingIPMapping(model_base.BASEV2):
    """Mapping of PolicyTarget to Floating IP.

    A policy target may own several floating IPs, hence the composite
    primary key; both FKs cascade on delete.
    """
    __tablename__ = 'gpm_pt_floatingip_mappings'
    policy_target_id = sa.Column(
        sa.String(36), sa.ForeignKey('gp_policy_targets.id',
                                     ondelete='CASCADE'),
        nullable=False, primary_key=True)
    floatingip_id = sa.Column(sa.String(36),
                              sa.ForeignKey('floatingips.id',
                                            ondelete='CASCADE'),
                              nullable=False,
                              primary_key=True)
class ServicePolicyQosPolicyMapping(model_base.BASEV2):
    """Mapping of a NSP to a Neutron QoS Policy.

    One row per network service policy (service_policy_id is the sole
    primary key). The QoS policy FK is RESTRICT, so the Neutron QoS policy
    cannot be deleted while a mapping still references it.
    """
    __tablename__ = 'gpm_qos_policy_mappings'
    service_policy_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('gp_network_service_policies.id',
                      ondelete='CASCADE'),
        nullable=False,
        primary_key=True
    )
    qos_policy_id = sa.Column(
        sa.String(36),
        sa.ForeignKey('qos_policies.id',
                      ondelete='RESTRICT'),
        nullable=False
    )
class NetworkServicePolicyMappingMixin(object):
    """CRUD helpers for the network-service-policy mapping tables above.

    All methods open a reader/writer transaction via db_api and operate on
    context.session; setters add rows, getters return model objects (or
    lists), deleters remove any matching rows.
    """

    def _set_policy_ipaddress_mapping(self, context, service_policy_id,
                                      policy_target_group, ipaddress):
        """Record the IP address reserved for a PTG under a service policy."""
        with db_api.CONTEXT_WRITER.using(context):
            session = context.session
            mapping = ServicePolicyPTGIpAddressMapping(
                service_policy_id=service_policy_id,
                policy_target_group=policy_target_group, ipaddress=ipaddress)
            session.add(mapping)

    def _get_ptg_policy_ipaddress_mapping(self, context, policy_target_group):
        """Return the first IP mapping for the PTG, or None."""
        with db_api.CONTEXT_READER.using(context):
            session = context.session
            return (session.query(ServicePolicyPTGIpAddressMapping).
                    filter_by(policy_target_group=policy_target_group).first())

    def _delete_policy_ipaddress_mapping(self, context, policy_target_group):
        """Delete the PTG's IP mapping if one exists (no-op otherwise)."""
        with db_api.CONTEXT_WRITER.using(context):
            session = context.session
            ip_mapping = session.query(
                ServicePolicyPTGIpAddressMapping).filter_by(
                    policy_target_group=policy_target_group).first()
            if ip_mapping:
                session.delete(ip_mapping)

    def _set_ptg_policy_fip_mapping(self, context, service_policy_id,
                                    policy_target_group_id, fip_id):
        """Associate a floating IP with a PTG under a service policy."""
        with db_api.CONTEXT_WRITER.using(context):
            session = context.session
            mapping = ServicePolicyPTGFipMapping(
                service_policy_id=service_policy_id,
                policy_target_group_id=policy_target_group_id,
                floatingip_id=fip_id)
            session.add(mapping)

    def _get_ptg_policy_fip_mapping(self, context, policy_target_group_id):
        """Return all FIP mappings for the PTG (possibly empty list)."""
        with db_api.CONTEXT_READER.using(context):
            session = context.session
            return (session.query(ServicePolicyPTGFipMapping).
                    filter_by(policy_target_group_id=policy_target_group_id).
                    all())

    def _delete_ptg_policy_fip_mapping(self, context, policy_target_group_id):
        """Delete every FIP mapping attached to the PTG."""
        with db_api.CONTEXT_WRITER.using(context):
            session = context.session
            mappings = session.query(
                ServicePolicyPTGFipMapping).filter_by(
                    policy_target_group_id=policy_target_group_id).all()
            for mapping in mappings:
                session.delete(mapping)

    def _set_pt_floating_ips_mapping(self, context, policy_target_id, fip_ids):
        """Associate each floating IP in fip_ids with the policy target."""
        with db_api.CONTEXT_WRITER.using(context):
            session = context.session
            for fip_id in fip_ids:
                mapping = PolicyTargetFloatingIPMapping(
                    policy_target_id=policy_target_id, floatingip_id=fip_id)
                session.add(mapping)

    def _set_pts_floating_ips_mapping(self, context, pt_fip_map):
        """Bulk variant: pt_fip_map maps policy_target_id -> list of FIP ids."""
        with db_api.CONTEXT_WRITER.using(context):
            for policy_target_id in pt_fip_map:
                self._set_pt_floating_ips_mapping(
                    context, policy_target_id,
                    pt_fip_map[policy_target_id])

    def _get_pt_floating_ip_mapping(self, context, policy_target_id):
        """Return all FIP mappings for the policy target."""
        with db_api.CONTEXT_READER.using(context):
            session = context.session
            return (session.query(PolicyTargetFloatingIPMapping).
                    filter_by(policy_target_id=policy_target_id).all())

    def _delete_pt_floating_ip_mapping(self, context, policy_target_id):
        """Delete every FIP mapping attached to the policy target."""
        with db_api.CONTEXT_WRITER.using(context):
            session = context.session
            fip_mappings = session.query(
                PolicyTargetFloatingIPMapping).filter_by(
                    policy_target_id=policy_target_id).all()
            for fip_mapping in fip_mappings:
                session.delete(fip_mapping)

    def _get_nsp_qos_mapping(self, context, service_policy_id):
        """Return the QoS mapping row for the service policy, or None."""
        with db_api.CONTEXT_READER.using(context):
            session = context.session
            return (session.query(ServicePolicyQosPolicyMapping).
                    filter_by(service_policy_id=service_policy_id).first())

    def _set_nsp_qos_mapping(self, context, service_policy_id, qos_policy_id):
        """Associate a Neutron QoS policy with the service policy."""
        with db_api.CONTEXT_WRITER.using(context):
            session = context.session
            mapping = ServicePolicyQosPolicyMapping(
                service_policy_id=service_policy_id,
                qos_policy_id=qos_policy_id)
            session.add(mapping)

    def _delete_nsp_qos_mapping(self, context, mapping):
        """Delete a previously fetched QoS mapping row (None is a no-op)."""
        if mapping:
            with db_api.CONTEXT_WRITER.using(context):
                session = context.session
                session.delete(mapping)
| apache-2.0 |
endlessm/chromium-browser | third_party/llvm/lldb/examples/summaries/cocoa/CFArray.py | 13 | 7899 | """
LLDB AppKit formatters
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
See https://llvm.org/LICENSE.txt for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""
# example summary provider for NSArray
# the real summary is now C++ code built into LLDB
import lldb
import ctypes
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import lldb.formatters.Logger
# Python 2/3 compatibility: ``basestring`` only exists on Python 2.
try:
    basestring
except NameError:
    basestring = str

# Module-wide metrics recording how each summary lookup was resolved.
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
# much less functional than the other two cases below
# just runs code to get to the count and then returns
# no children
class NSArrayKVC_SynthProvider:
    """Fallback provider: obtains the element count by evaluating
    ``(int)[obj count]`` in the debuggee; exposes no children."""

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, dict, params):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.update()

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture()

    def num_children(self):
        """Return the count, or an error string if evaluation failed."""
        logger = lldb.formatters.Logger.Logger()
        stream = lldb.SBStream()
        self.valobj.GetExpressionPath(stream)
        num_children_vo = self.valobj.CreateValueFromExpression(
            "count", "(int)[" + stream.GetData() + " count]")
        if num_children_vo.IsValid():
            return num_children_vo.GetValueAsUnsigned(0)
        # NOTE: returns a string on failure; CFArray_SummaryProvider's
        # int() conversion turns that into a "no summary" result.
        return "<variable is not NSArray>"
# much less functional than the other two cases below
# just runs code to get to the count and then returns
# no children
class NSArrayCF_SynthProvider:
    """Count-only provider for __NSCFArray: reads the count field directly
    from memory instead of running code in the debuggee."""

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, dict, params):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.sys_params = params
        # Cache the unsigned long SBType once per params object.
        if not (self.sys_params.types_cache.ulong):
            self.sys_params.types_cache.ulong = self.valobj.GetType(
            ).GetBasicType(lldb.eBasicTypeUnsignedLong)
        self.update()

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture()

    def num_children(self):
        # The count is stored right after the CFRuntimeBase header.
        logger = lldb.formatters.Logger.Logger()
        num_children_vo = self.valobj.CreateChildAtOffset(
            "count", self.sys_params.cfruntime_size, self.sys_params.types_cache.ulong)
        return num_children_vo.GetValueAsUnsigned(0)
class NSArrayI_SynthProvider:
    """Count-only provider for __NSArrayI (immutable NSArray) instances."""

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, dict, params):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.sys_params = params
        # Cache the signed long SBType once per params object.
        if not(self.sys_params.types_cache.long):
            self.sys_params.types_cache.long = self.valobj.GetType(
            ).GetBasicType(lldb.eBasicTypeLong)
        self.update()

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture()

    # skip the isa pointer and get at the size
    def num_children(self):
        logger = lldb.formatters.Logger.Logger()
        count = self.valobj.CreateChildAtOffset(
            "count",
            self.sys_params.pointer_size,
            self.sys_params.types_cache.long)
        return count.GetValueAsUnsigned(0)
class NSArrayM_SynthProvider:
    """Count-only provider for __NSArrayM (mutable NSArray) instances."""

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, dict, params):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.sys_params = params
        # Lazily populate the shared type cache with the signed long type.
        cache = self.sys_params.types_cache
        if not cache.long:
            long_type = self.valobj.GetType().GetBasicType(
                lldb.eBasicTypeLong)
            cache.long = long_type
        self.update()

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        self.adjust_for_architecture()

    def num_children(self):
        """Read the element count stored one pointer past the isa pointer."""
        logger = lldb.formatters.Logger.Logger()
        count_vo = self.valobj.CreateChildAtOffset(
            "count",
            self.sys_params.pointer_size,
            self.sys_params.types_cache.long)
        return count_vo.GetValueAsUnsigned(0)
# this is the actual synth provider, but is just a wrapper that checks
# whether valobj is an instance of __NSArrayI or __NSArrayM and sets up an
# appropriate backend layer to do the computations
class NSArray_SynthProvider:
    """Dispatcher provider: inspects the runtime class of valobj and wraps
    the matching concrete provider (__NSArrayI/__NSArrayM/__NSCFArray, with
    the KVC expression-based provider as a catch-all)."""

    def adjust_for_architecture(self):
        pass

    def __init__(self, valobj, dict):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        self.adjust_for_architecture()
        self.error = False
        self.wrapper = self.make_wrapper()
        self.invalid = (self.wrapper is None)

    def num_children(self):
        logger = lldb.formatters.Logger.Logger()
        if self.wrapper is None:
            return 0
        return self.wrapper.num_children()

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        if self.wrapper is None:
            return
        self.wrapper.update()

    # this code acts as our defense against NULL and uninitialized
    # NSArray pointers, which makes it much longer than it would be otherwise
    def make_wrapper(self):
        logger = lldb.formatters.Logger.Logger()
        if self.valobj.GetValueAsUnsigned() == 0:
            self.error = True
            return lldb.runtime.objc.objc_runtime.InvalidPointer_Description(
                True)
        else:
            global statistics
            class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
                self.valobj, statistics)
            if wrapper:
                # prepare_class_detection already produced an error wrapper.
                self.error = True
                return wrapper
            name_string = class_data.class_name()
            logger >> "Class name is " + str(name_string)
            # NOTE(review): the builtin `dict` type object is passed as the
            # second argument below; the concrete providers ignore it, but
            # an actual dictionary was presumably intended — verify.
            if name_string == '__NSArrayI':
                wrapper = NSArrayI_SynthProvider(
                    self.valobj, dict, class_data.sys_params)
                statistics.metric_hit('code_notrun', self.valobj.GetName())
            elif name_string == '__NSArrayM':
                wrapper = NSArrayM_SynthProvider(
                    self.valobj, dict, class_data.sys_params)
                statistics.metric_hit('code_notrun', self.valobj.GetName())
            elif name_string == '__NSCFArray':
                wrapper = NSArrayCF_SynthProvider(
                    self.valobj, dict, class_data.sys_params)
                statistics.metric_hit('code_notrun', self.valobj.GetName())
            else:
                wrapper = NSArrayKVC_SynthProvider(
                    self.valobj, dict, class_data.sys_params)
                statistics.metric_hit(
                    'unknown_class', str(
                        self.valobj.GetName()) + " seen as " + name_string)
            return wrapper
def CFArray_SummaryProvider(valobj, dict):
    """Summary provider for NSArray/CFArray values.

    Returns an Xcode-style summary string such as '@"3 objects"', an error
    message from the wrapper, or 'Summary Unavailable'.
    """
    logger = lldb.formatters.Logger.Logger()
    provider = NSArray_SynthProvider(valobj, dict)
    if not provider.invalid:
        if provider.error:
            return provider.wrapper.message()
        try:
            summary = int(provider.num_children())
        except (TypeError, ValueError):
            # num_children may return an error string (or other non-numeric
            # value) when detection fails; treat that as "no summary".
            # BUG FIX: this was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            summary = None
        logger >> "provider gave me " + str(summary)
        if summary is None:
            summary = '<variable is not NSArray>'
        elif isinstance(summary, basestring):
            pass
        else:
            # we format it like it were a CFString to make it look the same as
            # the summary from Xcode
            summary = '@"' + str(summary) + \
                (" objects" if summary != 1 else " object") + '"'
        return summary
    return 'Summary Unavailable'
def __lldb_init_module(debugger, dict):
    """Called automatically by LLDB when this module is imported; registers
    CFArray_SummaryProvider for the NSArray/CFArrayRef types."""
    debugger.HandleCommand(
        "type summary add -F CFArray.CFArray_SummaryProvider NSArray CFArrayRef CFMutableArrayRef")
| bsd-3-clause |
lyarwood/sosreport | sos/plugins/apache.py | 6 | 2338 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Apache(Plugin):
    """Apache http daemon
    """
    plugin_name = "apache"
    profiles = ('webserver', 'openshift')
    # (name, description, speed hint, default) tuples as expected by sos;
    # enabling 'log' makes the distro subclasses grab ALL apache logs.
    option_list = [
        ("log", "gathers all apache logs", "slow", False)
    ]
class RedHatApache(Apache, RedHatPlugin):
    # Plugin triggers when the RHEL-style httpd config is present.
    files = ('/etc/httpd/conf/httpd.conf',)

    def setup(self):
        """Collect httpd configuration and (size-limited) current logs."""
        super(RedHatApache, self).setup()
        self.add_copy_spec([
            "/etc/httpd/conf/httpd.conf",
            "/etc/httpd/conf.d/*.conf",
            "/etc/httpd/conf.modules.d/*.conf"
        ])
        # Never capture credentials.
        self.add_forbidden_path("/etc/httpd/conf/password.conf")

        # collect only the current log set by default (5 MiB cap each)
        self.add_copy_spec_limit("/var/log/httpd/access_log", 5)
        self.add_copy_spec_limit("/var/log/httpd/error_log", 5)
        self.add_copy_spec_limit("/var/log/httpd/ssl_access_log", 5)
        self.add_copy_spec_limit("/var/log/httpd/ssl_error_log", 5)
        if self.get_option("log"):
            self.add_copy_spec("/var/log/httpd/*")
class DebianApache(Apache, DebianPlugin, UbuntuPlugin):
    # Plugin triggers when the Debian-style apache2 config is present.
    files = ('/etc/apache2/apache2.conf',)

    def setup(self):
        """Collect apache2 configuration and (size-limited) current logs."""
        super(DebianApache, self).setup()
        self.add_copy_spec([
            "/etc/apache2/*",
            "/etc/default/apache2"
        ])

        # collect only the current log set by default (15 MiB cap each)
        self.add_copy_spec_limit("/var/log/apache2/access_log", 15)
        self.add_copy_spec_limit("/var/log/apache2/error_log", 15)
        if self.get_option("log"):
            self.add_copy_spec("/var/log/apache2/*")

# vim: set et ts=4 sw=4 :
| gpl-2.0 |
sudovijay/youtube-dl | youtube_dl/extractor/xminus.py | 118 | 2776 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_chr,
compat_ord,
)
from ..utils import (
int_or_none,
parse_filesize,
)
class XMinusIE(InfoExtractor):
    """Extractor for x-minus.org backing-track pages."""

    _VALID_URL = r'https?://(?:www\.)?x-minus\.org/track/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html',
        'md5': '401a15f2d2dcf6d592cb95528d72a2a8',
        'info_dict': {
            'id': '4542',
            'ext': 'mp3',
            'title': 'Леонид Агутин-Песенка шофера',
            'duration': 156,
            'tbr': 320,
            'filesize_approx': 5900000,
            'view_count': int,
            'description': 'md5:03238c5b663810bc79cf42ef3c03e371',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Metadata is embedded in inline JS assignments (minus_track.*).
        artist = self._html_search_regex(
            r'minus_track\.artist="(.+?)"', webpage, 'artist')
        title = artist + '-' + self._html_search_regex(
            r'minus_track\.title="(.+?)"', webpage, 'title')
        duration = int_or_none(self._html_search_regex(
            r'minus_track\.dur_sec=\'([0-9]*?)\'',
            webpage, 'duration', fatal=False))
        filesize_approx = parse_filesize(self._html_search_regex(
            r'<div id="finfo"[^>]*>\s*↓\s*([0-9.]+\s*[a-zA-Z][bB])',
            webpage, 'approximate filesize', fatal=False))
        tbr = int_or_none(self._html_search_regex(
            r'<div class="quality[^"]*"></div>\s*([0-9]+)\s*kbps',
            webpage, 'bitrate', fatal=False))
        view_count = int_or_none(self._html_search_regex(
            r'<div class="quality.*?► ([0-9]+)',
            webpage, 'view count', fatal=False))
        description = self._html_search_regex(
            r'(?s)<div id="song_texts">(.*?)</div><br',
            webpage, 'song lyrics', fatal=False)
        if description:
            # Normalize CR line breaks (with surrounding spaces) to LF.
            description = re.sub(' *\r *', '\n', description)

        # The download token is obfuscated: reverse the string, then shift
        # every character down by one code point except the one at index 3.
        enc_token = self._html_search_regex(
            r'minus_track\.s?tkn="(.+?)"', webpage, 'enc_token')
        token = ''.join(
            c if pos == 3 else compat_chr(compat_ord(c) - 1)
            for pos, c in enumerate(reversed(enc_token)))
        video_url = 'http://x-minus.org/dwlf/%s/%s.mp3' % (video_id, token)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'duration': duration,
            'filesize_approx': filesize_approx,
            'tbr': tbr,
            'view_count': view_count,
            'description': description,
        }
| unlicense |
gienkov/ACE3 | tools/setup.py | 21 | 3794 | #!/usr/bin/env python3
#######################
# ACE3 Setup Script #
#######################
import os
import sys
import shutil
import platform
import subprocess
import winreg
######## GLOBALS #########
MAINDIR = "z"
PROJECTDIR = "ace"
CBA = "P:\\x\\cba"
##########################
def main():
    """Interactive ACE3 dev-environment setup.

    Creates two junction links pointing at the project folder and copies the
    bundled CBA includes to P:. Returns 0 on success, -1 if the CBA includes
    already existed, and a positive error code on failure.
    """
    FULLDIR = "{}\\{}".format(MAINDIR, PROJECTDIR)

    print("""
######################################
# ACE3 Development Environment Setup #
######################################
This script will create your ACE3 dev environment for you.
Before you run this, you should already have:
- The Arma 3 Tools installed properly via Steam
- A properly set up P-drive
If you have not done those things yet, please abort this script in the next step and do so first.
This script will create two hard links on your system, both pointing to your ACE3 project folder:
[Arma 3 installation directory]\\{} => ACE3 project folder
P:\\{} => ACE3 project folder
It will also copy the required CBA includes to {}, if you do not have the CBA source code already.""".format(FULLDIR, FULLDIR, CBA))
    print("\n")

    # Locate the Arma 3 installation via the registry.
    try:
        reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
        key = winreg.OpenKey(
            reg, r"SOFTWARE\Wow6432Node\bohemia interactive\arma 3")
        armapath = winreg.EnumValue(key, 1)[1]
    except OSError:
        print("Failed to determine Arma 3 Path.")
        return 1

    if not os.path.exists("P:\\"):
        print("No P-drive detected.")
        return 2

    scriptpath = os.path.realpath(__file__)
    projectpath = os.path.dirname(os.path.dirname(scriptpath))

    print("# Detected Paths:")
    print(" Arma Path: {}".format(armapath))
    print(" Project Path: {}".format(projectpath))

    repl = input("\nAre these correct? (y/n): ")
    if repl.lower() != "y":
        return 3

    print("\n# Creating links ...")

    if os.path.exists("P:\\{}\\{}".format(MAINDIR, PROJECTDIR)):
        print("Link on P: already exists. Please finish the setup manually.")
        return 4
    if os.path.exists(os.path.join(armapath, MAINDIR, PROJECTDIR)):
        print("Link in Arma directory already exists. Please finish the setup manually.")
        return 5

    try:
        if not os.path.exists("P:\\{}".format(MAINDIR)):
            os.mkdir("P:\\{}".format(MAINDIR))
        if not os.path.exists(os.path.join(armapath, MAINDIR)):
            os.mkdir(os.path.join(armapath, MAINDIR))
        subprocess.call(["cmd", "/c", "mklink", "/J",
                         "P:\\{}\\{}".format(MAINDIR, PROJECTDIR), projectpath])
        subprocess.call(["cmd", "/c", "mklink", "/J",
                         os.path.join(armapath, MAINDIR, PROJECTDIR),
                         projectpath])
    except OSError as error:
        # BUG FIX: a stray bare `raise` here previously made the message
        # below and the error return code unreachable.
        print(error)
        print("Something went wrong during the link creation. Please finish the setup manually.")
        return 6

    print("# Links created successfully.")

    print("\n# Copying required CBA includes ...")

    if os.path.exists(CBA):
        print("{} already exists, skipping.".format(CBA))
        return -1

    try:
        shutil.copytree(os.path.join(projectpath, "tools", "cba"), CBA)
    except (OSError, shutil.Error) as error:
        # BUG FIX: same stray `raise` pattern as above.
        print(error)
        print("Something went wrong while copying CBA includes. Please copy tools\\cba to {} manually.".format(CBA))
        return 7

    print("# CBA includes copied successfully to {}.".format(CBA))
    return 0
exitcode = main()
if exitcode > 0:
print("\nSomething went wrong during the setup. Make sure you run this script as administrator. If these issues persist, please follow the instructions on the ACE3 wiki to perform the setup manually.")
else:
print("\nSetup successfully completed.")
input("\nPress enter to exit ...")
sys.exit(exitcode)
| gpl-2.0 |
Tehsmash/nova | nova/vnc/xvp_proxy.py | 40 | 6230 | #!/usr/bin/env python
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Eventlet WSGI Services to proxy VNC for XCP protocol."""
import socket
import eventlet
import eventlet.green
import eventlet.greenio
import eventlet.wsgi
from oslo_config import cfg
from oslo_log import log as logging
import webob
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.i18n import _LI
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
# Listen address/port configuration for the XCP VNC proxy service.
xvp_proxy_opts = [
    cfg.IntOpt('xvpvncproxy_port',
               default=6081,
               help='Port that the XCP VNC proxy should bind to'),
    cfg.StrOpt('xvpvncproxy_host',
               default='0.0.0.0',
               help='Address that the XCP VNC proxy should bind to'),
]

CONF = cfg.CONF
CONF.register_opts(xvp_proxy_opts)
class XCPVNCProxy(object):
    """Class to use the xvp auth protocol to proxy instance vnc consoles."""

    def one_way_proxy(self, source, dest):
        """Proxy tcp connection from source to dest.

        Runs until the source side stops producing data or the dest side
        fails to accept a write.
        """
        while True:
            try:
                d = source.recv(32384)
            except Exception:
                d = None
            # If recv fails, send a write shutdown the other direction
            if d is None or len(d) == 0:
                dest.shutdown(socket.SHUT_WR)
                break
            # If send fails, terminate proxy in both directions
            try:
                # sendall raises an exception on write error, unlike send
                dest.sendall(d)
            except Exception:
                source.close()
                dest.close()
                break

    def handshake(self, req, connect_info, sockets):
        """Execute hypervisor-specific vnc auth handshaking (if needed).

        On success, stores the client and server sockets in the ``sockets``
        dict; on failure returns leaving ``sockets`` unpopulated.
        """
        host = connect_info['host']
        port = int(connect_info['port'])
        server = eventlet.connect((host, port))
        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            server.sendall("CONNECT %s HTTP/1.1\r\n\r\n" %
                           connect_info['internal_access_path'])
            # Read the response byte-by-byte until the end of the HTTP
            # headers, bailing out if it exceeds 4096 bytes.
            data = ""
            while True:
                b = server.recv(1)
                if b:
                    data += b
                    if data.find("\r\n\r\n") != -1:
                        # NOTE(review): str.find() returns an index, so this
                        # condition is only true when "200" appears at
                        # position 0 of the status line — it does NOT detect
                        # a missing "200" (find() == -1 is truthy). Verify
                        # intent before relying on this error path.
                        if not data.split("\r\n")[0].find("200"):
                            LOG.info(_LI("Error in handshake format: %s"),
                                     data)
                            return
                        break
                if not b or len(data) > 4096:
                    LOG.info(_LI("Error in handshake: %s"), data)
                    return

        client = req.environ['eventlet.input'].get_socket()
        client.sendall("HTTP/1.1 200 OK\r\n\r\n")
        sockets['client'] = client
        sockets['server'] = server

    def proxy_connection(self, req, connect_info, start_response):
        """Spawn bi-directional vnc proxy."""
        sockets = {}
        t0 = eventlet.spawn(self.handshake, req, connect_info, sockets)
        t0.wait()

        if not sockets.get('client') or not sockets.get('server'):
            LOG.info(_LI("Invalid request: %s"), req)
            start_response('400 Invalid Request',
                           [('content-type', 'text/html')])
            return "Invalid Request"

        client = sockets['client']
        server = sockets['server']
        # Pump data in both directions until both greenthreads finish.
        t1 = eventlet.spawn(self.one_way_proxy, client, server)
        t2 = eventlet.spawn(self.one_way_proxy, server, client)
        t1.wait()
        t2.wait()
        # Make sure our sockets are closed
        server.close()
        client.close()

    def __call__(self, environ, start_response):
        """WSGI entry point: validate the console token, then proxy."""
        try:
            req = webob.Request(environ)
            LOG.info(_LI("Request: %s"), req)
            token = req.params.get('token')
            if not token:
                LOG.info(_LI("Request made with missing token: %s"), req)
                start_response('400 Invalid Request',
                               [('content-type', 'text/html')])
                return "Invalid Request"
            ctxt = context.get_admin_context()
            api = consoleauth_rpcapi.ConsoleAuthAPI()
            connect_info = api.check_token(ctxt, token)
            if not connect_info:
                LOG.info(_LI("Request made with invalid token: %s"), req)
                start_response('401 Not Authorized',
                               [('content-type', 'text/html')])
                return "Not Authorized"
            return self.proxy_connection(req, connect_info, start_response)
        except Exception as e:
            # Last-resort guard so a single bad request cannot kill the
            # WSGI worker; the error is logged and the request dropped.
            LOG.info(_LI("Unexpected error: %s"), e)
class SafeHttpProtocol(eventlet.wsgi.HttpProtocol):
    """HttpProtocol wrapper to suppress IOErrors.

    The proxy code above always shuts down client connections, so we catch
    the IOError that raises when the SocketServer tries to flush the
    connection.
    """

    def finish(self):
        try:
            eventlet.green.BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
        except IOError:
            # Expected: the proxied socket was already shut down above.
            pass
        eventlet.greenio.shutdown_safe(self.connection)
        self.connection.close()
def get_wsgi_server():
    """Build the eventlet WSGI server hosting the XCP VNC proxy app."""
    LOG.info(_LI("Starting nova-xvpvncproxy node (version %s)"),
             version.version_string_with_package())
    proxy_app = XCPVNCProxy()
    return wsgi.Server("XCP VNC Proxy",
                       proxy_app,
                       protocol=SafeHttpProtocol,
                       host=CONF.xvpvncproxy_host,
                       port=CONF.xvpvncproxy_port)
| apache-2.0 |
dynaryu/inasafe | safe/gui/tools/test/test_function_options_dialog.py | 3 | 10292 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid and World Bank
- **GUI Test Cases.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from safe_extras.parameters.boolean_parameter import BooleanParameter
from safe_extras.parameters.dict_parameter import DictParameter
__author__ = 'misugijunz@gmail.com'
__date__ = '15/10/2012'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import os
import sys
PARAMETERS_DIR = os.path.abspath(
os.path.join(
os.path.dirname(__file__), '..', 'safe_extras', 'parameters'))
if PARAMETERS_DIR not in sys.path:
sys.path.append(PARAMETERS_DIR)
import unittest
import logging
from collections import OrderedDict
# noinspection PyUnresolvedReferences
import qgis # pylint: disable=unused-import
# noinspection PyPackageRequirements
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QLineEdit, QCheckBox, QPushButton, QListWidget, \
QTreeWidget
from safe.test.utilities import get_qgis_app
from safe.defaults import (
default_gender_postprocessor,
age_postprocessor,
minimum_needs_selector)
from safe.common.resource_parameter import ResourceParameter
from safe.gui.tools.function_options_dialog import (
FunctionOptionsDialog)
from safe_extras.parameters.input_list_parameter import InputListParameter
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
LOGGER = logging.getLogger('InaSAFE')
class FunctionOptionsDialogTest(unittest.TestCase):
"""Test the InaSAFE GUI for Configurable Impact Functions"""
    def test_build_form(self):
        """Test that we can build a form by passing params.
        """
        dialog = FunctionOptionsDialog()

        # Define rice for minimum needs
        rice = ResourceParameter()
        rice.value = 2.8
        rice.frequency = 'weekly'
        rice.minimum_allowed_value = 1.4
        rice.maximum_allowed_value = 5.6
        rice.name = 'Rice'
        rice.unit.abbreviation = 'kg'
        rice.unit.name = 'kilogram'
        rice.unit.plural = 'kilograms'

        # Define threshold
        threshold = InputListParameter()
        threshold.name = 'Thresholds [m]'
        threshold.is_required = True
        threshold.element_type = float
        threshold.expected_type = list
        threshold.ordering = InputListParameter.AscendingOrder
        threshold.minimum_item_count = 1
        threshold.maximum_item_count = 3
        threshold.value = [1.0]  # default value

        parameter = {
            'thresholds': threshold,
            'postprocessors': OrderedDict([
                ('Gender', default_gender_postprocessor()),
                ('Age', age_postprocessor()),
                ('MinimumNeeds', minimum_needs_selector()),
            ]),
            'minimum needs': [rice]
        }
        dialog.build_form(parameter)

        # Expect one tab each for thresholds, postprocessors, minimum needs.
        message = 'There should be %s tabwidget but got %s' % (
            3, dialog.tabWidget.count())
        self.assertEqual(dialog.tabWidget.count(), 3, message)

        children = dialog.tabWidget.findChildren(QLineEdit)
        message = 'There should be %s QLineEdit but got %s' % (
            5, len(children))
        self.assertEqual(len(children), 5, message)
    def test_build_form_minimum_needs(self):
        """Test that we can build a form by passing it params.
        """
        dialog = FunctionOptionsDialog()

        # Define threshold
        threshold = InputListParameter()
        threshold.name = 'Thresholds [m]'
        threshold.is_required = True
        threshold.element_type = float
        threshold.expected_type = list
        threshold.ordering = InputListParameter.AscendingOrder
        threshold.minimum_item_count = 1
        threshold.maximum_item_count = 3
        threshold.value = [1.0]  # default value

        parameters = {
            'thresholds': threshold,
            'postprocessors': OrderedDict([
                ('Gender', default_gender_postprocessor()),
                ('Age', age_postprocessor()),
                ('MinimumNeeds', minimum_needs_selector()),
            ])
        }
        dialog.build_form(parameters)

        # Without the 'minimum needs' key there are only two tabs and
        # one fewer line edit than in test_build_form.
        assert dialog.tabWidget.count() == 2
        children = dialog.tabWidget.findChildren(QLineEdit)
        assert len(children) == 4
@staticmethod
def click_list_widget_item(list_widget, content):
"""Clicking a list widget item using User Interface
:param list_widget: the list widget to clear for
:type list_widget: QListWidget
:param content: the content text of the list widget item to click
:type content: str
"""
# iterate through widget items
items = list_widget.findItems(content, Qt.MatchExactly)
for item in items:
item.setSelected(True)
    def test_build_widget(self):
        """Test build_widget for three parameter types.

        Exercises InputListParameter (list editing through the line edit
        and add/remove buttons), BooleanParameter (checkbox), and
        DictParameter (tree widget), checking in each case that the
        returned accessor callable reflects the widget state.
        """
        dialog = FunctionOptionsDialog()
        # Define threshold
        threshold = InputListParameter()
        threshold.name = 'Thresholds [m]'
        threshold.is_required = True
        threshold.element_type = float
        threshold.expected_type = list
        threshold.ordering = InputListParameter.AscendingOrder
        threshold.minimum_item_count = 1
        threshold.maximum_item_count = 3
        threshold.value = [2.3]  # default value

        # build_widget returns a zero-argument callable (accessor) that
        # yields the parameter with its current widget value when called.
        value = dialog.build_widget(dialog.configLayout, 'foo', threshold)
        widget = dialog.findChild(QLineEdit)
        add_button = dialog.findChildren(QPushButton)[0]
        remove_button = dialog.findChildren(QPushButton)[1]
        list_widget = dialog.findChild(QListWidget)

        # initial value must be same with default
        expected_value = [2.3]
        real_value = value().value
        message = 'Expected %s but got %s' % (expected_value, real_value)
        self.assertEqual(expected_value, real_value, message)

        # change to 5.9
        # select 2.3 list item
        self.click_list_widget_item(list_widget, '2.3')
        # remove 2.3 list item
        remove_button.click()
        # typing 5.9
        widget.setText('5.9')
        # add it to list
        add_button.click()
        expected_value = [5.9]
        real_value = value().value
        message = 'Expected %s but got %s' % (expected_value, real_value)
        self.assertEqual(expected_value, real_value, message)

        # add 70
        widget.setText('70')
        # add it to list
        add_button.click()
        # NOTE(review): the expected list mixes float and int; presumably
        # element_type coercion still compares equal -- confirm if the
        # widget normalises '70' to 70.0.
        expected_value = [5.9, 70]
        real_value = value().value
        message = 'Expected %s but got %s' % (expected_value, real_value)
        self.assertEqual(expected_value, real_value, message)

        # A non-numeric entry stays in the line edit and must route a
        # ValueError through the parameter's error handler on add.
        widget.setText('bar')
        self.assertEqual('bar', widget.text())

        def trigger_error(error):
            message = 'Expected %s type but got %s' % (
                ValueError, type(error))
            self.assertIsInstance(error, ValueError, message)
        threshold.add_row_error_handler = trigger_error
        add_button.click()

        # Boolean parameter rendered as a checkbox.
        bool_param = BooleanParameter()
        bool_param.name = 'boolean checkbox'
        bool_param.value = True
        dialog = FunctionOptionsDialog()
        value = dialog.build_widget(dialog.configLayout, 'foo', bool_param)
        widget = dialog.findChild(QCheckBox)

        # initial value must be same with default
        expected_value = True
        real_value = value().value
        message = 'Expected %s but got %s' % (expected_value, real_value)
        self.assertEqual(expected_value, real_value, message)

        widget.setChecked(False)
        expected_value = False
        real_value = value().value
        message = 'Expected %s but got %s' % (expected_value, real_value)
        self.assertEqual(expected_value, real_value, message)

        # Dict parameter rendered as a two-column tree widget
        # (column 0 = key, column 1 = editable value).
        dict_param = DictParameter()
        dict_param.name = 'Dictionary tree'
        dict_param.element_type = int
        dict_param.value = {'a': 1, 'b': 2}
        dialog = FunctionOptionsDialog()
        value = dialog.build_widget(dialog.configLayout, 'foo', dict_param)
        widget = dialog.findChild(QTreeWidget)

        # initial value must be same with default
        expected_value = {'a': 1, 'b': 2}
        real_value = value().value
        message = 'Expected %s but got %s' % (expected_value, real_value)
        self.assertEqual(expected_value, real_value, message)

        expected_value = {'a': 2, 'b': 1}
        # get tree items
        tree_items = widget.invisibleRootItem()
        # set the input
        tree_items.child(0).setText(1, str(2))
        tree_items.child(1).setText(1, str(1))
        real_value = value().value
        message = 'Expected %s but got %s' % (expected_value, real_value)
        self.assertEqual(expected_value, real_value, message)
def test_parse_input(self):
function_input = {
'thresholds': lambda: [1.0],
'postprocessors': {
'Gender': {'on': lambda: True},
'Age': {
'on': lambda: True,
'params': {
'youth_ratio': lambda: 0.263,
'elderly_ratio': lambda: 0.078,
'adult_ratio': lambda: 0.659}}}}
dialog = FunctionOptionsDialog()
result = dialog.parse_input(function_input)
print result
expected = OrderedDict([
('thresholds', [1.0]),
('postprocessors', OrderedDict([
('Gender', OrderedDict([('on', True)])),
('Age', OrderedDict([
('on', True),
('params', OrderedDict([
('elderly_ratio', 0.078),
('youth_ratio', 0.263),
('adult_ratio', 0.659)]))]))]))])
# noinspection PyPep8Naming
self.maxDiff = None
self.assertDictEqual(result, expected)
if __name__ == '__main__':
    # unittest.makeSuite is deprecated; TestLoader is the supported,
    # behaviorally-equivalent way to build a suite from a TestCase class.
    suite = unittest.TestLoader().loadTestsFromTestCase(
        FunctionOptionsDialogTest)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
| gpl-3.0 |
hujiajie/chromium-crosswalk | third_party/cython/src/Cython/Compiler/UtilityCode.py | 98 | 6918 | from TreeFragment import parse_from_strings, StringParseContext
import Symtab
import Naming
import Code
class NonManglingModuleScope(Symtab.ModuleScope):
    """Module scope whose mangled names carry a fixed, caller-chosen prefix."""

    def __init__(self, prefix, *args, **kw):
        self.prefix = prefix
        self.cython_scope = None
        Symtab.ModuleScope.__init__(self, *args, **kw)

    def add_imported_entry(self, name, entry, pos):
        # Everything cimported into utility code counts as used.
        entry.used = True
        return super(NonManglingModuleScope, self).add_imported_entry(
            name, entry, pos)

    def mangle(self, prefix, name=None):
        if not name:
            return Symtab.ModuleScope.mangle(self, prefix)
        # Functions, classes etc. get the manually defined prefix instead,
        # so they are easily callable under the name that was passed to
        # CythonUtilityCode.
        manual_prefixes = (Naming.typeobj_prefix, Naming.func_prefix,
                           Naming.var_prefix, Naming.pyfunc_prefix)
        if prefix in manual_prefixes:
            prefix = self.prefix
        return "%s%s" % (prefix, name)
class CythonUtilityCodeContext(StringParseContext):
    """Parse context for utility code; lazily creates one non-mangling scope."""

    scope = None

    def find_module(self, module_name, relative_to=None, pos=None,
                    need_pxd=1):
        # Only this context's own module and the special 'cython' module
        # (pre-registered in self.modules) may be resolved.
        if module_name != self.module_name:
            try:
                return self.modules[module_name]
            except KeyError:
                raise AssertionError("Only the cython cimport is supported.")
        if self.scope is None:
            self.scope = NonManglingModuleScope(
                self.prefix, module_name, parent_module=None, context=self)
        return self.scope
class CythonUtilityCode(Code.UtilityCodeBase):
    """
    Utility code written in the Cython language itself.

    The @cname decorator can set the cname for a function, method of cdef class.
    Functions decorated with @cname('c_func_name') get the given cname.

    For cdef classes the rules are as follows:

        obj struct      -> <cname>_obj
        obj type ptr    -> <cname>_type
        methods         -> <class_cname>_<method_cname>

    For methods the cname decorator is optional, but without the decorator the
    methods will not be prototyped. See Cython.Compiler.CythonScope and
    tests/run/cythonscope.pyx for examples.
    """

    is_cython_utility = True

    def __init__(self, impl, name="__pyxutil", prefix="", requires=None,
                 file=None, from_scope=None, context=None):
        # 1) We need to delay the parsing/processing, so that all modules can be
        #    imported without import loops
        # 2) The same utility code object can be used for multiple source files;
        #    while the generated node trees can be altered in the compilation of a
        #    single file.
        # Hence, delay any processing until later.
        if context is not None:
            impl = Code.sub_tempita(impl, context, file, name)
        self.impl = impl
        self.name = name
        self.file = file
        self.prefix = prefix
        self.requires = requires or []
        self.from_scope = from_scope

    def get_tree(self, entries_only=False, cython_scope=None):
        """Parse this utility code and run the compiler pipeline over it.

        When `entries_only` is True, the pipeline is truncated right after
        AnalyseDeclarationsTransform so only scope entries are produced,
        without generating full code.
        """
        from AnalysedTreeTransforms import AutoTestDictTransform
        # The AutoTestDictTransform creates the statement "__test__ = {}",
        # which when copied into the main ModuleNode overwrites
        # any __test__ in user code; not desired
        excludes = [AutoTestDictTransform]

        import Pipeline, ParseTreeTransforms
        context = CythonUtilityCodeContext(self.name)
        context.prefix = self.prefix
        context.cython_scope = cython_scope
        #context = StringParseContext(self.name)
        tree = parse_from_strings(self.name, self.impl, context=context,
                                  allow_struct_enum_decorator=True)
        pipeline = Pipeline.create_pipeline(context, 'pyx',
                                            exclude_classes=excludes)

        if entries_only:
            p = []
            for t in pipeline:
                p.append(t)
                # BUG FIX: the original tested `isinstance(p, ...)`, but
                # `p` is the accumulator *list*, so the check was always
                # False and the pipeline was never truncated. The test
                # must inspect the transform `t` itself.
                if isinstance(t, ParseTreeTransforms.AnalyseDeclarationsTransform):
                    break

            pipeline = p

        transform = ParseTreeTransforms.CnameDirectivesTransform(context)
        # InterpretCompilerDirectives already does a cdef declarator check
        #before = ParseTreeTransforms.DecoratorTransform
        before = ParseTreeTransforms.InterpretCompilerDirectives
        pipeline = Pipeline.insert_into_pipeline(pipeline, transform,
                                                 before=before)

        if self.from_scope:
            # Merge the caller-supplied scope into the utility module's
            # scope just before declaration analysis runs.
            def scope_transform(module_node):
                module_node.scope.merge_in(self.from_scope)
                return module_node

            transform = ParseTreeTransforms.AnalyseDeclarationsTransform
            pipeline = Pipeline.insert_into_pipeline(pipeline, scope_transform,
                                                     before=transform)

        (err, tree) = Pipeline.run_pipeline(pipeline, tree, printtree=False)
        assert not err, err
        return tree

    def put_code(self, output):
        # Cython utility code contributes through its node tree, not by
        # emitting C code directly, so there is nothing to write here.
        pass

    @classmethod
    def load_as_string(cls, util_code_name, from_file=None, **kwargs):
        """
        Load a utility code as a string. Returns (proto, implementation)
        """
        util = cls.load(util_code_name, from_file, **kwargs)
        return util.proto, util.impl  # keep line numbers => no lstrip()

    def declare_in_scope(self, dest_scope, used=False, cython_scope=None,
                         whitelist=None):
        """
        Declare all entries from the utility code in dest_scope. Code will only
        be included for used entries. If module_name is given, declare the
        type entries with that name.
        """
        tree = self.get_tree(entries_only=True, cython_scope=cython_scope)

        entries = tree.scope.entries
        # Drop module dunders so they don't clobber the destination scope.
        entries.pop('__name__')
        entries.pop('__file__')
        entries.pop('__builtins__')
        entries.pop('__doc__')

        for name, entry in entries.iteritems():
            entry.utility_code_definition = self
            entry.used = used

        original_scope = tree.scope
        dest_scope.merge_in(original_scope, merge_unused=True,
                            whitelist=whitelist)
        tree.scope = dest_scope

        # Recursively declare any Cython-level dependencies as well.
        for dep in self.requires:
            if dep.is_cython_utility:
                dep.declare_in_scope(dest_scope)

        return original_scope
def declare_declarations_in_scope(declaration_string, env, private_type=True,
                                  *args, **kwargs):
    """
    Declare some declarations given as Cython code in declaration_string
    in scope env.
    """
    # NOTE(review): `private_type` is accepted but never used here --
    # confirm whether it should be forwarded before relying on it.
    utility = CythonUtilityCode(declaration_string, *args, **kwargs)
    utility.declare_in_scope(env)
| bsd-3-clause |
BitWriters/Zenith_project | zango/lib/python3.5/site-packages/django/contrib/gis/utils/layermapping.py | 34 | 27264 | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
http://geodjango.org/docs/layermapping.html
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
    """Base class for all LayerMapping-specific errors."""


class InvalidString(LayerMapError):
    """Raised when an OGR string value violates a model field constraint
    (e.g. exceeds the field's max_length)."""


class InvalidDecimal(LayerMapError):
    """Raised when an OGR value cannot be represented within the
    precision of the target DecimalField."""


class InvalidInteger(LayerMapError):
    """Raised when an OGR value cannot be converted to an integer."""


class MissingForeignKey(LayerMapError):
    """Raised when no related model instance matches a ForeignKey
    mapping for a feature."""
class LayerMapping(object):
    "A class that maps OGR Layers to GeoDjango Models."

    # Acceptable 'base' types for a multi-geometry type.
    MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
                   2: OGRGeomType('MultiLineString'),
                   3: OGRGeomType('MultiPolygon'),
                   OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
                   OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
                   OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
                   }

    # Acceptable Django field types and corresponding acceptable OGR
    # counterparts.
    FIELD_TYPES = {
        models.AutoField: OFTInteger,
        models.IntegerField: (OFTInteger, OFTReal, OFTString),
        models.FloatField: (OFTInteger, OFTReal),
        models.DateField: OFTDate,
        models.DateTimeField: OFTDateTime,
        models.EmailField: OFTString,
        models.TimeField: OFTTime,
        models.DecimalField: (OFTInteger, OFTReal),
        models.CharField: OFTString,
        models.SlugField: OFTString,
        models.TextField: OFTString,
        models.URLField: OFTString,
        models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
        models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
        models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
    }

    def __init__(self, model, data, mapping, layer=0,
                 source_srs=None, encoding='utf-8',
                 transaction_mode='commit_on_success',
                 transform=True, unique=None, using=None):
        """
        A LayerMapping object is initialized using the given Model (not an instance),
        a DataSource (or string path to an OGR-supported data file), and a mapping
        dictionary. See the module level docstring for more details and keyword
        argument usage.
        """
        # Getting the DataSource and the associated Layer.
        if isinstance(data, six.string_types):
            self.ds = DataSource(data, encoding=encoding)
        else:
            self.ds = data
        self.layer = self.ds[layer]

        self.using = using if using is not None else router.db_for_write(model)
        self.spatial_backend = connections[self.using].ops

        # Setting the mapping & model attributes.
        self.mapping = mapping
        self.model = model

        # Checking the layer -- initialization of the object will fail if
        # things don't check out before hand.
        self.check_layer()

        # Getting the geometry column associated with the model (an
        # exception will be raised if there is no geometry column).
        if connections[self.using].features.supports_transform:
            self.geo_field = self.geometry_field()
        else:
            # The database backend cannot transform geometries, so any
            # requested coordinate transformation is disabled.
            transform = False

        # Checking the source spatial reference system, and getting
        # the coordinate transformation object (unless the `transform`
        # keyword is set to False)
        if transform:
            self.source_srs = self.check_srs(source_srs)
            self.transform = self.coord_transform()
        else:
            self.transform = transform

        # Setting the encoding for OFTString fields, if specified.
        if encoding:
            # Making sure the encoding exists, if not a LookupError
            # exception will be thrown.
            from codecs import lookup
            lookup(encoding)
            self.encoding = encoding
        else:
            self.encoding = None

        if unique:
            self.check_unique(unique)
            transaction_mode = 'autocommit'  # Has to be set to autocommit.
            self.unique = unique
        else:
            self.unique = None

        # Setting the transaction decorator with the function in the
        # transaction modes dictionary.
        self.transaction_mode = transaction_mode
        if transaction_mode == 'autocommit':
            self.transaction_decorator = None
        elif transaction_mode == 'commit_on_success':
            self.transaction_decorator = transaction.atomic
        else:
            raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)

    # #### Checking routines used during initialization ####
    def check_fid_range(self, fid_range):
        "This checks the `fid_range` keyword."
        if fid_range:
            if isinstance(fid_range, (tuple, list)):
                return slice(*fid_range)
            elif isinstance(fid_range, slice):
                return fid_range
            else:
                raise TypeError
        else:
            return None

    def check_layer(self):
        """
        This checks the Layer metadata, and ensures that it is compatible
        with the mapping information and model. Unlike previous revisions,
        there is no need to increment through each feature in the Layer.
        """
        # The geometry field of the model is set here.
        # TODO: Support more than one geometry field / model.  However, this
        # depends on the GDAL Driver in use.
        self.geom_field = False
        self.fields = {}

        # Getting lists of the field names and the field types available in
        # the OGR Layer.
        ogr_fields = self.layer.fields
        ogr_field_types = self.layer.field_types

        # Function for determining if the OGR mapping field is in the Layer.
        def check_ogr_fld(ogr_map_fld):
            try:
                idx = ogr_fields.index(ogr_map_fld)
            except ValueError:
                raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
            return idx

        # No need to increment through each feature in the model, simply check
        # the Layer metadata against what was given in the mapping dictionary.
        for field_name, ogr_name in self.mapping.items():
            # Ensuring that a corresponding field exists in the model
            # for the given field name in the mapping.
            try:
                model_field = self.model._meta.get_field(field_name)
            except FieldDoesNotExist:
                raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)

            # Getting the string name for the Django field class (e.g., 'PointField').
            fld_name = model_field.__class__.__name__

            if isinstance(model_field, GeometryField):
                if self.geom_field:
                    raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')

                # Getting the coordinate dimension of the geometry field.
                coord_dim = model_field.dim

                try:
                    if coord_dim == 3:
                        gtype = OGRGeomType(ogr_name + '25D')
                    else:
                        gtype = OGRGeomType(ogr_name)
                except GDALException:
                    raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)

                # Making sure that the OGR Layer's Geometry is compatible.
                ltype = self.layer.geom_type
                if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
                    raise LayerMapError('Invalid mapping geometry; model has %s%s, '
                                        'layer geometry type is %s.' %
                                        (fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))

                # Setting the `geom_field` attribute w/the name of the model field
                # that is a Geometry.  Also setting the coordinate dimension
                # attribute.
                self.geom_field = field_name
                self.coord_dim = coord_dim
                fields_val = model_field
            elif isinstance(model_field, models.ForeignKey):
                if isinstance(ogr_name, dict):
                    # Is every given related model mapping field in the Layer?
                    rel_model = model_field.rel.to
                    for rel_name, ogr_field in ogr_name.items():
                        idx = check_ogr_fld(ogr_field)
                        try:
                            rel_model._meta.get_field(rel_name)
                        except FieldDoesNotExist:
                            raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                                (rel_name, rel_model.__class__.__name__))
                    fields_val = rel_model
                else:
                    raise TypeError('ForeignKey mapping must be of dictionary type.')
            else:
                # Is the model field type supported by LayerMapping?
                if model_field.__class__ not in self.FIELD_TYPES:
                    raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)

                # Is the OGR field in the Layer?
                idx = check_ogr_fld(ogr_name)
                ogr_field = ogr_field_types[idx]

                # Can the OGR field type be mapped to the Django field type?
                if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
                    raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
                                        (ogr_field, ogr_field.__name__, fld_name))
                fields_val = model_field

            self.fields[field_name] = fields_val

    def check_srs(self, source_srs):
        "Checks the compatibility of the given spatial reference object."
        if isinstance(source_srs, SpatialReference):
            sr = source_srs
        elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
            sr = source_srs.srs
        elif isinstance(source_srs, (int, six.string_types)):
            sr = SpatialReference(source_srs)
        else:
            # Otherwise just pulling the SpatialReference from the layer
            sr = self.layer.srs
        if not sr:
            raise LayerMapError('No source reference system defined.')
        else:
            return sr

    def check_unique(self, unique):
        "Checks the `unique` keyword parameter -- may be a sequence or string."
        if isinstance(unique, (list, tuple)):
            # List of fields to determine uniqueness with
            for attr in unique:
                # Every uniqueness attribute must appear in the mapping.
                if attr not in self.mapping:
                    raise ValueError
        elif isinstance(unique, six.string_types):
            # Only a single field passed in.
            if unique not in self.mapping:
                raise ValueError
        else:
            raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')

    # Keyword argument retrieval routines ####
    def feature_kwargs(self, feat):
        """
        Given an OGR Feature, this will return a dictionary of keyword arguments
        for constructing the mapped model.
        """
        # The keyword arguments for model construction.
        kwargs = {}

        # Incrementing through each model field and OGR field in the
        # dictionary mapping.
        for field_name, ogr_name in self.mapping.items():
            model_field = self.fields[field_name]

            if isinstance(model_field, GeometryField):
                # Verify OGR geometry.
                try:
                    val = self.verify_geom(feat.geom, model_field)
                except GDALException:
                    raise LayerMapError('Could not retrieve geometry from feature.')
            elif isinstance(model_field, models.base.ModelBase):
                # The related _model_, not a field was passed in -- indicating
                # another mapping for the related Model.
                val = self.verify_fk(feat, model_field, ogr_name)
            else:
                # Otherwise, verify OGR Field type.
                val = self.verify_ogr_field(feat[ogr_name], model_field)

            # Setting the keyword arguments for the field name with the
            # value obtained above.
            kwargs[field_name] = val

        return kwargs

    def unique_kwargs(self, kwargs):
        """
        Given the feature keyword arguments (from `feature_kwargs`) this routine
        will construct and return the uniqueness keyword arguments -- a subset
        of the feature kwargs.
        """
        if isinstance(self.unique, six.string_types):
            return {self.unique: kwargs[self.unique]}
        else:
            return {fld: kwargs[fld] for fld in self.unique}

    # #### Verification routines used in constructing model keyword arguments. ####
    def verify_ogr_field(self, ogr_field, model_field):
        """
        Verifies if the OGR Field contents are acceptable to the Django
        model field. If they are, the verified value is returned,
        otherwise the proper exception is raised.
        """
        if (isinstance(ogr_field, OFTString) and
                isinstance(model_field, (models.CharField, models.TextField))):
            if self.encoding:
                # The encoding for OGR data sources may be specified here
                # (e.g., 'cp437' for Census Bureau boundary files).
                val = force_text(ogr_field.value, self.encoding)
            else:
                val = ogr_field.value
            if model_field.max_length and len(val) > model_field.max_length:
                raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
                                    (model_field.name, model_field.max_length, len(val)))
        elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
            try:
                # Creating an instance of the Decimal value to use.
                d = Decimal(str(ogr_field.value))
            except DecimalInvalidOperation:
                raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)

            # Getting the decimal value as a tuple.
            dtup = d.as_tuple()
            digits = dtup[1]
            d_idx = dtup[2]  # index where the decimal is

            # Maximum amount of precision, or digits to the left of the decimal.
            max_prec = model_field.max_digits - model_field.decimal_places

            # Getting the digits to the left of the decimal place for the
            # given decimal.
            if d_idx < 0:
                n_prec = len(digits[:d_idx])
            else:
                n_prec = len(digits) + d_idx

            # If we have more than the maximum digits allowed, then throw an
            # InvalidDecimal exception.
            if n_prec > max_prec:
                raise InvalidDecimal(
                    'A DecimalField with max_digits %d, decimal_places %d must '
                    'round to an absolute value less than 10^%d.' %
                    (model_field.max_digits, model_field.decimal_places, max_prec)
                )
            val = d
        elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
            # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
            try:
                val = int(ogr_field.value)
            except ValueError:
                raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
        else:
            val = ogr_field.value
        return val

    def verify_fk(self, feat, rel_model, rel_mapping):
        """
        Given an OGR Feature, the related model and its dictionary mapping,
        this routine will retrieve the related model for the ForeignKey
        mapping.
        """
        # TODO: It is expensive to retrieve a model for every record --
        # explore if an efficient mechanism exists for caching related
        # ForeignKey models.

        # Constructing and verifying the related model keyword arguments.
        fk_kwargs = {}
        for field_name, ogr_name in rel_mapping.items():
            fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))

        # Attempting to retrieve and return the related model.
        try:
            return rel_model.objects.using(self.using).get(**fk_kwargs)
        except ObjectDoesNotExist:
            raise MissingForeignKey(
                'No ForeignKey %s model found with keyword arguments: %s' %
                (rel_model.__name__, fk_kwargs)
            )

    def verify_geom(self, geom, model_field):
        """
        Verifies the geometry -- will construct and return a GeometryCollection
        if necessary (for example if the model field is MultiPolygonField while
        the mapped shapefile only contains Polygons).
        """
        # Downgrade a 3D geom to a 2D one, if necessary.
        if self.coord_dim != geom.coord_dim:
            geom.coord_dim = self.coord_dim

        if self.make_multi(geom.geom_type, model_field):
            # Constructing a multi-geometry type to contain the single geometry
            multi_type = self.MULTI_TYPES[geom.geom_type.num]
            g = OGRGeometry(multi_type)
            g.add(geom)
        else:
            g = geom

        # Transforming the geometry with our Coordinate Transformation object,
        # but only if the class variable `transform` is set w/a CoordTransform
        # object.
        if self.transform:
            g.transform(self.transform)

        # Returning the WKT of the geometry.
        return g.wkt

    # #### Other model methods ####
    def coord_transform(self):
        "Returns the coordinate transformation object."
        SpatialRefSys = self.spatial_backend.spatial_ref_sys()
        try:
            # Getting the target spatial reference system
            target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs

            # Creating the CoordTransform object
            return CoordTransform(self.source_srs, target_srs)
        except Exception as msg:
            new_msg = 'Could not translate between the data source and model geometry: %s' % msg
            six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])

    def geometry_field(self):
        "Returns the GeometryField instance associated with the geographic column."
        # Use `get_field()` on the model's options so that we
        # get the correct field instance if there's model inheritance.
        opts = self.model._meta
        return opts.get_field(self.geom_field)

    def make_multi(self, geom_type, model_field):
        """
        Given the OGRGeomType for a geometry and its associated GeometryField,
        determine whether the geometry should be turned into a GeometryCollection.
        """
        return (geom_type.num in self.MULTI_TYPES and
                model_field.__class__.__name__ == 'Multi%s' % geom_type.django)

    def save(self, verbose=False, fid_range=False, step=False,
             progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Saves the contents from the OGR DataSource Layer into the database
        according to the mapping dictionary given at initialization.

        Keyword Parameters:
         verbose:
           If set, information will be printed subsequent to each model save
           executed on the database.

         fid_range:
           May be set with a slice or tuple of (begin, end) feature ID's to map
           from the data source.  In other words, this keyword enables the user
           to selectively import a subset range of features in the geographic
           data source.

         step:
           If set with an integer, transactions will occur at every step
           interval. For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature etc.

         progress:
           When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved.  By default,
           progress information will pe printed every 1000 features processed,
           however, this default may be overridden by setting this keyword with an
           integer for the desired interval.

         stream:
           Status information will be written to this file handle.  Defaults to
           using `sys.stdout`, but any object with a `write` method is supported.

         silent:
           By default, non-fatal error notifications are printed to stdout, but
           this keyword may be set to disable these notifications.

         strict:
           Execution of the model mapping will cease upon the first error
           encountered.  The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)

        # Setting the progress interval, if requested.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress

        # Closure that maps and saves features; carries its counters as
        # default arguments so incremental (stepped) saving can resume.
        def _save(feat_range=default_range, num_feat=0, num_saved=0):
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer

            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError as msg:
                    # Something borked the validation
                    if strict:
                        raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.using(self.using).get(**u_kwargs)
                            is_update = True

                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new:
                                geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)

                    try:
                        # Attempting to save.
                        m.save(using=self.using)
                        num_saved += 1
                        if verbose:
                            stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
                    except Exception as msg:
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write(
                                    'Failed to save the feature (id: %s) into the '
                                    'model with the keyword arguments:\n' % feat.fid
                                )
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))

                # Printing progress information, if requested.
                if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))

            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return num_saved, num_feat

        if self.transaction_decorator is not None:
            _save = self.transaction_decorator(_save)

        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)

            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i + 1 == n_i:
                    step_slice = slice(beg, None)
                else:
                    step_slice = slice(beg, end)

                try:
                    num_feat, num_saved = _save(step_slice, num_feat, num_saved)
                    beg = end
                except:  # Deliberately catch everything
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _save() function.
            _save()
| mit |
mdjurfeldt/nest-simulator | topology/examples/conncon_sources.py | 16 | 3092 | # -*- coding: utf-8 -*-
#
# conncon_sources.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
NEST Topology Module Example
Create two 30x30 layers of iaf_psc_alpha neurons,
connect with convergent projection and rectangular mask,
visualize connection from target perspective.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
'''
import nest
import nest.topology as topo
import pylab
# interactive mode so the figure refreshes as artists are added
pylab.ion()
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
# create two test layers: 30x30 iaf_psc_alpha grids on a 3.0 x 3.0 extent
# with periodic boundary conditions (edge_wrap=True)
a = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha', 'edge_wrap': True})
b = topo.CreateLayer({'columns': 30, 'rows': 30, 'extent': [3.0, 3.0],
                      'elements': 'iaf_psc_alpha', 'edge_wrap': True})
# convergent projection a -> b: each target draws its sources from a
# 0.4 x 1.0 rectangular mask with probability 0.5 and uniform random weights
topo.ConnectLayers(a, b, {'connection_type': 'convergent',
                          'mask': {'rectangular': {'lower_left': [-0.2, -0.5],
                                                   'upper_right': [0.2, 0.5]}},
                          'kernel': 0.5,
                          'weights': {'uniform': {'min': 0.5, 'max': 2.0}},
                          'delays': 1.0})
pylab.clf()
# plot sources of neurons in different grid locations
for tgt_pos in [[15, 15], [0, 0]]:
    # obtain node id for the target at this grid position
    # (GetElement presumably returns the GID(s) at tgt_pos -- verify)
    tgt = topo.GetElement(b, tgt_pos)
    # positions of all sources of tgt: conn[0] is the source GID of each
    # connection that has tgt as target
    # int() required to cast numpy.int64
    spos = tuple(zip(*[topo.GetPosition([int(conn[0])])[0] for conn in
                       nest.GetConnections(target=tgt)]))
    # scatter-plot of the source positions
    pylab.scatter(spos[0], spos[1], 20, zorder=10)
    # mark the target position with a transparent red circle
    ctrpos = pylab.array(topo.GetPosition(tgt)[0])
    pylab.gca().add_patch(pylab.Circle(ctrpos, radius=0.1, zorder=99,
                                       fc='r', alpha=0.4, ec='none'))
    # mark mask position with open red rectangle (0.4 x 1.0, centred on tgt)
    pylab.gca().add_patch(
        pylab.Rectangle(ctrpos - (0.2, 0.5), 0.4, 1.0, zorder=1,
                        fc='none', ec='r', lw=3))
# mark layer edge (3.0 x 3.0 extent centred on the origin)
pylab.gca().add_patch(pylab.Rectangle((-1.5, -1.5), 3.0, 3.0, zorder=1,
                                      fc='none', ec='k', lw=3))
# beautify
pylab.axes().set_xticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.axes().set_yticks(pylab.arange(-1.5, 1.55, 0.5))
pylab.grid(True)
pylab.axis([-2.0, 2.0, -2.0, 2.0])
pylab.axes().set_aspect('equal', 'box')
pylab.title('Connection sources')
| gpl-2.0 |
jmcanterafonseca/fiware-cygnus | test/acceptance/tools/fabric_utils.py | 1 | 7626 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of Short Term Historic (FI-WARE project).
#
# fiware-cygnus is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any
# later version.
# fiware-cygnus is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with fiware-cygnus. If not, see
# http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License please contact:
# iot_support at tid.es
#
__author__ = 'Iván Arias León (ivan.ariasleon at telefonica dot com)'
from fabric.api import env, run, get
from fabric.context_managers import hide, cd
from fabric.operations import sudo, local, put
from StringIO import StringIO
# constants: keyword-argument names accepted by FabricSupport (and their defaults)
EMPTY = u''
HOST = u'host'
HOST_DEFAULT = u'localhost'
PORT = u'port'
PORT_DEFAULT = u'22'
USER = u'user'
USER_DEFAULT = u'root'
PASSWORD = u'password'
CERT_FILE = u'cert_file'
PATH = u'path'
RETRY = u'retry'
RETRY_DEFAULT = u'1'
SUDO = u'sudo'
MODE = u'mode'
COMMAND = u'command'
HIDE = u'hide'
class FabricSupport:
    """
    Manage fabric tools: run commands, upload files and read files on a
    remote host over SSH. When the host is localhost, commands are run
    locally (fabric's `local`) instead of over SSH.
    """

    def __init__(self, **kwargs):
        """
        constructor
        :param host: hostname or IP to connect
        :param port: specify a default port used to connect
        :param hide: show message or not (True or False)
        :param user: implicit user used to connect
        :param password: password associated to user
        :param cert_file: certificate file associated to user
        :param retry: number of retries in case of error
        :param sudo: with superuser privileges (True | False)
        """
        self.host = kwargs.get(HOST, HOST_DEFAULT)
        self.port = kwargs.get(PORT, PORT_DEFAULT)
        self.hide = kwargs.get(HIDE, True)
        env.host_string = "%s:%s" % (self.host, self.port)
        env.user = kwargs.get(USER, USER_DEFAULT)
        env.password = kwargs.get(PASSWORD, EMPTY)
        env.key_filename = kwargs.get(CERT_FILE, EMPTY)
        env.connection_attempts = kwargs.get(RETRY, RETRY_DEFAULT)
        env.cwd = kwargs.get(PATH, "")
        self.sudo = kwargs.get(SUDO, False)
        # if host is localhost run commands locally instead of over SSH (see run method)
        if self.host.lower() == "localhost" or self.host == "127.0.0.1":
            self.LOCALHOST = True
        else:
            self.LOCALHOST = False

    def warn_only(self, value):
        """
        Boolean setting determining whether Fabric exits when detecting errors on the remote end
        :param value: ( True | False )
        """
        env.warn_only = value

    def __sub_run(self, command, path, sudo_run):
        """
        run a command independently of whether the console output is displayed or not
        :param command: command or script to execute in remote
        :param path: path where execute the command
        :param sudo_run: with superuser privileges (True | False)
        """
        with cd(path):
            if self.LOCALHOST:
                return local(command)
            elif sudo_run:
                return sudo(command)
            else:
                return run(command)

    def run(self, command, **kwargs):
        """
        run a command or script in remote host, but if host is localhost use a local process instead of SSH
        :param command: command or script to execute in remote
        :param path: path where execute the command
        :param sudo: with superuser privileges (True | False)
        :param hide: show message or not (True or False)
        """
        path = kwargs.get(PATH, env.cwd)
        sudo_run = kwargs.get(SUDO, self.sudo)
        hide_msg = kwargs.get(HIDE, self.hide)
        try:
            if hide_msg:
                with hide('running', 'stdout', 'stderr'):
                    return self.__sub_run(command, path, sudo_run)
            else:
                return self.__sub_run(command, path, sudo_run)
        except Exception as e:
            assert False, "ERROR - running the command \"%s\" remotely with Fabric \n - %s" % (command, str(e))

    def runs(self, ops_list, **kwargs):
        """
        run several commands in a list with its path associated
        :param ops_list: list of commands with its current path associated and if it is necessary of superuser privilege (dictionary).
               ex:{"command": "ls", "path": "/tmp", "sudo": False}
        :param hide: show message or not (True or False)
        """
        hide_msg = kwargs.get(HIDE, self.hide)
        for op in ops_list:
            self.run(op[COMMAND], path=op[PATH], sudo=op[SUDO], hide=hide_msg)

    def current_directory(self, directory):
        """
        change current directory
        :param directory: directory path
        """
        env.cwd = directory

    def put_file_to_remote(self, file_name, target_path, **kwargs):
        """
        Upload one or more files to a remote host from local host
        :param file_name: path and files to be copied into server
        :param target_path: path where will be put the file
        :param hide: show message or not (True or False)
        :param sudo: superuser privileges (True | False)
        :param mode: to specify an exact mode (chmod)
        """
        hide_run = kwargs.get(HIDE, self.hide)
        sudo_run = kwargs.get(SUDO, self.sudo)
        mode = kwargs.get(MODE, None)
        if hide_run:
            with hide('running', 'stdout', 'stderr'):
                put(local_path=file_name, remote_path=target_path, use_sudo=sudo_run, mode=mode)
        else:
            # FIX: previously there was no else branch, so the file was
            # silently NOT uploaded whenever hide was False.
            put(local_path=file_name, remote_path=target_path, use_sudo=sudo_run, mode=mode)

    def __sub_read_file(self, file, sudo_run):
        """
        read a file independently of whether the console output is displayed or not
        :param file: file name to read
        :param sudo_run: with superuser privileges (True | False)
        NOTE(review): the localhost branch returns a list of lines while the
        remote branch returns a single string -- confirm callers handle both.
        """
        if self.LOCALHOST:
            with open(file) as config_file:
                return config_file.readlines()
        else:
            fd = StringIO()
            # FIX: download once, straight into the in-memory buffer and
            # honouring sudo; the previous code fetched the file twice (the
            # first copy ended up as a stray file in the local working
            # directory, and the second fetch ignored sudo).
            get(file, fd, use_sudo=sudo_run)
            return fd.getvalue()

    def read_file(self, file, **kwargs):
        """
        read a file remotely
        :param file: file name to read
        :param path: path where the file is read
        :param hide: show message or not (True or False)
        :param sudo: with superuser privileges (True | False)
        """
        env.cwd = kwargs.get(PATH, env.cwd)
        hide_run = kwargs.get(HIDE, self.hide)
        sudo_run = kwargs.get(SUDO, self.sudo)
        try:
            if hide_run:
                with hide('running', 'stdout', 'stderr'):
                    return self.__sub_read_file(file, sudo_run)
            else:
                return self.__sub_read_file(file, sudo_run)
        except Exception as e:
            assert False, "ERROR -reading a File \"%s\" remotely with Fabric \n - %s" % (file, str(e))
| agpl-3.0 |
turbokongen/home-assistant | homeassistant/components/onewire/__init__.py | 6 | 1439 | """The 1-Wire component."""
import asyncio
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN, SUPPORTED_PLATFORMS
from .onewirehub import CannotConnect, OneWireHub
async def async_setup(hass, config) -> bool:
    """Set up 1-Wire integrations.

    Nothing to do for YAML setup; all configuration happens via config
    entries (async_setup_entry). Always reports success.
    """
    return True
async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool:
    """Set up a 1-Wire proxy for a config entry.

    Creates a OneWireHub for the entry, stores it under
    hass.data[DOMAIN][unique_id], then forwards setup to every supported
    platform.
    """
    hass.data.setdefault(DOMAIN, {})
    onewirehub = OneWireHub(hass)
    try:
        await onewirehub.initialize(config_entry)
    except CannotConnect as exc:
        # Tell Home Assistant to retry the setup later instead of failing permanently.
        raise ConfigEntryNotReady() from exc
    hass.data[DOMAIN][config_entry.unique_id] = onewirehub
    for component in SUPPORTED_PLATFORMS:
        # Fire-and-forget: platform setups run concurrently as tasks.
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, component)
        )
    return True
async def async_unload_entry(hass: HomeAssistantType, config_entry: ConfigEntry):
    """Unload a config entry.

    Unloads every supported platform concurrently; the hub is removed from
    hass.data only when all platforms unloaded successfully.
    """
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(config_entry, platform)
        for platform in SUPPORTED_PLATFORMS
    ]
    results = await asyncio.gather(*unload_tasks)
    unload_ok = all(results)
    if unload_ok:
        hass.data[DOMAIN].pop(config_entry.unique_id)
    return unload_ok
| apache-2.0 |
garbled1/ansible | lib/ansible/modules/cloud/amazon/ec2_metadata_facts.py | 23 | 25815 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_metadata_facts
short_description: Gathers facts (instance metadata) about remote hosts within ec2
version_added: "1.0"
author:
- Silviu Dicu (@silviud)
- Vinay Dandekar (@roadmapper)
description:
- This module fetches data from the instance metadata endpoint in ec2 as per
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html.
The module must be called from within the EC2 instance itself.
notes:
- Parameters to filter on ec2_metadata_facts may be added later.
'''
EXAMPLES = '''
# Gather EC2 metadata facts
- ec2_metadata_facts:
- debug:
msg: "This instance is a t1.micro"
when: ansible_ec2_instance_type == "t1.micro"
'''
RETURN = '''
ansible_facts:
description: Dictionary of new facts representing discovered properties of the EC2 instance.
returned: changed
type: complex
contains:
ansible_ec2_ami_id:
description: The AMI ID used to launch the instance.
type: string
sample: "ami-XXXXXXXX"
ansible_ec2_ami_launch_index:
description:
- If you started more than one instance at the same time, this value indicates the order in which the instance was launched.
The value of the first instance launched is 0.
type: string
sample: "0"
ansible_ec2_ami_manifest_path:
description:
- The path to the AMI manifest file in Amazon S3.
If you used an Amazon EBS-backed AMI to launch the instance, the returned result is unknown.
type: string
sample: "(unknown)"
ansible_ec2_ancestor_ami_ids:
description:
- The AMI IDs of any instances that were rebundled to create this AMI.
This value will only exist if the AMI manifest file contained an ancestor-amis key.
type: string
sample: "(unknown)"
ansible_ec2_block_device_mapping_ami:
description: The virtual device that contains the root/boot file system.
type: string
sample: "/dev/sda1"
ansible_ec2_block_device_mapping_ebsN:
description:
- The virtual devices associated with Amazon EBS volumes, if any are present.
Amazon EBS volumes are only available in metadata if they were present at launch time or when the instance was last started.
The N indicates the index of the Amazon EBS volume (such as ebs1 or ebs2).
type: string
sample: "/dev/xvdb"
ansible_ec2_block_device_mapping_ephemeralN:
description: The virtual devices associated with ephemeral devices, if any are present. The N indicates the index of the ephemeral volume.
type: string
sample: "/dev/xvdc"
ansible_ec2_block_device_mapping_root:
description:
- The virtual devices or partitions associated with the root devices, or partitions on the virtual device,
where the root (/ or C) file system is associated with the given instance.
type: string
sample: "/dev/sda1"
ansible_ec2_block_device_mapping_swap:
description: The virtual devices associated with swap. Not always present.
type: string
sample: "/dev/sda2"
ansible_ec2_fws_instance_monitoring:
description: "Value showing whether the customer has enabled detailed one-minute monitoring in CloudWatch."
type: string
sample: "enabled"
ansible_ec2_hostname:
description:
- The private IPv4 DNS hostname of the instance.
In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: string
sample: "ip-10-0-0-1.ec2.internal"
ansible_ec2_iam_info:
description:
- If there is an IAM role associated with the instance, contains information about the last time the instance profile was updated,
including the instance's LastUpdated date, InstanceProfileArn, and InstanceProfileId. Otherwise, not present.
type: complex
sample: ""
ansible_ec2_iam_info_instanceprofilearn:
description: The IAM instance profile ARN.
type: string
sample: "arn:aws:iam::<account id>:instance-profile/<role name>"
ansible_ec2_iam_info_instanceprofileid:
description: IAM instance profile ID.
type: string
sample: ""
ansible_ec2_iam_info_lastupdated:
description: IAM info last updated time.
type: string
sample: "2017-05-12T02:42:27Z"
ansible_ec2_iam_instance_profile_role:
description: IAM instance role.
type: string
sample: "role_name"
ansible_ec2_iam_security_credentials_<role name>:
description:
- If there is an IAM role associated with the instance, role-name is the name of the role,
and role-name contains the temporary security credentials associated with the role. Otherwise, not present.
type: string
sample: ""
ansible_ec2_iam_security_credentials_<role name>_accesskeyid:
description: IAM role access key ID.
type: string
sample: ""
ansible_ec2_iam_security_credentials_<role name>_code:
description: IAM code.
type: string
sample: "Success"
ansible_ec2_iam_security_credentials_<role name>_expiration:
description: IAM role credentials expiration time.
type: string
sample: "2017-05-12T09:11:41Z"
ansible_ec2_iam_security_credentials_<role name>_lastupdated:
description: IAM role last updated time.
type: string
sample: "2017-05-12T02:40:44Z"
ansible_ec2_iam_security_credentials_<role name>_secretaccesskey:
description: IAM role secret access key.
type: string
sample: ""
ansible_ec2_iam_security_credentials_<role name>_token:
description: IAM role token.
type: string
sample: ""
ansible_ec2_iam_security_credentials_<role name>_type:
description: IAM role type.
type: string
sample: "AWS-HMAC"
ansible_ec2_instance_action:
description: Notifies the instance that it should reboot in preparation for bundling.
type: string
sample: "none"
ansible_ec2_instance_id:
description: The ID of this instance.
type: string
sample: "i-XXXXXXXXXXXXXXXXX"
ansible_ec2_instance_identity_document:
description: JSON containing instance attributes, such as instance-id, private IP address, etc.
type: string
sample: ""
ansible_ec2_instance_identity_document_accountid:
description: ""
type: string
sample: "012345678901"
ansible_ec2_instance_identity_document_architecture:
description: Instance system architecture.
type: string
sample: "x86_64"
ansible_ec2_instance_identity_document_availabilityzone:
description: The Availability Zone in which the instance launched.
type: string
sample: "us-east-1a"
ansible_ec2_instance_identity_document_billingproducts:
description: Billing products for this instance.
type: string
sample: ""
ansible_ec2_instance_identity_document_devpayproductcodes:
description: Product codes for the launched AMI.
type: string
sample: ""
ansible_ec2_instance_identity_document_imageid:
description: The AMI ID used to launch the instance.
type: string
sample: "ami-01234567"
ansible_ec2_instance_identity_document_instanceid:
description: The ID of this instance.
type: string
sample: "i-0123456789abcdef0"
ansible_ec2_instance_identity_document_instancetype:
description: The type of instance.
type: string
sample: "m4.large"
ansible_ec2_instance_identity_document_kernelid:
description: The ID of the kernel launched with this instance, if applicable.
type: string
sample: ""
ansible_ec2_instance_identity_document_pendingtime:
description: The instance pending time.
type: string
sample: "2017-05-11T20:51:20Z"
ansible_ec2_instance_identity_document_privateip:
description:
- The private IPv4 address of the instance.
In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: string
sample: "10.0.0.1"
ansible_ec2_instance_identity_document_ramdiskid:
description: The ID of the RAM disk specified at launch time, if applicable.
type: string
sample: ""
ansible_ec2_instance_identity_document_region:
description: The Region in which the instance launched.
type: string
sample: "us-east-1"
ansible_ec2_instance_identity_document_version:
description: Identity document version.
type: string
sample: "2010-08-31"
ansible_ec2_instance_identity_pkcs7:
description: Used to verify the document's authenticity and content against the signature.
type: string
sample: ""
ansible_ec2_instance_identity_rsa2048:
description: Used to verify the document's authenticity and content against the signature.
type: string
sample: ""
ansible_ec2_instance_identity_signature:
description: Data that can be used by other parties to verify its origin and authenticity.
type: string
sample: ""
ansible_ec2_instance_type:
description: The type of instance.
type: string
sample: "m4.large"
ansible_ec2_local_hostname:
description:
- The private IPv4 DNS hostname of the instance.
In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: string
sample: "ip-10-0-0-1.ec2.internal"
ansible_ec2_local_ipv4:
description:
- The private IPv4 address of the instance.
In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: string
sample: "10.0.0.1"
ansible_ec2_mac:
description:
- The instance's media access control (MAC) address.
In cases where multiple network interfaces are present, this refers to the eth0 device (the device for which the device number is 0).
type: string
sample: "00:11:22:33:44:55"
ansible_ec2_metrics_vhostmd:
description: Metrics.
type: string
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_device_number:
description:
- The unique device number associated with that interface. The device number corresponds to the device name;
for example, a device-number of 2 is for the eth2 device.
This category corresponds to the DeviceIndex and device-index fields that are used by the Amazon EC2 API and the EC2 commands for the AWS CLI.
type: string
sample: "0"
ansible_ec2_network_interfaces_macs_<mac address>_interface_id:
description: The elastic network interface ID.
type: string
sample: "eni-12345678"
ansible_ec2_network_interfaces_macs_<mac address>_ipv4_associations_<ip address>:
description: The private IPv4 addresses that are associated with each public-ip address and assigned to that interface.
type: string
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_ipv6s:
description: The IPv6 addresses associated with the interface. Returned only for instances launched into a VPC.
type: string
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_local_hostname:
description: The interface's local hostname.
type: string
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_local_ipv4s:
description: The private IPv4 addresses associated with the interface.
type: string
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_mac:
description: The instance's MAC address.
type: string
sample: "00:11:22:33:44:55"
ansible_ec2_network_interfaces_macs_<mac address>_owner_id:
description:
- The ID of the owner of the network interface.
In multiple-interface environments, an interface can be attached by a third party, such as Elastic Load Balancing.
Traffic on an interface is always billed to the interface owner.
type: string
sample: "01234567890"
ansible_ec2_network_interfaces_macs_<mac address>_public_hostname:
description:
- The interface's public DNS (IPv4). If the instance is in a VPC,
this category is only returned if the enableDnsHostnames attribute is set to true.
type: string
sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
ansible_ec2_network_interfaces_macs_<mac address>_public_ipv4s:
description: The Elastic IP addresses associated with the interface. There may be multiple IPv4 addresses on an instance.
type: string
sample: "1.2.3.4"
ansible_ec2_network_interfaces_macs_<mac address>_security_group_ids:
description: The IDs of the security groups to which the network interface belongs. Returned only for instances launched into a VPC.
type: string
sample: "sg-01234567,sg-01234568"
ansible_ec2_network_interfaces_macs_<mac address>_security_groups:
description: Security groups to which the network interface belongs. Returned only for instances launched into a VPC.
type: string
sample: "secgroup1,secgroup2"
ansible_ec2_network_interfaces_macs_<mac address>_subnet_id:
description: The ID of the subnet in which the interface resides. Returned only for instances launched into a VPC.
type: string
sample: "subnet-01234567"
ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv4_cidr_block:
description: The IPv4 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC.
type: string
sample: "10.0.1.0/24"
ansible_ec2_network_interfaces_macs_<mac address>_subnet_ipv6_cidr_blocks:
description: The IPv6 CIDR block of the subnet in which the interface resides. Returned only for instances launched into a VPC.
type: string
sample: ""
ansible_ec2_network_interfaces_macs_<mac address>_vpc_id:
description: The ID of the VPC in which the interface resides. Returned only for instances launched into a VPC.
type: string
sample: "vpc-0123456"
ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_block:
description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
type: string
sample: "10.0.0.0/16"
ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv4_cidr_blocks:
description: The IPv4 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
type: string
sample: "10.0.0.0/16"
ansible_ec2_network_interfaces_macs_<mac address>_vpc_ipv6_cidr_blocks:
description: The IPv6 CIDR block of the VPC in which the interface resides. Returned only for instances launched into a VPC.
type: string
sample: ""
ansible_ec2_placement_availability_zone:
description: The Availability Zone in which the instance launched.
type: string
sample: "us-east-1a"
ansible_ec2_placement_region:
description: The Region in which the instance launched.
type: string
sample: "us-east-1"
ansible_ec2_product_codes:
description: Product codes associated with the instance, if any.
type: string
sample: "aw0evgkw8e5c1q413zgy5pjce"
ansible_ec2_profile:
description: EC2 instance hardware profile.
type: string
sample: "default-hvm"
ansible_ec2_public_hostname:
description:
- The instance's public DNS. If the instance is in a VPC, this category is only returned if the enableDnsHostnames attribute is set to true.
type: string
sample: "ec2-1-2-3-4.compute-1.amazonaws.com"
ansible_ec2_public_ipv4:
description: The public IPv4 address. If an Elastic IP address is associated with the instance, the value returned is the Elastic IP address.
type: string
sample: "1.2.3.4"
ansible_ec2_public_key:
description: Public key. Only available if supplied at instance launch time.
type: string
sample: ""
ansible_ec2_ramdisk_id:
description: The ID of the RAM disk specified at launch time, if applicable.
type: string
sample: ""
ansible_ec2_reservation_id:
description: The ID of the reservation.
type: string
sample: "r-0123456789abcdef0"
ansible_ec2_security_groups:
description:
- The names of the security groups applied to the instance. After launch, you can only change the security groups of instances running in a VPC.
Such changes are reflected here and in network/interfaces/macs/mac/security-groups.
type: string
sample: "securitygroup1,securitygroup2"
ansible_ec2_services_domain:
description: The domain for AWS resources for the region; for example, amazonaws.com for us-east-1.
type: string
sample: "amazonaws.com"
ansible_ec2_services_partition:
description:
- The partition that the resource is in. For standard AWS regions, the partition is aws.
If you have resources in other partitions, the partition is aws-partitionname.
For example, the partition for resources in the China (Beijing) region is aws-cn.
type: string
sample: "aws"
ansible_ec2_spot_termination_time:
description:
- The approximate time, in UTC, that the operating system for your Spot instance will receive the shutdown signal.
This item is present and contains a time value only if the Spot instance has been marked for termination by Amazon EC2.
The termination-time item is not set to a time if you terminated the Spot instance yourself.
type: string
sample: "2015-01-05T18:02:00Z"
ansible_ec2_user_data:
description: The instance user data.
type: string
sample: "#!/bin/bash"
'''
import json
import re
import socket
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import fetch_url, url_argument_spec
# Fail fast: the link-local metadata service normally answers immediately,
# so cap every HTTP call at 5 seconds instead of hanging.
socket.setdefaulttimeout(5)
class Ec2Metadata(object):
    """Fetch EC2 instance metadata, user data and dynamic data from the
    link-local metadata service and flatten it into ansible_ec2_* facts."""

    ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'
    ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'
    ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'
    ec2_dynamicdata_uri = 'http://169.254.169.254/latest/dynamic/'

    def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None, ec2_dynamicdata_uri=None):
        self.module = module
        self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
        self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
        self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
        self.uri_dynamic = ec2_dynamicdata_uri or self.ec2_dynamicdata_uri
        self._data = {}  # flat mapping: full URI (plus JSON sub-keys) -> value
        self._prefix = 'ansible_ec2_%s'

    def _fetch(self, url):
        """GET *url* and return the response body as text.

        NOTE(review): when the request fails, ``response`` is None and
        to_text(None) yields the literal string 'None', which callers then
        treat as real data -- confirm whether failures should return None.
        """
        (response, info) = fetch_url(self.module, url, force=True)
        if response:
            data = response.read()
        else:
            data = None
        return to_text(data)

    def _mangle_fields(self, fields, uri, filter_patterns=None):
        """Convert URI-keyed values into ansible_ec2_* fact names.

        :param fields: mapping of full metadata URIs to their values
        :param uri: base URI; the path after it becomes the fact name
        :param filter_patterns: regex patterns whose matching keys are
            dropped (default removes the raw public-keys-0 tree)
        """
        filter_patterns = ['public-keys-0'] if filter_patterns is None else filter_patterns
        new_fields = {}
        for key, value in fields.items():
            split_fields = key[len(uri):].split('/')
            # iam/security-credentials/<role> additionally yields a dedicated
            # iam-instance-profile-role fact holding the bare role name
            if len(split_fields) == 3 and split_fields[0:2] == ['iam', 'security-credentials'] and '_' not in split_fields[2]:
                new_fields[self._prefix % "iam-instance-profile-role"] = split_fields[2]
            if len(split_fields) > 1 and split_fields[1]:
                new_key = "-".join(split_fields)
                new_fields[self._prefix % new_key] = value
            else:
                new_key = "".join(split_fields)
                new_fields[self._prefix % new_key] = value
        for pattern in filter_patterns:
            for key in dict(new_fields):
                match = re.search(pattern, key)
                if match:
                    new_fields.pop(key)
        return new_fields

    def fetch(self, uri, recurse=True):
        """Recursively walk the metadata tree rooted at *uri*, storing every
        leaf value in self._data keyed by its full URI."""
        raw_subfields = self._fetch(uri)
        if not raw_subfields:
            return
        subfields = raw_subfields.split('\n')
        for field in subfields:
            if field.endswith('/') and recurse:
                self.fetch(uri + field)
            if uri.endswith('/'):
                new_uri = uri + field
            else:
                new_uri = uri + '/' + field
            if new_uri not in self._data and not new_uri.endswith('/'):
                content = self._fetch(new_uri)
                if field == 'security-groups' or field == 'security-group-ids':
                    # normalize multi-line group listings to comma-separated
                    sg_fields = ",".join(content.split('\n'))
                    self._data[new_uri] = sg_fields
                else:
                    try:
                        # `json_dict` avoids shadowing the builtin `dict`
                        json_dict = json.loads(content)
                        self._data[new_uri] = content
                        for (key, value) in json_dict.items():
                            self._data['%s_%s' % (new_uri, key.lower())] = value
                    except (ValueError, AttributeError):
                        # ValueError: content is not JSON at all;
                        # AttributeError: JSON scalar/list with no .items().
                        # Either way, store the raw content.
                        self._data[new_uri] = content

    def fix_invalid_varnames(self, data):
        """Change ':' and '-' to '_' to ensure valid template variable names"""
        new_data = data.copy()
        for key, value in data.items():
            if ':' in key or '-' in key:
                newkey = re.sub(':|-', '_', key)
                new_data[newkey] = value
                del new_data[key]
        return new_data

    def run(self):
        """Collect all facts and return them as one flat dict."""
        self.fetch(self.uri_meta)  # populate _data with metadata
        data = self._mangle_fields(self._data, self.uri_meta)
        data[self._prefix % 'user-data'] = self._fetch(self.uri_user)
        data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)
        self._data = {}  # clear out metadata in _data
        self.fetch(self.uri_dynamic)  # populate _data with dynamic data
        dyndata = self._mangle_fields(self._data, self.uri_dynamic)
        data.update(dyndata)
        data = self.fix_invalid_varnames(data)
        # Maintain old key for backwards compatibility
        data['ansible_ec2_placement_region'] = data['ansible_ec2_instance_identity_document_region']
        return data
def main():
    """Module entry point: gather all EC2 metadata facts and exit."""
    module = AnsibleModule(
        argument_spec=url_argument_spec(),
        supports_check_mode=True,
    )
    # Warn users still invoking this module under its legacy name.
    if module._name == 'ec2_facts':
        module.deprecate("The 'ec2_facts' module is being renamed 'ec2_metadata_facts'", version=2.7)
    facts = Ec2Metadata(module).run()
    module.exit_json(changed=False, ansible_facts=facts)

if __name__ == '__main__':
    main()
| gpl-3.0 |
tmimori/erpnext | erpnext/stock/report/stock_ledger/stock_ledger.py | 32 | 3783 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
    """Report entry point: return (columns, rows) for the Stock Ledger report.

    Rows are stock ledger entries in posting order, optionally preceded by
    an 'Opening' balance row. Row values are positional and must stay in
    sync with the column order defined in get_columns().
    """
    columns = get_columns()
    sl_entries = get_stock_ledger_entries(filters)
    item_details = get_item_details(filters)
    opening_row = get_opening_balance(filters, columns)
    data = []
    if opening_row:
        data.append(opening_row)
    for sle in sl_entries:
        item_detail = item_details[sle.item_code]
        # incoming rate is only meaningful for inward (positive qty) movements
        data.append([sle.date, sle.item_code, item_detail.item_name, item_detail.item_group,
            item_detail.brand, item_detail.description, sle.warehouse,
            item_detail.stock_uom, sle.actual_qty, sle.qty_after_transaction,
            (sle.incoming_rate if sle.actual_qty > 0 else 0.0),
            sle.valuation_rate, sle.stock_value, sle.voucher_type, sle.voucher_no,
            sle.batch_no, sle.serial_no, sle.company])
    return columns, data
def get_columns():
    """Build the report column definitions: translated label + type spec.

    The order here defines the positional row layout used by execute()
    and get_opening_balance().
    """
    specs = [
        ("Date", ":Datetime:95"),
        ("Item", ":Link/Item:130"),
        ("Item Name", "::100"),
        ("Item Group", ":Link/Item Group:100"),
        ("Brand", ":Link/Brand:100"),
        ("Description", "::200"),
        ("Warehouse", ":Link/Warehouse:100"),
        ("Stock UOM", ":Link/UOM:100"),
        ("Qty", ":Float:50"),
        ("Balance Qty", ":Float:100"),
        ("Incoming Rate", ":Currency:110"),
        ("Valuation Rate", ":Currency:110"),
        ("Balance Value", ":Currency:110"),
        ("Voucher Type", "::110"),
        ("Voucher #", ":Dynamic Link/Voucher Type:100"),
        ("Batch", ":Link/Batch:100"),
        ("Serial #", ":Link/Serial No:100"),
        ("Company", ":Link/Company:100"),
    ]
    return [_(label) + fmt for label, fmt in specs]
def get_stock_ledger_entries(filters):
    """Fetch stock ledger entries for the company and posting-date range.

    Extra WHERE clauses come from get_sle_conditions(); *filters* is also
    passed to frappe.db.sql as the dict of named %(...)s parameter values.
    Returns rows as dicts, in chronological order.
    """
    return frappe.db.sql("""select concat_ws(" ", posting_date, posting_time) as date,
        item_code, warehouse, actual_qty, qty_after_transaction, incoming_rate, valuation_rate,
        stock_value, voucher_type, voucher_no, batch_no, serial_no, company
        from `tabStock Ledger Entry`
        where company = %(company)s and
            posting_date between %(from_date)s and %(to_date)s
            {sle_conditions}
            order by posting_date asc, posting_time asc, name asc"""\
        .format(sle_conditions=get_sle_conditions(filters)), filters, as_dict=1)
def get_item_details(filters):
    """Build a lookup of item name -> item row (item_name, description, ...).

    setdefault keeps the first row seen per name; `name` is presumably the
    unique doctype key in tabItem, so this is effectively a plain dict
    build -- verify.
    """
    item_details = {}
    for item in frappe.db.sql("""select name, item_name, description, item_group,
        brand, stock_uom from `tabItem` {item_conditions}"""\
        .format(item_conditions=get_item_conditions(filters)), filters, as_dict=1):
        item_details.setdefault(item.name, item)
    return item_details
def get_item_conditions(filters):
	"""Build the ``where`` clause for the `tabItem` query.

	Only SQL placeholders (``%(item_code)s``, ``%(brand)s``) are emitted;
	the actual values are bound later from the same filters dict.
	Returns an empty string when neither filter is set.
	"""
	clause_by_key = (
		("item_code", "name=%(item_code)s"),
		("brand", "brand=%(brand)s"),
	)
	parts = [clause for key, clause in clause_by_key if filters.get(key)]
	if not parts:
		return ""
	return "where " + " and ".join(parts)
def get_sle_conditions(filters):
	"""Build the extra ``and ...`` conditions appended to the Stock Ledger
	Entry query: an item sub-select (when item filters are set) plus the
	optional warehouse and voucher_no equality filters.

	Returns an empty string when no condition applies; otherwise the clause
	starts with ``and`` so it can be appended after the mandatory filters.
	"""
	extra = []
	item_conditions = get_item_conditions(filters)
	if item_conditions:
		extra.append("""item_code in (select name from tabItem
			{item_conditions})""".format(item_conditions=item_conditions))
	for field in ("warehouse", "voucher_no"):
		if filters.get(field):
			extra.append("{0}=%({0})s".format(field))
	if not extra:
		return ""
	return "and " + " and ".join(extra)
def get_opening_balance(filters, columns):
	"""Return an 'Opening' balance row for the report, or None.

	An opening row is only meaningful when the report is narrowed to a
	single item and warehouse with a start date; otherwise returns None.
	The row mirrors the report columns: index 1 is the label, 9 the
	balance qty, 11 the valuation rate and 12 the balance value, taken
	from the last stock ledger entry before ``from_date``.
	"""
	if not (filters.item_code and filters.warehouse and filters.from_date):
		return

	from erpnext.stock.stock_ledger import get_previous_sle
	last_entry = get_previous_sle({
		"item_code": filters.item_code,
		"warehouse": filters.warehouse,
		"posting_date": filters.from_date,
		"posting_time": "00:00:00"
	})
	row = [""]*len(columns)
	row[1] = _("'Opening'")
	# Positions match get_columns(): 9=Balance Qty, 11=Valuation Rate, 12=Balance Value
	for i, v in ((9, 'qty_after_transaction'), (11, 'valuation_rate'), (12, 'stock_value')):
		row[i] = last_entry.get(v, 0)
	# Fixed: the original line carried dataset-join residue ("| agpl-3.0 |")
	# fused onto the return statement, which is a syntax error.
	return row
alfa-addon/addon | plugin.video.alfa/lib/js2py/constructors/jsuint8array.py | 9 | 2436 | # this is based on jsarray.py
from ..base import *
try:
import numpy
except:
pass
# JS Uint8Array constructor, dispatching on the type of the first argument
# (length, string, array-like, or a wrapped Python buffer object), backed
# by a numpy uint8 array. `arguments` is the implicit js2py arguments object.
@Js
def Uint8Array():
    # All typed-array wrapper classes, for the array-like branch below.
    TypedArray = (PyJsInt8Array, PyJsUint8Array, PyJsUint8ClampedArray,
                  PyJsInt16Array, PyJsUint16Array, PyJsInt32Array,
                  PyJsUint32Array, PyJsFloat32Array, PyJsFloat64Array)
    a = arguments[0]
    if isinstance(a, PyJsNumber):  # length
        length = a.to_uint32()
        # Per spec, a fractional/negative length is a RangeError.
        if length != a.value:
            raise MakeError('RangeError', 'Invalid array length')
        temp = Js(numpy.full(length, 0, dtype=numpy.uint8))
        temp.put('length', a)
        return temp
    elif isinstance(a, PyJsString):  # object (string)
        # NOTE(review): each character is coerced to uint8 by numpy here;
        # confirm this matches the intended JS semantics for string input.
        temp = Js(numpy.array(list(a.value), dtype=numpy.uint8))
        temp.put('length', Js(len(list(a.value))))
        return temp
    elif isinstance(a, PyJsArray) or isinstance(a, TypedArray) or isinstance(
            a, PyJsArrayBuffer):  # object (Array, TypedArray)
        array = a.to_list()
        # JS holes / undefined elements become 0.
        array = [(int(item.value) if item.value != None else 0)
                 for item in array]
        temp = Js(numpy.array(array, dtype=numpy.uint8))
        temp.put('length', Js(len(array)))
        return temp
    elif isinstance(a, PyObjectWrapper):  # object (ArrayBuffer, etc)
        # Optional byteOffset / length arguments, as in `new Uint8Array(buf, off, len)`.
        if len(arguments) > 1:
            offset = int(arguments[1].value)
        else:
            offset = 0
        if len(arguments) > 2:
            length = int(arguments[2].value)
        else:
            length = int(len(a.obj) - offset)
        # frombuffer shares memory with the underlying buffer (no copy).
        array = numpy.frombuffer(
            a.obj, dtype=numpy.uint8, count=length, offset=offset)
        temp = Js(array)
        temp.put('length', Js(length))
        # Keep a reference so the view's backing store stays alive.
        temp.buff = array
        return temp
    # No recognized argument: empty typed array.
    temp = Js(numpy.full(0, 0, dtype=numpy.uint8))
    temp.put('length', Js(0))
    return temp
# Wire the constructor into the JS object model: `new Uint8Array(...)`
# delegates to the same function, and the spec'd `length` of the
# constructor itself is 3 (length/buffer, byteOffset, length).
Uint8Array.create = Uint8Array
Uint8Array.own['length']['value'] = Js(3)

# Non-writable, non-enumerable `prototype`, per the ES spec.
# NOTE(review): Uint8ArrayPrototype is not defined in this module --
# presumably provided by a sibling prototypes module; confirm import order.
Uint8Array.define_own_property(
    'prototype', {
        'value': Uint8ArrayPrototype,
        'enumerable': False,
        'writable': False,
        'configurable': False
    })

# Back-link from the prototype to the constructor (writable/configurable).
Uint8ArrayPrototype.define_own_property(
    'constructor', {
        'value': Uint8Array,
        'enumerable': False,
        'writable': True,
        'configurable': True
    })

# One byte per element for Uint8Array.
Uint8ArrayPrototype.define_own_property('BYTES_PER_ELEMENT', {
    'value': Js(1),
    'enumerable': False,
    'writable': False,
    'configurable': False
})
| gpl-3.0 |
etherkit/OpenBeacon2 | client/win/venv/Lib/site-packages/pip/_vendor/html5lib/treebuilders/etree_lxml.py | 102 | 14122 | """Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:
Text or comments as siblings of the root element
Docypes with no name
When any of these things occur, we emit a DataLossWarning
"""
from __future__ import absolute_import, division, unicode_literals
# pylint:disable=protected-access
import warnings
import re
import sys
from . import base
from ..constants import DataLossWarning
from .. import constants
from . import etree as etree_builders
from .. import _ihatexml
import lxml.etree as etree
fullTree = True
tag_regexp = re.compile("{([^}]*)}(.*)")
comment_type = etree.Comment("asd").tag
class DocumentType(object):
    """Plain value object describing a DOCTYPE declaration.

    Holds the doctype name together with its public and system
    identifiers, exactly as parsed.
    """

    def __init__(self, name, publicId, systemId):
        self.name, self.publicId, self.systemId = name, publicId, systemId
class Document(object):
    """Document wrapper around an lxml ElementTree.

    ``_elementTree`` is attached later (by the tree builder); until then
    only the child-node bookkeeping list exists.
    """

    def __init__(self):
        self._elementTree = None
        self._childNodes = []

    def appendChild(self, element):
        # lxml models siblings of the root element by attaching them
        # directly after the root node.
        root = self._elementTree.getroot()
        root.addnext(element._element)

    @property
    def childNodes(self):
        """Read-only view of the document's logical children."""
        return self._childNodes
def testSerializer(element):
    """Serialize an lxml tree/fragment into the html5lib test format.

    Produces one "|"-prefixed line per node, indented two spaces per
    depth level, handling full trees (with doctype), fragments, bare
    text, comments and namespaced elements/attributes.
    """
    rv = []
    infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)

    def serializeElement(element, indent=0):
        if not hasattr(element, "tag"):
            if hasattr(element, "getroot"):
                # Full tree case
                rv.append("#document")
                if element.docinfo.internalDTD:
                    if not (element.docinfo.public_id or
                            element.docinfo.system_url):
                        dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                    else:
                        dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
                            element.docinfo.root_name,
                            element.docinfo.public_id,
                            element.docinfo.system_url)
                    rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
                # Rewind to the first sibling of the root (lxml keeps
                # root-level comments as siblings of the root element).
                next_element = element.getroot()
                while next_element.getprevious() is not None:
                    next_element = next_element.getprevious()
                while next_element is not None:
                    serializeElement(next_element, indent + 2)
                    next_element = next_element.getnext()
            elif isinstance(element, str) or isinstance(element, bytes):
                # Text in a fragment
                assert isinstance(element, str) or sys.version_info[0] == 2
                rv.append("|%s\"%s\"" % (' ' * indent, element))
            else:
                # Fragment case
                rv.append("#document-fragment")
                for next_element in element:
                    serializeElement(next_element, indent + 2)
        elif element.tag == comment_type:
            rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
            if hasattr(element, "tail") and element.tail:
                rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
        else:
            assert isinstance(element, etree._Element)
            # Split "{namespace}tag" into a known prefix + local name.
            nsmatch = etree_builders.tag_regexp.match(element.tag)
            if nsmatch is not None:
                ns = nsmatch.group(1)
                tag = nsmatch.group(2)
                prefix = constants.prefixes[ns]
                rv.append("|%s<%s %s>" % (' ' * indent, prefix,
                                          infosetFilter.fromXmlName(tag)))
            else:
                rv.append("|%s<%s>" % (' ' * indent,
                                       infosetFilter.fromXmlName(element.tag)))

            if hasattr(element, "attrib"):
                attributes = []
                for name, value in element.attrib.items():
                    nsmatch = tag_regexp.match(name)
                    if nsmatch is not None:
                        ns, name = nsmatch.groups()
                        name = infosetFilter.fromXmlName(name)
                        prefix = constants.prefixes[ns]
                        attr_string = "%s %s" % (prefix, name)
                    else:
                        attr_string = infosetFilter.fromXmlName(name)
                    attributes.append((attr_string, value))

                # Attributes are emitted in sorted order for stable output.
                for name, value in sorted(attributes):
                    rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))

            if element.text:
                rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
        indent += 2
        for child in element:
            serializeElement(child, indent)
        # Tail text belongs to the parent's level, hence indent - 2.
        if hasattr(element, "tail") and element.tail:
            rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
    serializeElement(element, 0)

    return "\n".join(rv)
def tostring(element):
    """Serialize an element and its child nodes to a string"""
    rv = []

    def serializeElement(element):
        if not hasattr(element, "tag"):
            # ElementTree (whole document): emit the doctype, then the root.
            if element.docinfo.internalDTD:
                if element.docinfo.doctype:
                    dtd_str = element.docinfo.doctype
                else:
                    dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
                rv.append(dtd_str)
            serializeElement(element.getroot())
        elif element.tag == comment_type:
            rv.append("<!--%s-->" % (element.text,))
        else:
            # This is assumed to be an ordinary element
            # NOTE(review): attribute values are not escaped here -- fine
            # for test output, not for production serialization.
            if not element.attrib:
                rv.append("<%s>" % (element.tag,))
            else:
                attr = " ".join(["%s=\"%s\"" % (name, value)
                                 for name, value in element.attrib.items()])
                rv.append("<%s %s>" % (element.tag, attr))
            if element.text:
                rv.append(element.text)

            for child in element:
                serializeElement(child)

            rv.append("</%s>" % (element.tag,))

        # Tail text follows the element's closing tag.
        if hasattr(element, "tail") and element.tail:
            rv.append(element.tail)

    serializeElement(element)

    return "".join(rv)
class TreeBuilder(base.TreeBuilder):
    """html5lib tree builder producing lxml.etree trees.

    Element/Comment/Attributes classes are created per-instance inside
    __init__ so they can close over this builder's InfosetFilter, which
    coerces HTML names into ones lxml/XML will accept.
    """
    documentClass = Document
    doctypeClass = DocumentType
    elementClass = None      # set in __init__ (closure over infosetFilter)
    commentClass = None      # set in __init__ (closure over infosetFilter)
    fragmentClass = Document
    implementation = etree

    def __init__(self, namespaceHTMLElements, fullTree=False):
        builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
        infosetFilter = self.infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
        self.namespaceHTMLElements = namespaceHTMLElements

        class Attributes(dict):
            # Dict subclass that mirrors every write into the underlying
            # lxml element's attrib, coercing names on the way through.
            def __init__(self, element, value=None):
                if value is None:
                    value = {}
                self._element = element
                dict.__init__(self, value)  # pylint:disable=non-parent-init-called
                for key, value in self.items():
                    if isinstance(key, tuple):
                        name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
                    else:
                        name = infosetFilter.coerceAttribute(key)
                    self._element._element.attrib[name] = value

            def __setitem__(self, key, value):
                dict.__setitem__(self, key, value)
                if isinstance(key, tuple):
                    name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1]))
                else:
                    name = infosetFilter.coerceAttribute(key)
                self._element._element.attrib[name] = value

        class Element(builder.Element):
            def __init__(self, name, namespace):
                name = infosetFilter.coerceElement(name)
                builder.Element.__init__(self, name, namespace=namespace)
                self._attributes = Attributes(self)

            def _setName(self, name):
                self._name = infosetFilter.coerceElement(name)
                self._element.tag = self._getETreeTag(
                    self._name, self._namespace)

            def _getName(self):
                return infosetFilter.fromXmlName(self._name)

            name = property(_getName, _setName)

            def _getAttributes(self):
                return self._attributes

            def _setAttributes(self, attributes):
                self._attributes = Attributes(self, attributes)

            attributes = property(_getAttributes, _setAttributes)

            def insertText(self, data, insertBefore=None):
                data = infosetFilter.coerceCharacters(data)
                builder.Element.insertText(self, data, insertBefore)

            def appendChild(self, child):
                builder.Element.appendChild(self, child)

        class Comment(builder.Comment):
            def __init__(self, data):
                data = infosetFilter.coerceComment(data)
                builder.Comment.__init__(self, data)

            def _setData(self, data):
                data = infosetFilter.coerceComment(data)
                self._element.text = data

            def _getData(self):
                return self._element.text

            data = property(_getData, _setData)

        self.elementClass = Element
        self.commentClass = Comment
        # self.fragmentClass = builder.DocumentFragment
        base.TreeBuilder.__init__(self, namespaceHTMLElements)

    def reset(self):
        base.TreeBuilder.reset(self)
        # Comments seen before the root element are buffered until
        # insertRoot can attach them (see insertCommentInitial).
        self.insertComment = self.insertCommentInitial
        self.initial_comments = []
        self.doctype = None

    def testSerializer(self, element):
        return testSerializer(element)

    def getDocument(self):
        # Module-level `fullTree` decides whether callers get the whole
        # ElementTree or just the root element.
        if fullTree:
            return self.document._elementTree
        else:
            return self.document._elementTree.getroot()

    def getFragment(self):
        # Flatten the fragment container: leading text, children, tail.
        fragment = []
        element = self.openElements[0]._element
        if element.text:
            fragment.append(element.text)
        fragment.extend(list(element))
        if element.tail:
            fragment.append(element.tail)
        return fragment

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]

        if not name:
            warnings.warn("lxml cannot represent empty doctype", DataLossWarning)
            self.doctype = None
        else:
            coercedName = self.infosetFilter.coerceElement(name)
            if coercedName != name:
                warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning)

            doctype = self.doctypeClass(coercedName, publicId, systemId)
            self.doctype = doctype

    def insertCommentInitial(self, data, parent=None):
        # Before the root exists, lxml has nowhere to hang comments;
        # buffer them (attached later in insertRoot).
        assert parent is None or parent is self.document
        assert self.document._elementTree is None
        self.initial_comments.append(data)

    def insertCommentMain(self, data, parent=None):
        if (parent == self.document and
                self.document._elementTree.getroot()[-1].tag == comment_type):
            warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning)
        super(TreeBuilder, self).insertComment(data, parent)

    def insertRoot(self, token):
        # Because of the way libxml2 works, it doesn't seem to be possible to
        # alter information like the doctype after the tree has been parsed.
        # Therefore we need to use the built-in parser to create our initial
        # tree, after which we can add elements like normal
        docStr = ""
        if self.doctype:
            assert self.doctype.name
            docStr += "<!DOCTYPE %s" % self.doctype.name
            if (self.doctype.publicId is not None or
                    self.doctype.systemId is not None):
                docStr += (' PUBLIC "%s" ' %
                           (self.infosetFilter.coercePubid(self.doctype.publicId or "")))
                if self.doctype.systemId:
                    sysid = self.doctype.systemId
                    if sysid.find("'") >= 0 and sysid.find('"') >= 0:
                        warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning)
                        sysid = sysid.replace("'", 'U00027')
                    if sysid.find("'") >= 0:
                        docStr += '"%s"' % sysid
                    else:
                        docStr += "'%s'" % sysid
                else:
                    docStr += "''"
            docStr += ">"
            if self.doctype.name != token["name"]:
                warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning)
        # Placeholder root; renamed to the real root tag below.
        docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>"
        root = etree.fromstring(docStr)

        # Append the initial comments:
        for comment_token in self.initial_comments:
            comment = self.commentClass(comment_token["data"])
            root.addprevious(comment._element)

        # Create the root document and add the ElementTree to it
        self.document = self.documentClass()
        self.document._elementTree = root.getroottree()

        # Give the root element the right name
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        if namespace is None:
            etree_tag = name
        else:
            etree_tag = "{%s}%s" % (namespace, name)
        root.tag = etree_tag

        # Add the root element to the internal child/open data structures
        root_element = self.elementClass(name, namespace)
        root_element._element = root
        self.document._childNodes.append(root_element)
        self.openElements.append(root_element)

        # Reset to the default insert comment function
        self.insertComment = self.insertCommentMain
| gpl-3.0 |
thaumos/ansible | lib/ansible/plugins/callback/log_plays.py | 42 | 3291 | # (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: log_plays
type: notification
short_description: write playbook output to log file
version_added: historical
description:
- This callback writes playbook output to a file per host in the `/var/log/ansible/hosts` directory
- "TODO: make this configurable"
requirements:
- Whitelist in configuration
- A writeable /var/log/ansible/hosts directory by the user executing Ansible on the controller
'''
import os
import time
import json
from ansible.module_utils._text import to_bytes
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
# NOTE: in Ansible 1.2 or later general logging is available without
# this plugin, just set ANSIBLE_LOG_PATH as an environment variable
# or log_path in the DEFAULTS section of your ansible configuration
# file. This callback is an example of per hosts logging for those
# that want it.
class CallbackModule(CallbackBase):
    """
    logs playbook results, per host, in /var/log/ansible/hosts
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'log_plays'
    CALLBACK_NEEDS_WHITELIST = True

    TIME_FORMAT = "%b %d %Y %H:%M:%S"
    MSG_FORMAT = "%(now)s - %(category)s - %(data)s\n\n"

    def __init__(self):

        super(CallbackModule, self).__init__()

        # Side effect at plugin load: ensure the per-host log directory exists.
        if not os.path.exists("/var/log/ansible/hosts"):
            os.makedirs("/var/log/ansible/hosts")

    def log(self, host, category, data):
        """Append one timestamped entry for `host` to its log file.

        Mapping results are JSON-encoded (with the module invocation, if
        any, prefixed); anything else is written via the format string.
        """
        if isinstance(data, MutableMapping):
            if '_ansible_verbose_override' in data:
                # avoid logging extraneous data
                data = 'omitted'
            else:
                # Copy before popping so the caller's result dict is untouched.
                data = data.copy()
                invocation = data.pop('invocation', None)
                data = json.dumps(data, cls=AnsibleJSONEncoder)
                if invocation is not None:
                    data = json.dumps(invocation) + " => %s " % data

        path = os.path.join("/var/log/ansible/hosts", host)
        now = time.strftime(self.TIME_FORMAT, time.localtime())

        msg = to_bytes(self.MSG_FORMAT % dict(now=now, category=category, data=data))
        # Append in binary mode; msg is already encoded by to_bytes.
        with open(path, "ab") as fd:
            fd.write(msg)

    def runner_on_failed(self, host, res, ignore_errors=False):
        self.log(host, 'FAILED', res)

    def runner_on_ok(self, host, res):
        self.log(host, 'OK', res)

    def runner_on_skipped(self, host, item=None):
        self.log(host, 'SKIPPED', '...')

    def runner_on_unreachable(self, host, res):
        self.log(host, 'UNREACHABLE', res)

    def runner_on_async_failed(self, host, res, jid):
        self.log(host, 'ASYNC_FAILED', res)

    def playbook_on_import_for_host(self, host, imported_file):
        self.log(host, 'IMPORTED', imported_file)

    def playbook_on_not_import_for_host(self, host, missing_file):
        self.log(host, 'NOTIMPORTED', missing_file)
| gpl-3.0 |
fyfcauc/android_external_chromium-org | chrome/common/extensions/docs/server2/patcher.py | 121 | 1026 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Patcher(object):
  '''Abstract interface for fetching and applying a codereview patchset.

  Concrete subclasses must implement every method; calling them on this
  base class raises NotImplementedError.
  '''
  def GetPatchedFiles(self, version=None):
    '''Returns patched files as(added_files, deleted_files, modified_files)
    from the patchset specified by |version|.
    '''
    raise NotImplementedError(self.__class__)

  def GetVersion(self):
    '''Returns patch version. Returns None when nothing is patched by the
    patcher.
    '''
    raise NotImplementedError(self.__class__)

  def Apply(self, paths, file_system, version=None):
    '''Apply the patch to added/modified files. Returns Future with patched
    data. Throws FileNotFoundError if |paths| contains deleted files.
    '''
    raise NotImplementedError(self.__class__)

  def GetIdentity(self):
    '''Returns a string that identifies this patch. Typically it would be the
    codereview server's ID for this patch.
    '''
    raise NotImplementedError(self.__class__)
| bsd-3-clause |
felipeZ/nonAdiabaticCoupling | nac/integrals/multipole_matrices.py | 1 | 3122 | from compute_integrals import compute_integrals_multipole
from nac.common import (
Matrix, retrieve_hdf5_data, is_data_in_hdf5,
store_arrays_in_hdf5, tuplesXYZ_to_plams)
from os.path import join
import os
import uuid
def get_multipole_matrix(config: dict, inp: dict, multipole: str) -> Matrix:
    """
    Retrieve the `multipole` number `i` from the trajectory. Otherwise compute it.

    Looks the matrix up in the project HDF5 file under
    ``<project_name>/multipole/point_<i>/<multipole>``; on a miss it is
    computed from the geometry in ``inp.mol`` and cached back into HDF5.
    """
    root = join(config['project_name'], 'multipole',
                f'point_{inp.i + config.enumerate_from}')
    path_hdf5 = config['path_hdf5']
    path_multipole_hdf5 = join(root, multipole)
    matrix_multipole = search_multipole_in_hdf5(
        path_hdf5, path_multipole_hdf5, multipole)

    if matrix_multipole is None:
        # Cache miss: compute and persist for subsequent runs.
        matrix_multipole = compute_matrix_multipole(inp.mol, config, multipole)
        store_arrays_in_hdf5(path_hdf5, path_multipole_hdf5, matrix_multipole)

    return matrix_multipole
def search_multipole_in_hdf5(path_hdf5: str, path_multipole_hdf5: str, multipole: str):
    """Look up a previously stored multipole matrix in the HDF5 file.

    Returns the stored array when the node exists, otherwise None
    (signalling the caller to compute it).
    """
    if not is_data_in_hdf5(path_hdf5, path_multipole_hdf5):
        print(f"computing multipole: {multipole}")
        return None
    print(f"retrieving multipole: {multipole} from the hdf5")
    return retrieve_hdf5_data(path_hdf5, path_multipole_hdf5)
def compute_matrix_multipole(
        mol: list, config: dict, multipole: str) -> Matrix:
    """
    Compute the some `multipole` matrix: overlap, dipole, etc. for a given geometry `mol`.
    Compute the Multipole matrix in spherical coordinates.

    Note: for the dipole onwards the super_matrix contains all the matrices stack all the
    0-axis.

    :returns: Matrix with entries <ψi | x^i y^j z^k | ψj>
    """
    path_hdf5 = config['path_hdf5']

    # Write molecule in temporal file
    path = join(config["scratch_path"], f"molecule_{uuid.uuid4()}.xyz")
    mol_plams = tuplesXYZ_to_plams(mol)
    mol_plams.write(path)

    # name of the basis set
    basis_name = config["cp2k_general_settings"]["basis"]

    # NOTE(review): if `multipole` is none of overlap/dipole/quadrupole,
    # matrix_multipole is never bound and the final return raises
    # NameError (after still removing the temp file) -- confirm callers
    # only pass these three values.
    if multipole == 'overlap':
        matrix_multipole = compute_integrals_multipole(
            path, path_hdf5, basis_name, multipole)
    elif multipole == 'dipole':
        # The tensor contains the overlap + {x, y, z} dipole matrices
        super_matrix = compute_integrals_multipole(
            path, path_hdf5, basis_name, multipole)
        dim = super_matrix.shape[1]

        # Reshape the super_matrix as a tensor containing overlap + {x, y, z} dipole matrices
        matrix_multipole = super_matrix.reshape(4, dim, dim)

    elif multipole == 'quadrupole':
        # The tensor contains the overlap + {xx, xy, xz, yy, yz, zz} quadrupole matrices
        super_matrix = compute_integrals_multipole(
            path, path_hdf5, basis_name, multipole)
        dim = super_matrix.shape[1]
        # Reshape to 3d tensor containing overlap + {xx, xy, xz, yy, yz, zz} quadrupole matrices
        matrix_multipole = super_matrix.reshape(7, dim, dim)

    # Delete the tmp molecule file
    os.remove(path)

    return matrix_multipole
| mit |
m2dsupsdlclass/lectures-labs | labs/10_unsupervised_generative_models/solutions/conv_encoder.py | 1 | 1359 |
from tensorflow.keras.layers import BatchNormalization
def make_conv_encoder(img_rows, img_cols, img_chns,
                      latent_dim, intermediate_dim):
    """Build the convolutional VAE encoder.

    Four Conv2D blocks (two of them with stride 2, i.e. 4x spatial
    downsampling overall) feed a dense layer that produces the two
    Gaussian posterior heads: z_mean and z_log_var.

    NOTE(review): `filters`, `kernel_size` and the Keras layer names
    (Input, Conv2D, Flatten, Dense, Model) are notebook-level globals
    defined in earlier cells -- confirm they are in scope before calling.
    """
    x = Input(shape=(img_rows, img_cols, img_chns))
    x_conv = Conv2D(filters,
                    kernel_size=kernel_size,
                    padding='same', activation='relu')(x)
    x_conv = BatchNormalization()(x_conv)
    x_conv = Conv2D(filters,
                    kernel_size=kernel_size,
                    padding='same', activation='relu',
                    strides=(2, 2))(x_conv)
    x_conv = BatchNormalization()(x_conv)
    x_conv = Conv2D(filters,
                    kernel_size=kernel_size,
                    padding='same', activation='relu')(x_conv)
    x_conv = BatchNormalization()(x_conv)
    x_conv = Conv2D(filters,
                    kernel_size=kernel_size,
                    padding='same', activation='relu',
                    strides=(2, 2))(x_conv)
    flat = Flatten()(x_conv)
    hidden = Dense(intermediate_dim, activation='relu')(flat)

    # Two linear heads parameterizing the approximate posterior q(z|x).
    z_mean = Dense(latent_dim)(hidden)
    z_log_var = Dense(latent_dim)(hidden)
    return Model(inputs=x, outputs=[z_mean, z_log_var],
                 name='convolutional_encoder')
# Instantiate the encoder with the notebook-level hyperparameters
# (assumed defined in earlier cells -- confirm).
conv_encoder = make_conv_encoder(img_rows, img_cols, img_chns,
                                 latent_dim, intermediate_dim)
| mit |
ptemplier/ansible | lib/ansible/modules/network/panos/panos_dag.py | 29 | 4264 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_dag
short_description: create a dynamic address group
description:
- Create a dynamic address group object in the firewall used for policy rules
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
default: null
password:
description:
- password for authentication
required: true
default: null
username:
description:
- username for authentication
required: false
default: "admin"
dag_name:
description:
- name of the dynamic address group
required: true
default: null
dag_filter:
description:
- dynamic filter user by the dynamic address group
required: true
default: null
commit:
description:
- commit if changed
required: false
default: true
'''
EXAMPLES = '''
- name: dag
panos_dag:
ip_address: "192.168.1.1"
password: "admin"
dag_name: "dag-1"
dag_filter: "'aws-tag.aws:cloudformation:logical-id.ServerInstance' and 'instanceState.running'"
'''
RETURN='''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
_ADDRGROUP_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
"/vsys/entry[@name='vsys1']/address-group/entry[@name='%s']"
def addressgroup_exists(xapi, group_name):
    """Return True when an address group named `group_name` already
    exists on the firewall, False otherwise."""
    xapi.get(_ADDRGROUP_XPATH % group_name)
    entry = xapi.element_root.find('.//entry')
    return entry is not None
def add_dag(xapi, dag_name, dag_filter):
    """Create the dynamic address group `dag_name` with `dag_filter`.

    Returns True when the group was created, False when a group with
    that name already exists (idempotent no-op).
    """
    if addressgroup_exists(xapi, dag_name):
        return False

    # setup the non encrypted part of the monitor
    element = '<dynamic>' + '<filter>%s</filter>' % dag_filter + '</dynamic>'
    xapi.set(xpath=_ADDRGROUP_XPATH % dag_name, element=element)

    return True
def main():
    """Ansible module entry point: validate params, connect to the
    firewall, create the dynamic address group and optionally commit."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        dag_name=dict(required=True),
        dag_filter=dict(required=True),
        commit=dict(type='bool', default=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')

    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']

    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )

    dag_name = module.params['dag_name']
    dag_filter = module.params['dag_filter']
    commit = module.params['commit']

    changed = add_dag(xapi, dag_name, dag_filter)

    # Only commit when something actually changed and the user asked for it.
    if changed and commit:
        xapi.commit(cmd="<commit></commit>", sync=True, interval=1)

    module.exit_json(changed=changed, msg="okey dokey")
| gpl-3.0 |
mahinthjoe/bedrock | bedrock/press/forms.py | 19 | 6883 | # coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from lib.l10n_utils.dotlang import _, _lazy
from bedrock.mozorg.forms import (DateInput, EmailInput, HoneyPotWidget,
NumberInput, TelInput, TimeInput, URLInput)
SPEAKER_REQUEST_FILE_SIZE_LIMIT = 5242880 # 5MB
class SpeakerRequestForm(forms.Form):
    """Speaker-request contact form.

    Declarative field groups: event basics, contact info, event details,
    presentation details, an optional attachment (size-limited) and a
    spam honeypot (`office_fax`) that rejects any submission filling it.
    """
    # event fields
    sr_event_name = forms.CharField(
        max_length=255,
        required=True,
        error_messages={
            'required': _lazy(u'Please enter a name for the event.'),
        },
        widget=forms.TextInput(
            attrs={
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_event_url = forms.URLField(
        max_length=2000,
        required=True,
        error_messages={
            'required': _lazy(u'Please enter a URL.'),
            'invalid': _lazy(u'Please enter a valid URL.'),
        },
        widget=URLInput(
            attrs={
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
                'placeholder': _lazy(u'http://www.my-event.com'),
            }
        ),
    )
    sr_event_date = forms.CharField(
        required=True,
        error_messages={
            'required': _lazy(u'Please provide a date.'),
        },
        widget=DateInput(
            attrs={
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_event_time = forms.CharField(
        required=True,
        error_messages={
            'required': _lazy(u'Please provide a time.'),
        },
        widget=TimeInput(
            attrs={
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_guest_speaker1 = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_guest_speaker2 = forms.CharField(
        max_length=200,
        required=False,
    )

    # contact fields
    sr_contact_name = forms.CharField(
        max_length=200,
        required=True,
        widget=forms.TextInput(
            attrs={
                'required': 'required',
                'class': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_contact_title = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_contact_company = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_contact_phone = forms.CharField(
        max_length=50,
        required=False,
        widget=TelInput(),
    )
    sr_contact_email = forms.EmailField(
        max_length=254,  # max length allowed for emails
        required=True,
        error_messages={
            'invalid': _lazy(u'Please enter a valid email address'),
        },
        widget=EmailInput(
            attrs={
                'required': 'required',
                'class': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_contact_company_url = forms.URLField(
        max_length=2000,
        required=False,
        widget=forms.TextInput(
            attrs={
                'placeholder': _lazy(u'http://www.my-company.com'),
            }
        ),
    )

    # event details fields
    sr_event_venue = forms.CharField(
        max_length=400,
        required=False,
    )
    sr_event_theme = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_event_goal = forms.CharField(
        max_length=300,
        required=False,
    )
    sr_event_format = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_event_audience_size = forms.IntegerField(
        required=False,
        widget=NumberInput(
            attrs={
                'min': 1,
                'placeholder': 25,
            }
        ),
    )
    sr_event_audience_demographics = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_speakers_confirmed = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_speakers_invited = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_speakers_past = forms.CharField(
        max_length=1000,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_media_coverage = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_sponsors = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_confirmation_deadline = forms.DateField(
        required=False,
        widget=DateInput(),
    )

    # presentation details fields
    sr_presentation_type = forms.MultipleChoiceField(
        required=False,
        choices=(
            ('keynote', _lazy(u'Keynote')),
            ('presentation', _lazy(u'Presentation')),
            ('fireside chat', _lazy(u'Fireside Chat')),
            ('panel', _lazy(u'Panel')),
            ('other', _lazy(u'Other')),
        ),
        widget=forms.CheckboxSelectMultiple(),
    )
    sr_presentation_panelists = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_presentation_topic = forms.CharField(
        required=False,
        max_length=255,
    )
    sr_presentation_length = forms.IntegerField(
        required=False,
        widget=NumberInput(
            attrs={
                'min': 0.5,
                'step': 0.5,
                'placeholder': 2.5,
            }
        )
    )

    # additional info fields
    sr_attachment = forms.FileField(
        required=False,
    )

    # honeypot
    office_fax = forms.CharField(widget=HoneyPotWidget, required=False)

    def clean_sr_attachment(self):
        """Reject attachments above SPEAKER_REQUEST_FILE_SIZE_LIMIT (5MB)."""
        cleaned_data = super(SpeakerRequestForm, self).clean()
        attachment = cleaned_data.get("sr_attachment")

        if attachment:
            # NOTE(review): `_size` is a non-public UploadedFile attribute;
            # the public `size` property would be safer -- confirm before changing.
            if attachment._size > SPEAKER_REQUEST_FILE_SIZE_LIMIT:
                raise forms.ValidationError(
                    _("Attachment must not exceed 5MB"))

        return attachment

    def clean_office_fax(self):
        """Honeypot check: any value in the hidden field means a bot."""
        cleaned_data = super(SpeakerRequestForm, self).clean()
        honeypot = cleaned_data.pop('office_fax', None)

        if honeypot:
            raise forms.ValidationError(
                _('Your submission could not be processed'))
| mpl-2.0 |
martinbede/second-sight | tensorflow/python/framework/tensor_shape_div_test.py | 15 | 1483 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test that old style division works for Dimension."""
from __future__ import absolute_import
# from __future__ import division # Intentionally skip this import
from __future__ import print_function
import six
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class DimensionDivTest(test_util.TensorFlowTestCase):
  """Checks Python 2 classic division (`__div__`) on Dimension objects."""

  def testDivSucceeds(self):
    """Without from __future__ import division, __div__ should work."""
    if six.PY2:  # Old division exists only in Python 2
      values = [tensor_shape.Dimension(x) for x in (3, 7, 11, None)]
      for x in values:
        for y in values:
          # Classic `/` must agree with floor division for Dimensions
          # (None models an unknown dimension).
          self.assertEqual((x / y).value, (x // y).value)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
marioluan/mit-opencourseware-cs | 600/unit-2/introduction-to-simulation-and-random-walks/problem-set/ps5/ps5.py | 1 | 11443 | DEBUG = False
# 6.00 Problem Set 5
# RSS Feed Filter
import feedparser
import string
import re
import time
import copy
from project_util import translate_html
from news_gui import Popup
#-----------------------------------------------------------------------
#
# Problem Set 5
#======================
# Code for retrieving and parsing
# Google and Yahoo News feeds
# Do not change this code
#======================
def process(url):
    """
    Fetches news items from the rss url and parses them.
    Returns a list of NewsStory-s.
    """
    feed = feedparser.parse(url)
    entries = feed.entries
    ret = []
    for entry in entries:
        guid = entry.guid
        title = translate_html(entry.title)
        link = entry.link
        summary = translate_html(entry.summary)
        try:
            # Entries without tags raise AttributeError; default to no
            # subject.  NOTE(review): an entry with an *empty* tag list
            # would raise IndexError, which is not caught here -- confirm
            # the feeds in use never produce that.
            subject = translate_html(entry.tags[0]['term'])
        except AttributeError:
            subject = ""
        newsStory = NewsStory(guid, title, subject, summary, link)
        ret.append(newsStory)
    return ret
#======================
# Part 1
# Data structure design
#======================
# Problem 1
class NewsStory:
    """Value object holding one parsed RSS news item.

    Fields are set once at construction and exposed through plain
    accessor methods (the pre-property style used throughout this file).
    """

    def __init__(self, guid, title, subject, summary, link):
        self.guid = guid
        self.title = title
        self.subject = subject
        self.summary = summary
        self.link = link

    def get_guid(self):
        """Globally unique identifier of the story."""
        return self.guid

    def get_title(self):
        """Headline text."""
        return self.title

    def get_subject(self):
        """Subject/category tag (may be the empty string)."""
        return self.subject

    def get_summary(self):
        """Short body text."""
        return self.summary

    def get_link(self):
        """URL of the full article."""
        return self.link
#======================
# Part 2
# Triggers
#======================
class Trigger(object):
    """Abstract base class for news-story alert predicates."""

    def evaluate(self, story):
        """
        Returns True if an alert should be generated
        for the given news item, or False otherwise.
        """
        # Concrete subclasses must override this.
        raise NotImplementedError

    def __str__(self):
        # Triggers identify themselves by their concrete class name.
        return self.__class__.__name__
# Whole Word Triggers
# Problems 2-5
class WordUtils():
@staticmethod
def remove_punctuation(text):
if DEBUG:
print 'iniail ', text
for punctuation in string.punctuation:
text = text.replace(punctuation, ' ')
if DEBUG:
print 'final ', text
return text
@staticmethod
def normalize_text(text):
return WordUtils.remove_punctuation(text).split(' ')
class WordTrigger(Trigger):
    """Base trigger that fires when a given word appears in some text."""

    def __init__(self, word):
        self.word = word

    def is_word_in(self, text):
        """Case-insensitive whole-word test of self.word against *text*."""
        target = self.word.lower()
        for candidate in WordUtils.normalize_text(text):
            if candidate.lower() == target:
                return True
        return False
class TitleTrigger(WordTrigger):
    # Fires when the trigger word appears in the story's title.
    def evaluate(self, story):
        title = story.get_title()
        return self.is_word_in(title)
class SubjectTrigger(WordTrigger):
    # Fires when the trigger word appears in the story's subject tag.
    def evaluate(self, story):
        subject = story.get_subject()
        return self.is_word_in(subject)
class SummaryTrigger(WordTrigger):
    # Fires when the trigger word appears in the story's summary.
    def evaluate(self, story):
        summary = story.get_summary()
        return self.is_word_in(summary)
# Composite Triggers
# Problems 6-8
class NotTrigger(Trigger):
    """Logical negation of a wrapped trigger."""

    def __init__(self, trigger):
        self.trigger = trigger

    def evaluate(self, news_item):
        # Fires exactly when the wrapped trigger does not.
        inner_fired = self.trigger.evaluate(news_item)
        return not inner_fired
class AndTrigger(Trigger):
    """Composite trigger firing only when both children fire."""

    def __init__(self, trigger_a, trigger_b):
        self.trigger_a = trigger_a
        self.trigger_b = trigger_b

    def evaluate(self, news_item):
        # Short-circuits like the underlying ``and``: trigger_b is not
        # consulted when trigger_a already failed.
        if not self.trigger_a.evaluate(news_item):
            return False
        return self.trigger_b.evaluate(news_item)
class OrTrigger(Trigger):
    """Composite trigger firing when either child fires."""

    def __init__(self, trigger_a, trigger_b):
        self.trigger_a = trigger_a
        self.trigger_b = trigger_b

    def evaluate(self, news_item):
        # Short-circuits like the underlying ``or``.
        if self.trigger_a.evaluate(news_item):
            return True
        return self.trigger_b.evaluate(news_item)
# Phrase Trigger
# Question 9
class PhraseTrigger(Trigger):
    """Fires when an exact phrase occurs in a story's title, subject or
    summary (case-sensitive substring match)."""

    def __init__(self, phrase):
        self.phrase = phrase

    def is_phrase_in(self, text):
        """Return True if the phrase occurs verbatim inside *text*."""
        return self.phrase in text

    def evaluate(self, story):
        # The original also built Title/Subject/Summary trigger objects
        # here but never used them; the substring checks below are the
        # whole logic, so the dead locals have been removed.
        return (self.is_phrase_in(story.get_title())
                or self.is_phrase_in(story.get_summary())
                or self.is_phrase_in(story.get_subject()))
#======================
# Part 3
# Filtering
#======================
def filter_stories(stories, triggerlist):
    """
    Takes in a list of NewsStory-s.
    Returns only those stories for whom
    a trigger in triggerlist fires.

    Order is preserved and each story appears at most once, even when
    several triggers match it.
    """
    matching = []
    for story in stories:
        # any() short-circuits just like the original break-on-first-hit.
        if any(trigger.evaluate(story) for trigger in triggerlist):
            matching.append(story)
    return matching
#======================
# Part 4
# User-Specified Triggers
#======================
# Maps keywords found in trigger-config lines to trigger classes.
# NOTE(review): SUMMARY, OR and NOT have no entry, so such config lines
# resolve to no trigger class; 'ADD' maps to the sentinel string 'ADD'
# rather than a class and is special-cased downstream.
TRIGGERS = {
    'TITLE': TitleTrigger,
    'SUBJECT': SubjectTrigger,
    'PHRASE': PhraseTrigger,
    'AND': AndTrigger,
    'ADD': 'ADD'
}
def getValidLinesFromFile(filename):
    """Return the meaningful lines of a trigger-config file.

    Each line is right-stripped; blank lines and '#' comment lines are
    skipped.  The file handle is now closed deterministically (the
    original never closed it), and the local that shadowed the builtin
    ``all`` is gone.
    """
    with open(filename, "r") as triggerfile:
        stripped = [line.rstrip() for line in triggerfile.readlines()]
    return [line for line in stripped if line and line[0] != '#']
def get_word_from_line(line):
    """Return the last space-separated token of a config line."""
    tokens = line.split(' ')
    return tokens[-1]
def get_phrase_from_line(line):
    """Return everything after the 'PHRASE' keyword (and one space)."""
    keyword = "PHRASE"
    offset = line.index(keyword) + len(keyword) + 1
    return line[offset:]
def get_trigger_class(line):
    # Substring scan of the line against the known keywords; returns the
    # mapped class (or the 'ADD' sentinel), falling through to None when
    # nothing matches.
    # NOTE(review): dict iteration order plus plain substring matching
    # means a line containing two keywords may resolve unpredictably --
    # confirm the config format forbids that.
    for name in TRIGGERS:
        if name in line:
            return TRIGGERS[name]
def get_trigger_name(line):
    """Return the first space-separated token (the trigger's name)."""
    return line.partition(' ')[0]
def is_word_trigger(trigger_class):
    # True when trigger_class is a WordTrigger subclass; None and the
    # string sentinel 'ADD' are filtered out first.
    response = False
    if trigger_class and type(trigger_class) != type(""):
        response = issubclass(trigger_class, WordTrigger)
    if DEBUG:
        print "trigger_class %s is_word_trigger: " % trigger_class, response
    return response
def is_phrase_trigger(trigger_class):
    # True when trigger_class is PhraseTrigger (or a subclass); None and
    # the 'ADD' string sentinel are filtered out first.
    response = False
    if trigger_class and type(trigger_class) != type(""):
        response = issubclass(trigger_class, PhraseTrigger)
    if DEBUG:
        print "trigger_class %s is_phrase_trigger: " % trigger_class, response
    return response
def is_and_or_trigger(trigger_class):
    # Recognises the dict form produced by build_trigger, i.e.
    # {"instance": {"trigger": <class>, ...}}, and reports whether that
    # class is AndTrigger or OrTrigger.
    # NOTE(review): when called with a bare class (as build_trigger does
    # before the dict exists) this always returns False, so AND lines
    # never reach build_logical_trigger via that path -- confirm the
    # intended flow.  The deepcopy also looks unnecessary for a pure
    # read-only inspection.
    trigger_class_copy = copy.deepcopy(trigger_class)
    response = False
    if type(trigger_class_copy) == type({}) and type(trigger_class_copy["instance"]) == type({}):
        trigger_class_copy = trigger_class_copy["instance"]["trigger"]
        response = trigger_class_copy == AndTrigger or trigger_class_copy == OrTrigger
    if DEBUG:
        print "trigger_class %s is_and_or_trigger: " % trigger_class_copy, response
    return response
def is_add_trigger(trigger):
    # True for the raw 'ADD' keyword string or a built trigger dict whose
    # name is 'ADD'; falsy inputs pass straight through.
    # NOTE(review): ``trigger["name"]`` raises TypeError when ``trigger``
    # is a bare class -- confirm callers never pass one here.
    return trigger and (trigger == TRIGGERS["ADD"] or trigger["name"] == TRIGGERS["ADD"])
def build_word_trigger(trigger_class, line):
    # Instantiate a word-based trigger from the line's last token.
    word = get_word_from_line(line)
    trigger = trigger_class(word)
    return trigger
def build_phrase_trigger(trigger_class, line):
    # Instantiate a phrase trigger from everything after the PHRASE keyword.
    phrase = get_phrase_from_line(line)
    trigger = trigger_class(phrase)
    return trigger
def build_logical_trigger(trigger_class, line):
    """Describe a binary logical trigger (AND/OR) without instantiating it.

    The operand triggers may not have been built yet, so only the class
    and the two operand names (tokens after the keyword) are recorded;
    build_composite_trigger() wires up the real instance later.  The
    original also computed an unused ``trigger_name`` local, removed here.
    """
    operand_names = line.split(' ')[2:]
    return {
        "trigger": trigger_class,
        "triggers_name": operand_names
    }
def build_add_trigger(trigger_class, line):
    """Return the trigger names listed after ADD (trigger_class is unused)."""
    tokens = line.split(' ')
    return tokens[1:]
def build_trigger(line):
    # Parse one config line into a descriptor dict; "instance" stays None
    # when the line's keyword is unknown.
    trigger_class = get_trigger_class(line)
    trigger = {
        "name": get_trigger_name(line),
        "line": line,
        "instance": None
    }
    # Dispatch on the kind of keyword found on the line.
    if is_word_trigger(trigger_class):
        trigger["instance"] = build_word_trigger(trigger_class, line)
    elif is_phrase_trigger(trigger_class):
        trigger["instance"] = build_phrase_trigger(trigger_class, line)
    elif is_and_or_trigger(trigger_class):
        trigger["instance"] = build_logical_trigger(trigger_class, line)
    elif is_add_trigger(trigger_class):
        trigger["instance"] = build_add_trigger(trigger_class, line)
    else:
        if DEBUG:
            print "No trigger found for line: ", line
    return trigger
def build_triggers(lines):
    # Build descriptors for every valid config line; AND/OR descriptors
    # are resolved into concrete composites against the triggers built so
    # far.  Descriptors without an instance are dropped.
    triggers = []
    for line in lines:
        trigger = build_trigger(line)
        if is_and_or_trigger(trigger):
            trigger = build_composite_trigger(triggers, trigger)
        if trigger["instance"]:
            triggers.append(trigger)
    return triggers
def build_composite_trigger(triggers, logical_trigger):
    # Resolve the two operand names recorded by build_logical_trigger
    # against the already-built triggers and instantiate the And/Or class.
    # NOTE(review): the ``elif``/``break`` stops the scan as soon as the
    # second operand is found, so if it precedes the first operand in
    # ``triggers`` then trigger_a is left as None -- confirm the config
    # guarantees operands are defined before they are referenced.
    composite_trigger = {
        "name": logical_trigger["name"],
        "instance": None,
        "line": logical_trigger["line"]
    }
    trigger_a = None
    trigger_b = None
    for trigger in triggers:
        if trigger["name"] == logical_trigger["instance"]["triggers_name"][0]:
            trigger_a = trigger["instance"]
        elif trigger["name"] == logical_trigger["instance"]["triggers_name"][1]:
            trigger_b = trigger["instance"]
            break
    composite_trigger["instance"] = logical_trigger["instance"]["trigger"](trigger_a, trigger_b)
    return composite_trigger
def get_trigger_by_name(triggers, name):
    """Return the 'instance' of the trigger dict named *name*, else None."""
    for candidate in triggers:
        if candidate["name"] == name:
            return candidate["instance"]
    return None
def build_trigger_set(triggers):
    # The ADD descriptor lists, by name, which built triggers are active;
    # collect their instances in the order named.
    trigger_set = []
    for trigger in triggers:
        if DEBUG:
            print trigger
        if is_add_trigger(trigger):
            for add_trigger in trigger["instance"]:
                # Resolve the name to the trigger's instance (None when
                # the name is unknown).
                add_trigger = get_trigger_by_name(triggers, add_trigger)
                trigger_set.append(add_trigger)
    return trigger_set
def readTriggerConfig(filename):
    """
    Returns a list of trigger objects
    that correspond to the rules set
    in the file filename
    """
    # Pipeline: raw lines -> trigger descriptors -> active trigger set.
    lines = getValidLinesFromFile(filename)
    triggers = build_triggers(lines)
    trigger_set = build_trigger_set(triggers)
    return trigger_set
import thread
def main_thread(p):
    """Poll the Google/Yahoo top-story feeds three times, popping up a
    window (via *p*) for each not-yet-seen story that matches the
    configured triggers."""
    # A sample trigger list - you'll replace
    # this with something more configurable in Problem 11
    # t1 = SubjectTrigger("Obama")
    # t2 = SummaryTrigger("MIT")
    # t3 = PhraseTrigger("Supreme Court")
    # t4 = OrTrigger(t2, t3)
    # triggerlist = [t1, t4]
    triggerlist = readTriggerConfig("triggers.txt")
    guidShown = []
    run = 3
    while run:
        print "Polling..."
        # Get stories from Google's Top Stories RSS news feed
        stories = process("http://news.google.com/?output=rss")
        # Get stories from Yahoo's Top Stories RSS news feed
        stories.extend(process("http://rss.news.yahoo.com/rss/topstories"))
        # Only select stories we're interested in
        stories = filter_stories(stories, triggerlist)
        # Don't print a story if we have already printed it before
        newstories = []
        for story in stories:
            if story.get_guid() not in guidShown:
                newstories.append(story)
        for story in newstories:
            guidShown.append(story.get_guid())
            p.newWindow(story)
        print "Sleeping..."
        run -= 1
        # SLEEPTIME is defined at module level below this function; it is
        # resolved at call time, so this works.
        time.sleep(SLEEPTIME)
SLEEPTIME = 10 #seconds -- how often we poll
if __name__ == '__main__':
p = Popup()
thread.start_new_thread(main_thread, (p,))
p.start()
| mit |
met-office-lab/cloud-processing-config | analysis_config.py | 1 | 3204 | import iris
import functools
import numpy as np
import os
max_val = 255 # maximum data value (i.e. 8 bit uint)
thredds_server = "http://thredds.3dvis.informaticslab.co.uk/thredds/dodsC/testLab/"
img_data_server = "http://data.3dvis.informaticslab.co.uk/molab-3dwx-ds/media/"
vid_data_server = img_data_server
roothal = "http://data.3dvis.informaticslab.co.uk/molab-3dwx-ds/"
# The orography file is only available when $DATA_DIR is configured.
data_dir = os.getenv("DATA_DIR")
if data_dir is None:
    topog_file = None
else:
    # Reuse the value read above rather than querying the environment twice.
    topog_file = os.path.join(data_dir, "ukv_orog.pp")
sea_level = 3 # minimum altitude number
def saturateClouds(c, min_val=None, max_val=None):
    """Linearly rescale ``c.data`` from [min_val, max_val] onto [0, 1].

    Operates in place on ``c.data`` (shifting, scaling, then clipping)
    and returns ``c``.  Both bounds are required in practice: the None
    defaults raise TypeError if left unset.
    """
    span = max_val - min_val
    c.data -= min_val
    c.data /= span
    # Clip the rescaled data into the unit interval.
    c.data[c.data <= 0.0] = 0.0
    c.data[c.data >= 1.0] = 1.0
    return c
def binarySaturateClouds(c, cutoff):
    """Threshold ``c.data`` in place: below *cutoff* -> 0.0, above -> 1.0.

    Returns ``c``.  Values exactly equal to *cutoff* are left unchanged
    (both comparisons are strict).
    """
    data = c.data
    data[data < cutoff] = 0.0
    data[data > cutoff] = 1.0
    return c
def degrib_cb(c, f, n):
    # Iris load callback for GRIB cubes: replace the generic "level"
    # coordinate with a height dim coord (metres), add time bounds, and
    # record the first time point as the forecast reference time.
    levc = c.coord("level")
    levcdim, = c.coord_dims(levc)
    newc = iris.coords.DimCoord(levc.points, "height", long_name="level_height", units="m")
    c.remove_coord(levc)
    c.add_dim_coord(newc, levcdim)
    c.coord("time").guess_bounds()
    frtc = iris.coords.AuxCoord.from_coord(c.coord("time")[0])
    frtc.rename("forecast_reference_time")
    c.add_aux_coord(frtc)
    return c
def latlon2Dto1D_cb(c, f, n):
    # Collapse 2-D latitude/longitude aux coords to 1-D dim coords by
    # averaging across the orthogonal axis, then drop the Cartesian x/y
    # coordinates and attach the new coords to the last two dimensions.
    # NOTE(review): averaging only yields sensible coordinates when rows
    # share a latitude and columns share a longitude (a regular grid) --
    # confirm for this model domain.
    latc = c.coord("latitude")
    newlatc = iris.coords.DimCoord(np.mean(latc.points, 1),
                                   standard_name="latitude", units="degrees")
    lonc = c.coord("longitude")
    newlonc = iris.coords.DimCoord(np.mean(lonc.points, 0),
                                   standard_name="longitude", units="degrees")
    c.remove_coord("latitude")
    c.remove_coord("longitude")
    c.remove_coord("x-coordinate in Cartesian system")
    c.remove_coord("y-coordinate in Cartesian system")
    c.add_dim_coord(newlatc, c.ndim-2)
    c.add_dim_coord(newlonc, c.ndim-1)
    return c
def ukv_cb(c, f, n):
    # UKV cubes need both fixes: 1-D lat/lon first, then GRIB coordinate
    # cleanup.
    c = latlon2Dto1D_cb(c, f, n)
    c = degrib_cb(c, f, n)
    return c
# profiles are namespaces which contain setting for different analysis types
# Each profile bundles: an Iris constraint selecting the vertical levels,
# a geographic extent [west, east, south, north], the regrid target shape,
# optional processing/load hooks, and the video-encoding command template
# (FILES_IN / FILE_OUT are placeholders substituted by the pipeline).
profiles = {
#    "UKV2EGRR_LR": {"data_constraint": iris.Constraint(coord_values={"height": lambda v: v.point < 5e3}),
#                 "extent": [-13.62, 6.406, 47.924, 60.866],
#                 "regrid_shape": [200, 200, 20],
#                 "proc_fn": None,
#                 "load_call_back": ukv_cb,
#                 "video_ending": "ogv",
#                 "ffmpeg_args_template": ["ffmpeg", "-r", "20", "-i", "FILES_IN",
#                                          "-r", "20", "-c:v", "libtheora", "FILE_OUT"]
#                 },
    "UKV2EGRR": {"data_constraint": iris.Constraint(coord_values={"atmosphere_hybrid_height_coordinate": lambda v: v.point < 5e3}),
                 "extent": [-10.2, 2.4, 48.7, 59.2],
                 "regrid_shape": [400, 400, 35],
                 "proc_fn": None,
                 "load_call_back": None,
                 "video_ending": "ogv",
                 "ffmpeg_args_template": ["avconv", "-y", "-r", "1", "-i", "FILES_IN",
                                          "-r", "1", "-vcodec", "libtheora", "-qscale:v", "2", "FILE_OUT"]
                 }
} | lgpl-3.0 |
efiring/numpy-work | numpy/testing/utils.py | 1 | 25446 | """
Utility function to facilitate testing.
"""
import os
import sys
import re
import operator
from nosetester import import_nose
__all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_']
verbose = 0
def assert_(val, msg=''):
    """Assert that *val* is truthy, even in release (-O) mode.

    Plain ``assert`` statements are stripped when Python runs optimized;
    this helper always raises AssertionError(msg) on a falsy value.
    """
    if not val:
        raise AssertionError(msg)
def rand(*args):
    """Return a float64 array of the given shape filled with uniform
    [0, 1) random numbers.

    Only the stdlib ``random`` module is used, which makes this handy for
    testing code paths that must not touch numpy's RNG.
    """
    import random
    from numpy.core import zeros, float64
    out = zeros(args, float64)
    flat = out.flat
    for idx in range(len(flat)):
        flat[idx] = random.random()
    return out
if sys.platform[:5]=='linux':
    # On Linux, read scheduling/memory statistics straight from /proc.
    def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),
                _load_time=[]):
        """ Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. See man 5 proc. """
        import time
        if not _load_time:
            # The mutable default acts as a one-shot cache of the first
            # call's wall-clock time (used by the fallback below).
            _load_time.append(time.time())
        try:
            f=open(_proc_pid_stat,'r')
            l = f.readline().split(' ')
            f.close()
            # Field 14 of /proc/<pid>/stat is utime (user-mode jiffies).
            return int(l[13])
        except:
            # Fallback: emulate with wall-clock time since the first call.
            return int(100*(time.time()-_load_time[0]))
    def memusage(_proc_pid_stat = '/proc/%s/stat'%(os.getpid())):
        """ Return virtual memory size in bytes of the running python.
        """
        try:
            f=open(_proc_pid_stat,'r')
            l = f.readline().split(' ')
            f.close()
            # Field 23 of /proc/<pid>/stat is vsize (virtual memory, bytes).
            return int(l[22])
        except:
            # Returns None when /proc is unreadable.
            return
else:
    # os.getpid is not in all platforms available.
    # Using time is safe but inaccurate, especially when process
    # was suspended or sleeping.
    def jiffies(_load_time=[]):
        """ Return number of jiffies (1/100ths of a second) that this
        process has been scheduled in user mode. [Emulation with time.time]. """
        import time
        if not _load_time:
            _load_time.append(time.time())
        return int(100*(time.time()-_load_time[0]))
    def memusage():
        """ Return memory usage of running python. [Not implemented]"""
        raise NotImplementedError
if os.name=='nt' and sys.version[:3] > '2.3':
    # Code "stolen" from enthought/debug/memusage.py
    def GetPerformanceAttributes(object, counter, instance = None,
                                 inum=-1, format = None, machine=None):
        """Query one Windows PDH performance-counter value.

        Note: ``object``, ``format`` and the local ``type`` below shadow
        Python builtins; kept as-is for backward compatibility.
        """
        # NOTE: Many counters require 2 samples to give accurate results,
        # including "% Processor Time" (as by definition, at any instant, a
        # thread's CPU usage is either 0 or 100). To read counters like this,
        # you should copy this function, but keep the counter open, and call
        # CollectQueryData() each time you need to know.
        # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
        # My older explanation for this was that the "AddCounter" process forced
        # the CPU to 100%, but the above makes more sense :)
        import win32pdh
        if format is None: format = win32pdh.PDH_FMT_LONG
        path = win32pdh.MakeCounterPath( (machine,object,instance, None, inum,counter) )
        hq = win32pdh.OpenQuery()
        try:
            hc = win32pdh.AddCounter(hq, path)
            try:
                win32pdh.CollectQueryData(hq)
                type, val = win32pdh.GetFormattedCounterValue(hc, format)
                return val
            finally:
                # Always release the counter and the query handles.
                win32pdh.RemoveCounter(hc)
        finally:
            win32pdh.CloseQuery(hq)
    def memusage(processName="python", instance=0):
        """Virtual memory size (bytes) of the given process, via PDH."""
        # from win32pdhutil, part of the win32all package
        import win32pdh
        return GetPerformanceAttributes("Process", "Virtual Bytes",
                                        processName, instance,
                                        win32pdh.PDH_FMT_LONG, None)
def build_err_msg(arrays, err_msg, header='Items are not equal:',
                  verbose=True,
                  names=('ACTUAL', 'DESIRED')):
    """Assemble a multi-line assertion failure message.

    Starts with *header*, appends *err_msg* (inline when short and
    single-line, otherwise on its own line) and, when *verbose*, a repr
    of each item in *arrays* labelled by the matching entry of *names*.
    Reprs longer than three lines are truncated with '...'.
    """
    lines = ['\n' + header]
    if err_msg:
        fits_inline = '\n' not in err_msg and len(err_msg) < 79 - len(header)
        if fits_inline:
            lines = [lines[0] + ' ' + err_msg]
        else:
            lines.append(err_msg)
    if verbose:
        for i, item in enumerate(arrays):
            try:
                rendered = repr(item)
            except:
                rendered = '[repr failed]'
            if rendered.count('\n') > 3:
                rendered = '\n'.join(rendered.splitlines()[:3]) + '...'
            lines.append(' %s: %s' % (names[i], rendered))
    return '\n'.join(lines)
def assert_equal(actual, desired, err_msg='', verbose=True):
    """
    Raise an AssertionError if two objects are not equal.

    Dicts are compared key by key and lists/tuples element by element
    (recursively); ndarrays are delegated to assert_array_equal.  The
    first conflicting value raises AssertionError with a message built
    from *err_msg*.

    Parameters
    ----------
    actual : list, tuple, dict or ndarray
        The object to check.
    desired : list, tuple, dict or ndarray
        The expected object.
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal.
    """
    if isinstance(desired, dict):
        # Same type and size first, then value-by-value; the key is
        # threaded through err_msg so nested failures are locatable.
        if not isinstance(actual, dict):
            raise AssertionError(repr(type(actual)))
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for k in desired:
            if k not in actual:
                raise AssertionError(repr(k))
            assert_equal(actual[k], desired[k],
                         'key=%r\n%s' % (k, err_msg), verbose)
        return
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for k in range(len(desired)):
            assert_equal(actual[k], desired[k],
                         'item=%r\n%s' % (k, err_msg), verbose)
        return
    from numpy.core import ndarray
    if isinstance(actual, ndarray) or isinstance(desired, ndarray):
        return assert_array_equal(actual, desired, err_msg, verbose)
    # Scalar / generic fallback.
    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
    if desired != actual:
        raise AssertionError(msg)
def print_assert_equal(test_string,actual,desired):
    # Assert actual == desired; on failure raise AssertionError whose
    # message starts with test_string and pretty-prints both values.
    # (cStringIO is Python-2-only, matching the rest of this module.)
    import pprint
    try:
        assert(actual == desired)
    except AssertionError:
        import cStringIO
        msg = cStringIO.StringIO()
        msg.write(test_string)
        msg.write(' failed\nACTUAL: \n')
        pprint.pprint(actual,msg)
        msg.write('DESIRED: \n')
        pprint.pprint(desired,msg)
        raise AssertionError(msg.getvalue())
def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
    """
    Raise an AssertionError if two items are not equal to the desired
    precision; the test is abs(desired-actual) < 0.5 * 10**(-decimal).

    ndarray inputs are delegated to assert_array_almost_equal; everything
    else is compared with round(abs(desired - actual), decimal).

    Parameters
    ----------
    actual : number or ndarray
        The object to check.
    desired : number or ndarray
        The expected object.
    decimal : integer (decimal=7)
        desired precision
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_array_almost_equal: compares array_like objects
    assert_equal: tests objects for equality
    """
    from numpy.core import ndarray
    if isinstance(actual, ndarray) or isinstance(desired, ndarray):
        return assert_array_almost_equal(actual, desired, decimal, err_msg)
    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
    if round(abs(desired - actual), decimal) != 0:
        raise AssertionError(msg)
def assert_approx_equal(actual, desired, significant=7, err_msg='', verbose=True):
    """
    Raise an AssertionError if two numbers do not agree to *significant*
    significant digits.

    Both numbers are normalized into (-10.0, 10.0) by the magnitude of
    their mean before comparing, so the check is relative rather than
    absolute.

    Parameters
    ----------
    actual : number
        The object to check.
    desired : number
        The expected object.
    significant : integer (significant=7)
        desired precision
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_almost_equal: compares objects by decimals
    assert_array_almost_equal: compares array_like objects by decimals
    assert_equal: tests objects for equality
    """
    import math
    actual, desired = float(actual), float(desired)
    if desired == actual:
        return
    # Normalized the numbers to be in range (-10.0,10.0)
    scale = float(pow(10, math.floor(math.log10(0.5 * (abs(desired) + abs(actual))))))
    try:
        sc_desired = desired / scale
    except ZeroDivisionError:
        sc_desired = 0.0
    try:
        sc_actual = actual / scale
    except ZeroDivisionError:
        sc_actual = 0.0
    msg = build_err_msg(
        [actual, desired], err_msg,
        header='Items are not equal to %d significant digits:' % significant,
        verbose=verbose)
    if math.fabs(sc_desired - sc_actual) >= pow(10., -(significant - 1)):
        raise AssertionError(msg)
def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
                         header=''):
    # Shared engine behind assert_array_equal / _almost_equal / _less:
    # checks shape compatibility, aligns NaN locations, applies
    # ``comparison`` element-wise and raises AssertionError with a
    # mismatch percentage on failure.
    from numpy.core import array, isnan, any
    x = array(x, copy=False, subok=True)
    y = array(y, copy=False, subok=True)
    def isnumber(x):
        # dtype typecode check: bool, ints, floats, complex (not object/str).
        return x.dtype.char in '?bhilqpBHILQPfdgFDG'
    try:
        # Shapes must match unless one operand is a 0-d scalar.
        cond = (x.shape==() or y.shape==()) or x.shape == y.shape
        if not cond:
            msg = build_err_msg([x, y],
                                err_msg
                                + '\n(shapes %s, %s mismatch)' % (x.shape,
                                                                  y.shape),
                                verbose=verbose, header=header,
                                names=('x', 'y'))
            if not cond :
                raise AssertionError(msg)
        if (isnumber(x) and isnumber(y)) and (any(isnan(x)) or any(isnan(y))):
            # Handling nan: we first check that x and y have the nan at the
            # same locations, and then we mask the nan and do the comparison as
            # usual.
            xnanid = isnan(x)
            ynanid = isnan(y)
            try:
                assert_array_equal(xnanid, ynanid)
            except AssertionError:
                # NOTE(review): this builds a mismatch message but never
                # raises it; control falls through to the masked
                # comparison below -- confirm whether a raise is missing.
                msg = build_err_msg([x, y],
                                    err_msg
                                    + '\n(x and y nan location mismatch %s, ' \
                                    '%s mismatch)' % (xnanid, ynanid),
                                    verbose=verbose, header=header,
                                    names=('x', 'y'))
            # Compare only the non-NaN elements.
            val = comparison(x[~xnanid], y[~ynanid])
        else:
            val = comparison(x,y)
        if isinstance(val, bool):
            # Scalar comparison result.
            cond = val
            reduced = [0]
        else:
            reduced = val.ravel()
            cond = reduced.all()
            reduced = reduced.tolist()
        if not cond:
            # Percentage of elements that failed the comparison.
            match = 100-100.0*reduced.count(1)/len(reduced)
            msg = build_err_msg([x, y],
                                err_msg
                                + '\n(mismatch %s%%)' % (match,),
                                verbose=verbose, header=header,
                                names=('x', 'y'))
            if not cond :
                raise AssertionError(msg)
    except ValueError:
        # E.g. broadcasting failures inside ``comparison``.
        msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
                            names=('x', 'y'))
        raise ValueError(msg)
def assert_array_equal(x, y, err_msg='', verbose=True):
    """
    Raise an AssertionError if two array_like objects are not equal.

    Shapes must match (scalars broadcast) and every element must compare
    equal.  Unlike ordinary numpy comparison, NaNs occurring at the same
    positions in both objects do not trigger a failure.  For floats that
    are merely close, use assert_array_almost_equal instead.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired objects are not equal.

    See Also
    --------
    assert_array_almost_equal: test objects for equality up to precision
    assert_equal: tests objects for equality
    """
    assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
                         verbose=verbose, header='Arrays are not equal')
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
    """
    Raise an AssertionError if two array_like objects differ by more than
    the desired precision: abs(desired-actual) < 0.5 * 10**(-decimal).

    Shapes must match (scalars broadcast).  NaNs occurring at the same
    positions in both objects do not trigger a failure.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    decimal : integer (decimal=6)
        desired precision
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_almost_equal: simple version for comparing numbers
    assert_array_equal: tests objects for equality
    """
    from numpy.core import around, number, float_
    from numpy.lib import issubdtype
    def compare(x, y):
        # Object arrays may hold plain Python numbers; coerce so that
        # around() can operate on them.
        z = abs(x - y)
        if not issubdtype(z.dtype, number):
            z = z.astype(float_)
        return around(z, decimal) <= 10.0 ** (-decimal)
    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
                         header='Arrays are not almost equal')
def assert_array_less(x, y, err_msg='', verbose=True):
    """
    Raise an AssertionError unless every element of *x* is strictly less
    than the corresponding element of *y*.

    Shapes must match (scalars broadcast).  NaNs occurring at the same
    positions in both objects are excluded from the ordering check.

    Parameters
    ----------
    x : array_like
        The smaller object to check.
    y : array_like
        The larger object to compare.
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired objects are not equal.

    See Also
    --------
    assert_array_equal: tests objects for equality
    assert_array_almost_equal: test objects for equality up to precision
    """
    assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
                         verbose=verbose,
                         header='Arrays are not less-ordered')
def runstring(astr, dict):
    # Execute the code string ``astr`` inside the supplied namespace.
    # (Python-2 exec statement; the parameter name shadows the builtin
    # ``dict``.)
    exec astr in dict
def assert_string_equal(actual, desired):
    # Compare two strings line-by-line with difflib, treating a '- '/'+ '
    # pair as equal when the desired line regex-matches the actual one;
    # raises AssertionError with the residual differences.
    # (Python-2 backtick repr syntax throughout.)
    # delay import of difflib to reduce startup time
    import difflib
    if not isinstance(actual, str) :
        raise AssertionError(`type(actual)`)
    if not isinstance(desired, str):
        raise AssertionError(`type(desired)`)
    # Fast path: desired as a multiline regex matching actual exactly.
    if re.match(r'\A'+desired+r'\Z', actual, re.M): return
    diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
    diff_list = []
    while diff:
        d1 = diff.pop(0)
        if d1.startswith('  '):
            # Common line: no difference.
            continue
        if d1.startswith('- '):
            l = [d1]
            d2 = diff.pop(0)
            if d2.startswith('? '):
                # Skip the intra-line hint emitted by Differ.
                l.append(d2)
                d2 = diff.pop(0)
            if not d2.startswith('+ ') :
                raise AssertionError(`d2`)
            l.append(d2)
            # NOTE(review): pop here assumes Differ always emits a line
            # after a '+ ' entry; an IndexError is possible at stream end.
            d3 = diff.pop(0)
            if d3.startswith('? '):
                l.append(d3)
            else:
                diff.insert(0, d3)
            if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
                # Replacement pair considered equal under regex match.
                continue
            diff_list.extend(l)
            continue
        raise AssertionError(`d1`)
    if not diff_list:
        return
    msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
    if actual != desired :
        raise AssertionError(msg)
def rundocs(filename=None, raise_on_error=True):
    """Run doc string tests found in file.
    By default raises AssertionError on failure.
    """
    import doctest, imp
    if filename is None:
        # Default to the caller's source file.
        f = sys._getframe(1)
        filename = f.f_globals['__file__']
    name = os.path.splitext(os.path.basename(filename))[0]
    path = [os.path.dirname(filename)]
    file, pathname, description = imp.find_module(name, path)
    try:
        m = imp.load_module(name, file, pathname, description)
    finally:
        # imp.find_module opened the file; always close it.
        file.close()
    tests = doctest.DocTestFinder().find(m)
    runner = doctest.DocTestRunner(verbose=False)
    msg = []
    if raise_on_error:
        # Capture the runner's report so it can go into the exception.
        out = lambda s: msg.append(s)
    else:
        out = None
    for test in tests:
        runner.run(test, out=out)
    if runner.failures > 0 and raise_on_error:
        raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
def raises(*args,**kwargs):
    # Thin pass-through to nose.tools.raises; nose is imported lazily so
    # importing this module does not require nose.
    nose = import_nose()
    return nose.tools.raises(*args,**kwargs)
def assert_raises(*args,**kwargs):
    """
    assert_raises(exception_class, callable, *args, **kwargs)
    Fail unless an exception of class exception_class is thrown
    by callable when invoked with arguments args and keyword
    arguments kwargs. If a different type of exception is
    thrown, it will not be caught, and the test case will be
    deemed to have suffered an error, exactly as for an
    unexpected exception.
    """
    # Delegates to nose so behaviour matches the test runner's helper.
    nose = import_nose()
    return nose.tools.assert_raises(*args,**kwargs)
def decorate_methods(cls, decorator, testmatch=None):
''' Apply decorator to all methods in class matching testmatch
Parameters
----------
cls : class
Class to decorate methods for
decorator : function
Decorator to apply to methods
testmatch : compiled regexp or string to compile to regexp
Decorators are applied if testmatch.search(methodname)
is not None. Default value is
re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
(the default for nose)
'''
if testmatch is None:
testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
else:
testmatch = re.compile(testmatch)
cls_attr = cls.__dict__
# delayed import to reduce startup time
from inspect import isfunction
methods = filter(isfunction, cls_attr.values())
for function in methods:
try:
if hasattr(function, 'compat_func_name'):
funcname = function.compat_func_name
else:
funcname = function.__name__
except AttributeError:
# not a function
continue
if testmatch.search(funcname) and not funcname.startswith('_'):
setattr(cls, funcname, decorator(function))
return
def measure(code_str, times=1, label=None):
    """Return elapsed time for executing *code_str* in the caller's namespace.

    Parameters
    ----------
    code_str : str
        Python source to execute.
    times : int, optional
        Number of repetitions (default 1).
    label : str, optional
        Only used in the pseudo-filename attached to the compiled code, so
        it shows up in tracebacks.

    Returns
    -------
    float
        Elapsed time as reported by ``jiffies`` scaled by 0.01 (jiffies
        are hundredths of a second).
    """
    frame = sys._getframe(1)
    locs, globs = frame.f_locals, frame.f_globals
    code = compile(code_str,
                   'Test name: %s ' % label,
                   'exec')
    i = 0
    elapsed = jiffies()
    while i < times:
        i += 1
        # BUG FIX: the old ``exec code in globs, locs`` statement is
        # Python-2-only syntax (SyntaxError on 3.x); the call form below
        # behaves identically on both Python 2 and Python 3.
        exec(code, globs, locs)
    elapsed = jiffies() - elapsed
    return 0.01 * elapsed
| bsd-3-clause |
LoLab-VU/pymc | pymc/step_methods/hmc.py | 2 | 3269 | '''
Created on Mar 7, 2011
@author: johnsalvatier
'''
from numpy import floor
from .quadpotential import *
from .arraystep import *
from ..core import *
from ..tuning import guess_scaling
import numpy as np
from scipy.sparse import issparse
from collections import namedtuple
__all__ = ['HamiltonianMC']
# TODO:
# add constraint handling via page 37 of Radford's
# http://www.cs.utoronto.ca/~radford/ham-mcmc.abstract.html
def unif(step_size, elow=.85, ehigh=1.15):
    """Scale *step_size* by a random factor drawn uniformly from [elow, ehigh)."""
    factor = np.random.uniform(elow, ehigh)
    return factor * step_size
class HamiltonianMC(ArrayStep):
    """Hamiltonian Monte Carlo step method: one full HMC transition per call."""

    # Block all continuous model variables together by default (see ArrayStep).
    default_blocked = True

    def __init__(self, vars=None, scaling=None, step_scale=.25, path_length=2., is_cov=False, step_rand=unif, state=None, model=None, **kwargs):
        """
        Parameters
        ----------
        vars : list of theano variables
        scaling : array_like, ndim = {1,2}
            Scaling for momentum distribution. 1d arrays interpreted matrix diagonal.
        step_scale : float, default=.25
            Size of steps to take, automatically scaled down by 1/n**(1/4) (defaults to .25)
        path_length : float, default=2
            total length to travel
        is_cov : bool, default=False
            Treat scaling as a covariance matrix/vector if True, else treat it as a precision matrix/vector
        step_rand : function float -> float, default=unif
            A function which takes the step size and returns an new one used to randomize the step size at each iteration.
        state
            State object
        model : Model
        """
        model = modelcontext(model)
        if vars is None:
            vars = model.cont_vars
        if scaling is None:
            # Fall back to the model's test point to derive a scaling.
            scaling = model.test_point
        if isinstance(scaling, dict):
            # A point-like dict is converted to an array scaling by the tuner.
            scaling = guess_scaling(Point(scaling, model=model), model=model)
        n = scaling.shape[0]
        # Shrink the base step with dimensionality: step ~ n**(-1/4).
        self.step_size = step_scale / n ** (1 / 4.)
        self.potential = quad_potential(scaling, is_cov, as_cov=False)
        self.path_length = path_length
        self.step_rand = step_rand
        if state is None:
            state = SamplerHist()
        self.state = state
        super(HamiltonianMC, self).__init__(vars, [model.fastlogp, model.fastdlogp(vars)], **kwargs)

    def astep(self, q0, logp, dlogp):
        # Bundle the model's logp/gradient with the momentum potential.
        H = Hamiltonian(logp, dlogp, self.potential)
        # Randomize the step size, then take as many leapfrog steps as fit
        # in the configured trajectory length.
        e = self.step_rand(self.step_size)
        nstep = int(self.path_length / e)
        # Fresh momentum draw for this trajectory.
        p0 = H.pot.random()
        q, p = leapfrog(H, q0, p0, nstep, e)
        # Negate momentum so the proposal is reversible.
        p = -p
        # Log Metropolis ratio: energy(old) - energy(new).
        mr = energy(H, q0, p0) - energy(H, q, p)
        self.state.metrops.append(mr)
        # Accept q or keep q0 according to the Metropolis ratio.
        return metrop_select(mr, q, q0)
def bern(p):
    # Single Bernoulli(p) draw: True with probability p.
    draw = np.random.uniform()
    return draw < p
Hamiltonian = namedtuple("Hamiltonian", "logp, dlogp, pot")
def energy(H, q, p):
    """Total energy: potential term -logp(q) plus kinetic term pot.energy(p)."""
    kinetic = H.pot.energy(p)
    potential = -H.logp(q)
    return potential + kinetic
def leapfrog(H, q, p, n, e):
    """Take *n* leapfrog steps of size *e*, returning the new (q, p)."""
    _, dlogp, pot = H
    half = e / 2
    # Opening half-step momentum update.
    p = p + half * dlogp(q)
    for step in range(n):
        # Full position update; interleave full momentum updates on all
        # but the last iteration (the trajectory ends with a half step).
        q = q + e * pot.velocity(p)
        if step != n - 1:
            p = p + e * dlogp(q)
    # Closing half-step momentum update.
    p = p + half * dlogp(q)
    return q, p
| apache-2.0 |
bolkedebruin/airflow | tests/test_impersonation.py | 2 | 7598 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import errno
import functools
import logging
import os
import subprocess
import sys
import unittest
import unittest.mock
from copy import deepcopy
from airflow import jobs, models
from airflow.utils.db import add_default_pool_if_not_exists
from airflow.utils.state import State
from airflow.utils.timezone import datetime
# Shared fixtures and path constants for the impersonation tests.
DEV_NULL = '/dev/null'
# DAG fixture folders, resolved relative to this test file.
TEST_DAG_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
TEST_DAG_CORRUPTED_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags_corrupted')
TEST_UTILS_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'test_utils')
# Fixed execution date used by all backfills below.
DEFAULT_DATE = datetime(2015, 1, 1)
# OS-level user created in setUp and removed in tearDown.
TEST_USER = 'airflow_test_user'
logger = logging.getLogger(__name__)
def mock_custom_module_path(path: str):
    """Decorator factory: run the wrapped callable with *path* appended to
    ``sys.path`` (restored afterwards) and exported as the ``PYTHONPATH``
    environment variable so child scripts see the same module directory.
    """
    def wrapper(func):
        @functools.wraps(func)
        def decorator(*args, **kwargs):
            saved_sys_path = deepcopy(sys.path)
            sys.path.append(path)
            env_patch = unittest.mock.patch.dict('os.environ', {'PYTHONPATH': path})
            try:
                with env_patch:
                    return func(*args, **kwargs)
            finally:
                # Always restore the pristine module search path.
                sys.path = saved_sys_path
        return decorator
    return wrapper
def grant_permissions():
    # Open up the AIRFLOW_HOME tree (and /root) so the impersonated test
    # user can traverse and write into it.
    airflow_home = os.environ['AIRFLOW_HOME']
    command = 'find "%s" -exec sudo chmod og+w {} +; sudo chmod og+rx /root' % airflow_home
    subprocess.check_call(command, shell=True)
def revoke_permissions():
    # Undo grant_permissions(): strip the group/other bits again.
    airflow_home = os.environ['AIRFLOW_HOME']
    command = 'find "%s" -exec sudo chmod og-w {} +; sudo chmod og-rx /root' % airflow_home
    subprocess.check_call(command, shell=True)
def check_original_docker_image():
    # These tests add/remove a real OS user, which is only safe inside the
    # official CI docker image.  Detect that image via the /.dockerenv marker
    # file (present in any docker container) combined with the
    # PYTHON_BASE_IMAGE variable (set by the official image), and skip
    # everywhere else.
    if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAGE') is None:
        raise unittest.SkipTest("""Adding/removing a user as part of a test is very bad for host os
(especially if the user already existed to begin with on the OS), therefore we check if we run inside a
the official docker container and only allow to run the test there. This is done by checking /.dockerenv
file (always present inside container) and checking for PYTHON_BASE_IMAGE variable.
""")
def create_user():
    """Create the impersonation test user, or skip the test run.

    Skips (rather than errors) when 'useradd' is unavailable or fails,
    since impersonation tests only make sense on a linux host with a
    working sudo/useradd.
    """
    try:
        subprocess.check_output(['sudo', 'useradd', '-m', TEST_USER, '-g',
                                 str(os.getegid())])
    except subprocess.CalledProcessError:
        # BUG FIX: a non-zero exit status raises CalledProcessError (not
        # OSError), which the original code never caught — so the intended
        # "skip" became a hard test error.
        raise unittest.SkipTest(
            "The 'useradd' command exited non-zero; Skipping tests. Does the "
            "current user have permission to run 'useradd' without a password "
            "prompt (check sudoers file)?"
        )
    except OSError as e:
        if e.errno == errno.ENOENT:
            # The command itself does not exist on this host.
            raise unittest.SkipTest(
                "The 'useradd' command did not exist so unable to test "
                "impersonation; Skipping Test. These tests can only be run on a "
                "linux host that supports 'useradd'."
            )
        else:
            # Some other OS-level failure launching the command; keep the
            # original skip behavior for backward compatibility.
            raise unittest.SkipTest(
                "The 'useradd' command exited non-zero; Skipping tests. Does the "
                "current user have permission to run 'useradd' without a password "
                "prompt (check sudoers file)?"
            )
class TestImpersonation(unittest.TestCase):
    """End-to-end checks that backfilled tasks run as the configured unix user.

    Requires the official CI docker image (sudo/useradd available); the
    setUp guard skips the whole class otherwise.
    """

    def setUp(self):
        # Order matters: verify the environment first, open up AIRFLOW_HOME,
        # make sure the default pool exists, load the DAG fixtures, and only
        # then create the OS-level impersonation user.
        check_original_docker_image()
        grant_permissions()
        add_default_pool_if_not_exists()
        self.dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        logger.info('Loaded DAGS:')
        logger.info(self.dagbag.dagbag_report())
        create_user()

    def tearDown(self):
        # Remove the test user (with its home directory) and restore the
        # permissions opened by setUp.
        subprocess.check_output(['sudo', 'userdel', '-r', TEST_USER])
        revoke_permissions()

    def run_backfill(self, dag_id, task_id):
        # Backfill *dag_id* for DEFAULT_DATE and assert *task_id* succeeded.
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        jobs.BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE).run()
        ti = models.TaskInstance(
            task=dag.get_task(task_id),
            execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)

    def test_impersonation(self):
        """
        Tests that impersonating a unix user works
        """
        self.run_backfill(
            'test_impersonation',
            'test_impersonated_user'
        )

    def test_no_impersonation(self):
        """
        If default_impersonation=None, tests that the job is run
        as the current user (which will be a sudoer)
        """
        self.run_backfill(
            'test_no_impersonation',
            'test_superuser',
        )

    @unittest.mock.patch.dict('os.environ', AIRFLOW__CORE__DEFAULT_IMPERSONATION=TEST_USER)
    def test_default_impersonation(self):
        """
        If default_impersonation=TEST_USER, tests that the job defaults
        to running as TEST_USER for a test without run_as_user set
        """
        self.run_backfill(
            'test_default_impersonation',
            'test_deelevated_user'
        )

    def test_impersonation_subdag(self):
        """
        Tests that impersonation using a subdag correctly passes the right configuration
        :return:
        """
        self.run_backfill(
            'impersonation_subdag',
            'test_subdag_operation'
        )
class TestImpersonationWithCustomPythonPath(unittest.TestCase):
    """Impersonation checks with a custom directory exported on PYTHONPATH."""

    @mock_custom_module_path(TEST_UTILS_FOLDER)
    def setUp(self):
        # NOTE(review): the decorator restores sys.path when setUp returns,
        # so the test method itself is decorated again below — confirm that
        # relying on the externally-set PYTHONPATH in between is intended.
        check_original_docker_image()
        grant_permissions()
        add_default_pool_if_not_exists()
        # Same setup as TestImpersonation, but loading the "corrupted" DAG
        # folder whose DAGs import from the custom package path.
        self.dagbag = models.DagBag(
            dag_folder=TEST_DAG_CORRUPTED_FOLDER,
            include_examples=False,
        )
        logger.info('Loaded DAGS:')
        logger.info(self.dagbag.dagbag_report())
        create_user()

    def tearDown(self):
        subprocess.check_output(['sudo', 'userdel', '-r', TEST_USER])
        revoke_permissions()

    def run_backfill(self, dag_id, task_id):
        # Duplicated from TestImpersonation.run_backfill: backfill one date
        # and assert the task instance reached SUCCESS.
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        jobs.BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE).run()
        ti = models.TaskInstance(
            task=dag.get_task(task_id),
            execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)

    @mock_custom_module_path(TEST_UTILS_FOLDER)
    def test_impersonation_custom(self):
        """
        Tests that impersonation using a unix user works with custom packages in
        PYTHONPATH
        """
        # PYTHONPATH is already set in script triggering tests
        assert 'PYTHONPATH' in os.environ
        self.run_backfill(
            'impersonation_with_custom_pkg',
            'exec_python_fn'
        )
| apache-2.0 |
gaomingnudt/gm-flask2.0 | tests/test_templating.py | 148 | 13057 | # -*- coding: utf-8 -*-
"""
tests.templating
~~~~~~~~~~~~~~~~
Template functionality
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import flask
import logging
from jinja2 import TemplateNotFound
def test_context_processing():
app = flask.Flask(__name__)
@app.context_processor
def context_processor():
return {'injected_value': 42}
@app.route('/')
def index():
return flask.render_template('context_template.html', value=23)
rv = app.test_client().get('/')
assert rv.data == b'<p>23|42'
def test_original_win():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template_string('{{ config }}', config=42)
rv = app.test_client().get('/')
assert rv.data == b'42'
def test_request_less_rendering():
app = flask.Flask(__name__)
app.config['WORLD_NAME'] = 'Special World'
@app.context_processor
def context_processor():
return dict(foo=42)
with app.app_context():
rv = flask.render_template_string('Hello {{ config.WORLD_NAME }} '
'{{ foo }}')
assert rv == 'Hello Special World 42'
def test_standard_context():
app = flask.Flask(__name__)
app.secret_key = 'development key'
@app.route('/')
def index():
flask.g.foo = 23
flask.session['test'] = 'aha'
return flask.render_template_string('''
{{ request.args.foo }}
{{ g.foo }}
{{ config.DEBUG }}
{{ session.test }}
''')
rv = app.test_client().get('/?foo=42')
assert rv.data.split() == [b'42', b'23', b'False', b'aha']
def test_escaping():
text = '<p>Hello World!'
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('escaping_template.html', text=text,
html=flask.Markup(text))
lines = app.test_client().get('/').data.splitlines()
assert lines == [
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!'
]
def test_no_escaping():
text = '<p>Hello World!'
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('non_escaping_template.txt', text=text,
html=flask.Markup(text))
lines = app.test_client().get('/').data.splitlines()
assert lines == [
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!'
]
def test_escaping_without_template_filename():
app = flask.Flask(__name__)
with app.test_request_context():
assert flask.render_template_string(
'{{ foo }}', foo='<test>') == '<test>'
assert flask.render_template('mail.txt', foo='<test>') == \
'<test> Mail'
def test_macros():
app = flask.Flask(__name__)
with app.test_request_context():
macro = flask.get_template_attribute('_macro.html', 'hello')
assert macro('World') == 'Hello World!'
def test_template_filter():
app = flask.Flask(__name__)
@app.template_filter()
def my_reverse(s):
return s[::-1]
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_add_template_filter():
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse)
assert 'my_reverse' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['my_reverse'] == my_reverse
assert app.jinja_env.filters['my_reverse']('abcd') == 'dcba'
def test_template_filter_with_name():
app = flask.Flask(__name__)
@app.template_filter('strrev')
def my_reverse(s):
return s[::-1]
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_add_template_filter_with_name():
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse, 'strrev')
assert 'strrev' in app.jinja_env.filters.keys()
assert app.jinja_env.filters['strrev'] == my_reverse
assert app.jinja_env.filters['strrev']('abcd') == 'dcba'
def test_template_filter_with_template():
app = flask.Flask(__name__)
@app.template_filter()
def super_reverse(s):
return s[::-1]
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_template():
app = flask.Flask(__name__)
def super_reverse(s):
return s[::-1]
app.add_template_filter(super_reverse)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_filter_with_name_and_template():
app = flask.Flask(__name__)
@app.template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_add_template_filter_with_name_and_template():
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse, 'super_reverse')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
assert rv.data == b'dcba'
def test_template_test():
app = flask.Flask(__name__)
@app.template_test()
def boolean(value):
return isinstance(value, bool)
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == boolean
assert app.jinja_env.tests['boolean'](False)
def test_add_template_test():
app = flask.Flask(__name__)
def boolean(value):
return isinstance(value, bool)
app.add_template_test(boolean)
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == boolean
assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_name():
app = flask.Flask(__name__)
@app.template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_add_template_test_with_name():
app = flask.Flask(__name__)
def is_boolean(value):
return isinstance(value, bool)
app.add_template_test(is_boolean, 'boolean')
assert 'boolean' in app.jinja_env.tests.keys()
assert app.jinja_env.tests['boolean'] == is_boolean
assert app.jinja_env.tests['boolean'](False)
def test_template_test_with_template():
app = flask.Flask(__name__)
@app.template_test()
def boolean(value):
return isinstance(value, bool)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_template():
app = flask.Flask(__name__)
def boolean(value):
return isinstance(value, bool)
app.add_template_test(boolean)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_template_test_with_name_and_template():
app = flask.Flask(__name__)
@app.template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_test_with_name_and_template():
app = flask.Flask(__name__)
def is_boolean(value):
return isinstance(value, bool)
app.add_template_test(is_boolean, 'boolean')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
assert b'Success!' in rv.data
def test_add_template_global():
app = flask.Flask(__name__)
@app.template_global()
def get_stuff():
return 42
assert 'get_stuff' in app.jinja_env.globals.keys()
assert app.jinja_env.globals['get_stuff'] == get_stuff
assert app.jinja_env.globals['get_stuff'](), 42
with app.app_context():
rv = flask.render_template_string('{{ get_stuff() }}')
assert rv == '42'
def test_custom_template_loader():
class MyFlask(flask.Flask):
def create_global_jinja_loader(self):
from jinja2 import DictLoader
return DictLoader({'index.html': 'Hello Custom World!'})
app = MyFlask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html')
c = app.test_client()
rv = c.get('/')
assert rv.data == b'Hello Custom World!'
def test_iterable_loader():
app = flask.Flask(__name__)
@app.context_processor
def context_processor():
return {'whiskey': 'Jameson'}
@app.route('/')
def index():
return flask.render_template(
['no_template.xml', # should skip this one
'simple_template.html', # should render this
'context_template.html'],
value=23)
rv = app.test_client().get('/')
assert rv.data == b'<h1>Jameson</h1>'
def test_templates_auto_reload():
    """jinja_env.auto_reload follows TEMPLATES_AUTO_RELOAD, defaulting to DEBUG."""
    # debug is False, config option is None
    app = flask.Flask(__name__)
    assert app.debug is False
    assert app.config['TEMPLATES_AUTO_RELOAD'] is None
    assert app.jinja_env.auto_reload is False
    # debug is False, config option is False
    app = flask.Flask(__name__)
    app.config['TEMPLATES_AUTO_RELOAD'] = False
    assert app.debug is False
    assert app.jinja_env.auto_reload is False
    # debug is False, config option is True -> explicit opt-in wins
    app = flask.Flask(__name__)
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    assert app.debug is False
    assert app.jinja_env.auto_reload is True
    # debug is True, config option is None -> auto reload implied by debug
    app = flask.Flask(__name__)
    app.config['DEBUG'] = True
    assert app.config['TEMPLATES_AUTO_RELOAD'] is None
    assert app.jinja_env.auto_reload is True
    # debug is True, config option is False -> explicit opt-out wins
    app = flask.Flask(__name__)
    app.config['DEBUG'] = True
    app.config['TEMPLATES_AUTO_RELOAD'] = False
    assert app.jinja_env.auto_reload is False
    # debug is True, config option is True
    app = flask.Flask(__name__)
    app.config['DEBUG'] = True
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    assert app.jinja_env.auto_reload is True
def test_template_loader_debugging(test_apps):
    """With EXPLAIN_TEMPLATE_LOADING on, a TemplateNotFound logs every loader
    that was tried (application loader plus each blueprint loader)."""
    from blueprintapp import app
    called = []

    class _TestHandler(logging.Handler):
        # NOTE: the first parameter is named ``x`` rather than ``self``;
        # functionally identical, just unconventional.
        def handle(x, record):
            called.append(True)
            text = str(record.msg)
            assert '1: trying loader of application "blueprintapp"' in text
            assert ('2: trying loader of blueprint "admin" '
                    '(blueprintapp.apps.admin)') in text
            assert ('trying loader of blueprint "frontend" '
                    '(blueprintapp.apps.frontend)') in text
            assert 'Error: the template could not be found' in text
            assert ('looked up from an endpoint that belongs to '
                    'the blueprint "frontend"') in text
            assert 'See http://flask.pocoo.org/docs/blueprints/#templates' in text

    with app.test_client() as c:
        try:
            # Swap in the asserting handler and force the explain output;
            # restore both in the finally block regardless of outcome.
            old_load_setting = app.config['EXPLAIN_TEMPLATE_LOADING']
            old_handlers = app.logger.handlers[:]
            app.logger.handlers = [_TestHandler()]
            app.config['EXPLAIN_TEMPLATE_LOADING'] = True
            with pytest.raises(TemplateNotFound) as excinfo:
                c.get('/missing')
            assert 'missing_template.html' in str(excinfo.value)
        finally:
            app.logger.handlers[:] = old_handlers
            app.config['EXPLAIN_TEMPLATE_LOADING'] = old_load_setting

    # The explain message must have been emitted exactly once.
    assert len(called) == 1
def test_custom_jinja_env():
    # A Flask subclass can install its own Jinja environment class through
    # the ``jinja_environment`` attribute.
    class SpecialEnvironment(flask.templating.Environment):
        pass

    class SpecialFlask(flask.Flask):
        jinja_environment = SpecialEnvironment

    app = SpecialFlask(__name__)
    assert isinstance(app.jinja_env, SpecialEnvironment)
| bsd-3-clause |
rlefevre1/hpp-rbprm-corba | script/scenarios/demos/siggraph_asia/stair_bauzil_hrp2_path3.py | 2 | 3993 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.gepetto import Viewer
from hpp.corbaserver import Client
from hpp.corbaserver.robot import Robot as Parent
class Robot (Parent):
    """Freeflyer trunk robot used by the RBPRM planner."""
    # Loader parameters consumed by the parent Robot class.
    rootJointType = 'freeflyer'
    packageName = 'hpp-rbprm-corba'
    meshPackageName = 'hpp-rbprm-corba'
    # URDF file describing the trunk of the robot (HRP-2's flexible trunk;
    # the original comment said "HyQ", apparently a copy-paste leftover).
    urdfName = 'hrp2_trunk_flexible'
    urdfSuffix = ""
    srdfSuffix = ""

    def __init__ (self, robotName, load = True):
        Parent.__init__ (self, robotName, self.rootJointType, load)
        self.tf_root = "base_footprint"
        # Fresh CORBA client handle for the basic hpp services.
        self.client.basic = Client ()
        self.load = load
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
urdfName = 'hrp2_trunk_flexible'
urdfNameRoms = ['hrp2_larm_rom','hrp2_rarm_rom','hrp2_lleg_rom','hrp2_rleg_rom']
urdfSuffix = ""
srdfSuffix = ""
rbprmBuilder = Builder ()
rbprmBuilder.loadModel(urdfName, urdfNameRoms, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
rbprmBuilder.setJointBounds ("base_joint_xyz", [0,2, -1, 1, 0, 2.2])
#~ rbprmBuilder.setFilter(['hrp2_rarm_rom','hrp2_lleg_rom','hrp2_rleg_rom'])
rbprmBuilder.setAffordanceFilter('3Rarm', ['Support'])
rbprmBuilder.setAffordanceFilter('4Larm', ['Support'])
rbprmBuilder.setAffordanceFilter('0rLeg', ['Support',])
rbprmBuilder.setAffordanceFilter('1lLeg', ['Support'])
#~ rbprmBuilder.setNormalFilter('hrp2_rarm_rom', [0,0,1], 0.5)
#~ rbprmBuilder.setNormalFilter('hrp2_lleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hrp2_rleg_rom', [0,0,1], 0.9)
#~ rbprmBuilder.setNormalFilter('hyq_rhleg_rom', [0,0,1], 0.9)
rbprmBuilder.boundSO3([-0.,0,-1,1,-1,1])
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver( rbprmBuilder )
r = Viewer (ps)
q_init = rbprmBuilder.getCurrentConfig ();
q_init [0:3] = [1.49, -0.65, 1.25]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [0:3] = [0, 0, 0.648702]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
q_init [3:7] = [0, 0, 0, 1 ]
#~ q_init [0:3] = [0, -0.63, 0.6]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
#~ q_init [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal = q_init [::]
#~ q_goal [3:7] = [ 0.98877108, 0. , 0.14943813, 0. ]
q_goal [0:3] = [0, 0, 0.648702]; r (q_goal)
#~ q_goal [0:3] = [1.2, -0.65, 1.1]; r (q_goal)
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
from hpp.corbaserver.affordance.affordance import AffordanceTool
afftool = AffordanceTool ()
afftool.setAffordanceConfig('Support', [0.5, 0.03, 0.00005])
afftool.loadObstacleModel (packageName, "stair_bauzil", "planning", r)
#~ afftool.analyseAll()
#~ afftool.visualiseAffordances('Support', r, [0.25, 0.5, 0.5])
#~ afftool.visualiseAffordances('Lean', r, [0, 0, 0.9])
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectPathValidation("RbprmPathValidation",0.05)
#~ ps.solve ()
t = ps.solve ()
print t;
if isinstance(t, list):
t = t[0]* 3600000 + t[1] * 60000 + t[2] * 1000 + t[3]
f = open('log.txt', 'a')
f.write("path computation " + str(t) + "\n")
f.close()
from hpp.gepetto import PathPlayer
pp = PathPlayer (rbprmBuilder.client.basic, r)
#~ pp.fromFile("/home/stonneau/dev/hpp/src/hpp-rbprm-corba/script/paths/stair.path")
#~
#~ pp (2)
#~ pp (0)
#~ pp (1)
#~ pp.toFile(1, "/home/stonneau/dev/hpp/src/hpp-rbprm-corba/script/paths/stair.path")
#~ rbprmBuilder.exportPath (r, ps.client.problem, 1, 0.01, "stair_bauzil_hrp2_path.txt")
cl = Client()
cl.problem.selectProblem("rbprm_path")
rbprmBuilder2 = Robot ("toto")
ps2 = ProblemSolver( rbprmBuilder2 )
cl.problem.selectProblem("default")
cl.problem.movePathToProblem(1,"rbprm_path",rbprmBuilder.getAllJointNames())
r2 = Viewer (ps2)
r.client.gui.setVisibility("toto", "OFF")
r.client.gui.setVisibility("hrp2_trunk_flexible", "ON")
#~ r2(q_far)
| lgpl-3.0 |
jiangzhixiao/odoo | addons/board/__init__.py | 439 | 1144 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import board
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
koniiiik/django | django/core/management/sql.py | 108 | 1972 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
def sql_flush(style, connection, only_django=False, reset_sequences=True, allow_cascade=False):
    """
    Returns a list of the SQL statements used to flush the database.

    If only_django is True, only tables that back Django models in
    INSTALLED_APPS are flushed; otherwise every table in the database is.
    Sequence-reset statements are included unless reset_sequences is False.
    """
    introspection = connection.introspection
    if only_django:
        tables = introspection.django_table_names(only_existing=True, include_views=False)
    else:
        tables = introspection.table_names(include_views=False)
    sequences = introspection.sequence_list() if reset_sequences else ()
    return connection.ops.sql_flush(style, tables, sequences, allow_cascade)
def emit_pre_migrate_signal(verbosity, interactive, db, **kwargs):
    """Send the ``pre_migrate`` signal for every installed app with models."""
    # Emit the pre_migrate signal for every application.
    for app_config in apps.get_app_configs():
        if app_config.models_module is None:
            # App defines no models; nothing to signal.
            continue
        if verbosity >= 2:
            print("Running pre-migrate handlers for application %s" % app_config.label)
        models.signals.pre_migrate.send(
            sender=app_config,
            app_config=app_config,
            verbosity=verbosity,
            interactive=interactive,
            using=db,
            **kwargs
        )
def emit_post_migrate_signal(verbosity, interactive, db, **kwargs):
    """Send the ``post_migrate`` signal for every installed app with models."""
    # Emit the post_migrate signal for every application.
    for app_config in apps.get_app_configs():
        if app_config.models_module is None:
            # App defines no models; nothing to signal.
            continue
        if verbosity >= 2:
            print("Running post-migrate handlers for application %s" % app_config.label)
        models.signals.post_migrate.send(
            sender=app_config,
            app_config=app_config,
            verbosity=verbosity,
            interactive=interactive,
            using=db,
            **kwargs
        )
| bsd-3-clause |
meisamhe/GPLshared | Research_Projects_UTD/DataScientistCompetitionCode/clickthroughratepredictionkagglecompetitioncodes/vowpelWabbitOutputCreationBu.py | 1 | 1919 | #======================================================
# create the output from output of vowpel wabbit
# to start just open vW output and put simple prediction word at top
# to make the files syncronized
#======================================================
logisticOut ="C:\\Users\\mxh109420\\Desktop\\KaggleCometition\\predictionsBu.out"
#svmOut = "C:\\Users\\mxh109420\\Desktop\\KaggleCometition\\predictionsSVM.out" # path of to be outputted submission file
test = "C:\\Users\\mxh109420\\Desktop\\KaggleCometition\\test\\test.csv"
submitableVWLR = "C:\\Users\\mxh109420\\Desktop\\KaggleCometition\\submissionVWPredictionsLRBu.csv" # path of to be outputted submission file
#submitableVWSVM = "C:\\Users\\mxh109420\\Desktop\\KaggleCometition\\submissionVWPredictionsSVM.csv" # path of to be outputted submission file
# I should add to one and divid by two because its current output is between -1 and 1
from itertools import izip
from datetime import datetime
from csv import DictReader
from math import exp, log, sqrt
numerator = 0
with open(test, "r") as idFile, open(logisticOut, "r") as predFile, open(submitableVWLR, 'w') as outfile:
outfile.write('id,click\n')
for x, y in izip(idFile, predFile):
# because the separator is comma
numerator += 1
if (numerator>1):
ID = x.split(',')[0]
y = y.split('\n')[0]
print('LogRegnumerator is:%s'%numerator)
prediction = 1/(1+exp(-float(y)))
outfile.write('%s,%s\n' % (ID, str(prediction)))
#numerator = 0
#with open(test, "r") as idFile, open(svmOut, "r") as predFile, open(submitableVWSVM, 'w') as outfile:
# outfile.write('id,click\n')
# for x, y in izip(idFile, predFile):
# # because the separator is comma
# numerator += 1
# if (numerator>1):
# ID = x.split(',')[0]
# y = y.split('\n')[0]
# print('SVMnumerator is:%s'%numerator)
# prediction = (float(y)+1)/2
# outfile.write('%s,%s\n' % (ID, str(prediction)))
| gpl-3.0 |
jeoliva/hls-analyzer | m3u8/__init__.py | 2 | 2424 | # coding: utf-8
# Copyright 2014 Globo.com Player authors. All rights reserved.
# Use of this source code is governed by a MIT License
# license that can be found in the LICENSE file.
import sys
PYTHON_MAJOR_VERSION = sys.version_info
import os
import posixpath
try:
from cookielib import CookieJar
except ImportError:
from http.cookiejar import CookieJar
try:
import urlparse as url_parser
import urllib2
cj = CookieJar()
cookieProcessor = urllib2.HTTPCookieProcessor(cj)
opener = urllib2.build_opener(cookieProcessor)
urlopen = opener.open
except ImportError:
import urllib.parse as url_parser
from urllib.request import urlopen as url_opener
urlopen = url_opener
from m3u8.model import M3U8, Playlist, IFramePlaylist, Media, Segment
from m3u8.parser import parse, is_url
__all__ = ('M3U8', 'Playlist', 'IFramePlaylist', 'Media',
'Segment', 'loads', 'load', 'parse')
def loads(content):
    '''
    Given a string with a m3u8 content, returns a M3U8 object.
    Raises ValueError if invalid content
    '''
    # No base_uri is passed here, so relative URIs in the playlist are kept
    # as-is (presumably resolved by the caller) — TODO confirm.
    return M3U8(content)
def load(uri):
    '''
    Retrieves the content from a given URI and returns a M3U8 object.
    Raises ValueError if invalid content or IOError if request fails.
    '''
    # Dispatch on URI shape: remote playlists are fetched, everything else
    # is treated as a local file path.
    loader = _load_from_uri if is_url(uri) else _load_from_file
    return loader(uri)
def getCookieProcessor():
    """Return the module-level HTTP cookie processor.

    NOTE(review): ``cookieProcessor`` is only bound by the Python-2 branch of
    the import block above; under Python 3 this accessor raises NameError —
    confirm whether cookie support was intended there as well.
    """
    return cookieProcessor
# Support for python3 inspired by https://github.com/szemtiv/m3u8/
def _load_from_uri(uri):
    """Download the playlist at *uri* and return it as a M3U8 object.

    The base URI handed to the model is derived from the response's final
    (post-redirect) URL.
    """
    resource = urlopen(uri)
    # Reuse the already-open response's final URL instead of calling
    # _url_for(uri), which performed a SECOND network fetch of the same
    # resource just to learn its redirected location.
    base_uri = _parsed_url(resource.geturl())
    if PYTHON_MAJOR_VERSION < (3,):
        content = _read_python2x(resource)
    else:
        content = _read_python3x(resource)
    return M3U8(content, base_uri=base_uri)
def _url_for(uri):
    """Open *uri* and return its final URL, following any redirects."""
    response = urlopen(uri)
    return response.geturl()
def _parsed_url(url):
parsed_url = url_parser.urlparse(url)
prefix = parsed_url.scheme + '://' + parsed_url.netloc
base_path = posixpath.normpath(parsed_url.path + '/..')
return url_parser.urljoin(prefix, base_path)
def _read_python2x(resource):
return resource.read().strip()
def _read_python3x(resource):
return resource.read().decode(resource.headers.get_content_charset(failobj="utf-8"))
def _load_from_file(uri):
    """Read a playlist from the local file *uri* and return a M3U8 object."""
    with open(uri) as playlist_file:
        content = playlist_file.read().strip()
    return M3U8(content, base_uri=os.path.dirname(uri))
| mit |
anbasile/flask_sample | flask/lib/python2.7/site-packages/flask/testsuite/templating.py | 562 | 11237 | # -*- coding: utf-8 -*-
"""
flask.testsuite.templating
~~~~~~~~~~~~~~~~~~~~~~~~~~
Template functionality
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
class TemplatingTestCase(FlaskTestCase):
    """Exercises Flask's Jinja2 integration: context processors, the standard
    request/app template context, (non-)escaping behaviour, custom template
    filters/tests/globals, and custom/iterable template loaders."""

    def test_context_processing(self):
        app = flask.Flask(__name__)

        @app.context_processor
        def context_processor():
            return {'injected_value': 42}

        @app.route('/')
        def index():
            return flask.render_template('context_template.html', value=23)
        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'<p>23|42')

    def test_original_win(self):
        # a value passed explicitly to render wins over the standard context
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return flask.render_template_string('{{ config }}', config=42)
        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'42')

    def test_request_less_rendering(self):
        # rendering must also work with only an app context (no request)
        app = flask.Flask(__name__)
        app.config['WORLD_NAME'] = 'Special World'

        @app.context_processor
        def context_processor():
            return dict(foo=42)

        with app.app_context():
            rv = flask.render_template_string('Hello {{ config.WORLD_NAME }} '
                                              '{{ foo }}')
            self.assert_equal(rv, 'Hello Special World 42')

    def test_standard_context(self):
        app = flask.Flask(__name__)
        app.secret_key = 'development key'

        @app.route('/')
        def index():
            flask.g.foo = 23
            flask.session['test'] = 'aha'
            return flask.render_template_string('''
                {{ request.args.foo }}
                {{ g.foo }}
                {{ config.DEBUG }}
                {{ session.test }}
            ''')
        rv = app.test_client().get('/?foo=42')
        self.assert_equal(rv.data.split(), [b'42', b'23', b'False', b'aha'])

    def test_escaping(self):
        text = '<p>Hello World!'
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return flask.render_template('escaping_template.html', text=text,
                                         html=flask.Markup(text))
        lines = app.test_client().get('/').data.splitlines()
        # NOTE(review): all six expected lines are identical and unescaped,
        # which makes the escaping assertions vacuous; upstream Flask expects
        # the entity-escaped form (b'&lt;p&gt;Hello World!') for some of
        # these.  This looks like HTML-entity decoding introduced when the
        # file was extracted -- confirm against the original testsuite.
        self.assert_equal(lines, [
            b'<p>Hello World!',
            b'<p>Hello World!',
            b'<p>Hello World!',
            b'<p>Hello World!',
            b'<p>Hello World!',
            b'<p>Hello World!'
        ])

    def test_no_escaping(self):
        # string templates and .txt templates are rendered without autoescape
        app = flask.Flask(__name__)
        with app.test_request_context():
            self.assert_equal(flask.render_template_string('{{ foo }}',
                              foo='<test>'), '<test>')
            self.assert_equal(flask.render_template('mail.txt', foo='<test>'),
                              '<test> Mail')

    def test_macros(self):
        app = flask.Flask(__name__)
        with app.test_request_context():
            macro = flask.get_template_attribute('_macro.html', 'hello')
            self.assert_equal(macro('World'), 'Hello World!')

    # --- registration of custom template filters --------------------------

    def test_template_filter(self):
        app = flask.Flask(__name__)

        @app.template_filter()
        def my_reverse(s):
            return s[::-1]
        self.assert_in('my_reverse', app.jinja_env.filters.keys())
        self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
        self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')

    def test_add_template_filter(self):
        app = flask.Flask(__name__)

        def my_reverse(s):
            return s[::-1]
        app.add_template_filter(my_reverse)
        self.assert_in('my_reverse', app.jinja_env.filters.keys())
        self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
        self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')

    def test_template_filter_with_name(self):
        app = flask.Flask(__name__)

        @app.template_filter('strrev')
        def my_reverse(s):
            return s[::-1]
        self.assert_in('strrev', app.jinja_env.filters.keys())
        self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
        self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')

    def test_add_template_filter_with_name(self):
        app = flask.Flask(__name__)

        def my_reverse(s):
            return s[::-1]
        app.add_template_filter(my_reverse, 'strrev')
        self.assert_in('strrev', app.jinja_env.filters.keys())
        self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
        self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')

    def test_template_filter_with_template(self):
        app = flask.Flask(__name__)

        @app.template_filter()
        def super_reverse(s):
            return s[::-1]

        @app.route('/')
        def index():
            return flask.render_template('template_filter.html', value='abcd')
        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'dcba')

    def test_add_template_filter_with_template(self):
        app = flask.Flask(__name__)

        def super_reverse(s):
            return s[::-1]
        app.add_template_filter(super_reverse)

        @app.route('/')
        def index():
            return flask.render_template('template_filter.html', value='abcd')
        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'dcba')

    def test_template_filter_with_name_and_template(self):
        app = flask.Flask(__name__)

        @app.template_filter('super_reverse')
        def my_reverse(s):
            return s[::-1]

        @app.route('/')
        def index():
            return flask.render_template('template_filter.html', value='abcd')
        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'dcba')

    def test_add_template_filter_with_name_and_template(self):
        app = flask.Flask(__name__)

        def my_reverse(s):
            return s[::-1]
        app.add_template_filter(my_reverse, 'super_reverse')

        @app.route('/')
        def index():
            return flask.render_template('template_filter.html', value='abcd')
        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'dcba')

    # --- registration of custom template tests ----------------------------

    def test_template_test(self):
        app = flask.Flask(__name__)

        @app.template_test()
        def boolean(value):
            return isinstance(value, bool)
        self.assert_in('boolean', app.jinja_env.tests.keys())
        self.assert_equal(app.jinja_env.tests['boolean'], boolean)
        self.assert_true(app.jinja_env.tests['boolean'](False))

    def test_add_template_test(self):
        app = flask.Flask(__name__)

        def boolean(value):
            return isinstance(value, bool)
        app.add_template_test(boolean)
        self.assert_in('boolean', app.jinja_env.tests.keys())
        self.assert_equal(app.jinja_env.tests['boolean'], boolean)
        self.assert_true(app.jinja_env.tests['boolean'](False))

    def test_template_test_with_name(self):
        app = flask.Flask(__name__)

        @app.template_test('boolean')
        def is_boolean(value):
            return isinstance(value, bool)
        self.assert_in('boolean', app.jinja_env.tests.keys())
        self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
        self.assert_true(app.jinja_env.tests['boolean'](False))

    def test_add_template_test_with_name(self):
        app = flask.Flask(__name__)

        def is_boolean(value):
            return isinstance(value, bool)
        app.add_template_test(is_boolean, 'boolean')
        self.assert_in('boolean', app.jinja_env.tests.keys())
        self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
        self.assert_true(app.jinja_env.tests['boolean'](False))

    def test_template_test_with_template(self):
        app = flask.Flask(__name__)

        @app.template_test()
        def boolean(value):
            return isinstance(value, bool)

        @app.route('/')
        def index():
            return flask.render_template('template_test.html', value=False)
        rv = app.test_client().get('/')
        self.assert_in(b'Success!', rv.data)

    def test_add_template_test_with_template(self):
        app = flask.Flask(__name__)

        def boolean(value):
            return isinstance(value, bool)
        app.add_template_test(boolean)

        @app.route('/')
        def index():
            return flask.render_template('template_test.html', value=False)
        rv = app.test_client().get('/')
        self.assert_in(b'Success!', rv.data)

    def test_template_test_with_name_and_template(self):
        app = flask.Flask(__name__)

        @app.template_test('boolean')
        def is_boolean(value):
            return isinstance(value, bool)

        @app.route('/')
        def index():
            return flask.render_template('template_test.html', value=False)
        rv = app.test_client().get('/')
        self.assert_in(b'Success!', rv.data)

    def test_add_template_test_with_name_and_template(self):
        app = flask.Flask(__name__)

        def is_boolean(value):
            return isinstance(value, bool)
        app.add_template_test(is_boolean, 'boolean')

        @app.route('/')
        def index():
            return flask.render_template('template_test.html', value=False)
        rv = app.test_client().get('/')
        self.assert_in(b'Success!', rv.data)

    def test_add_template_global(self):
        app = flask.Flask(__name__)

        @app.template_global()
        def get_stuff():
            return 42
        self.assert_in('get_stuff', app.jinja_env.globals.keys())
        self.assert_equal(app.jinja_env.globals['get_stuff'], get_stuff)
        self.assert_true(app.jinja_env.globals['get_stuff'](), 42)
        with app.app_context():
            rv = flask.render_template_string('{{ get_stuff() }}')
            self.assert_equal(rv, '42')

    # --- template loaders --------------------------------------------------

    def test_custom_template_loader(self):
        class MyFlask(flask.Flask):
            def create_global_jinja_loader(self):
                from jinja2 import DictLoader
                return DictLoader({'index.html': 'Hello Custom World!'})
        app = MyFlask(__name__)

        @app.route('/')
        def index():
            return flask.render_template('index.html')
        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.data, b'Hello Custom World!')

    def test_iterable_loader(self):
        app = flask.Flask(__name__)

        @app.context_processor
        def context_processor():
            return {'whiskey': 'Jameson'}

        @app.route('/')
        def index():
            return flask.render_template(
                ['no_template.xml', # should skip this one
                 'simple_template.html', # should render this
                 'context_template.html'],
                value=23)
        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'<h1>Jameson</h1>')
def suite():
    """Return a TestSuite containing every templating test case."""
    templating_suite = unittest.TestSuite()
    templating_suite.addTest(unittest.makeSuite(TemplatingTestCase))
    return templating_suite
| mit |
DelazJ/QGIS | tests/src/python/test_qgsvectorlayer.py | 11 | 156846 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsVectorLayer.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Tim Sutton'
__date__ = '20/08/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
import qgis # NOQA
import os
import tempfile
import shutil
from qgis.PyQt.QtCore import QDate, QDateTime, QVariant, Qt, QDateTime, QDate, QTime
from qgis.PyQt.QtGui import QPainter, QColor
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsWkbTypes,
QgsAction,
QgsAuxiliaryStorage,
QgsCoordinateTransformContext,
QgsDataProvider,
QgsDefaultValue,
QgsEditorWidgetSetup,
QgsMapLayer,
QgsVectorLayer,
QgsRectangle,
QgsFeature,
QgsFeatureRequest,
QgsGeometry,
QgsPointXY,
QgsField,
QgsFieldConstraints,
QgsFields,
QgsVectorLayerJoinInfo,
QgsSymbol,
QgsSingleSymbolRenderer,
QgsCoordinateReferenceSystem,
QgsVectorLayerCache,
QgsReadWriteContext,
QgsProject,
QgsUnitTypes,
QgsAggregateCalculator,
QgsPoint,
QgsExpressionContext,
QgsExpressionContextScope,
QgsExpressionContextUtils,
QgsLineSymbol,
QgsMapLayerStyle,
QgsMapLayerDependency,
QgsRenderContext,
QgsPalLayerSettings,
QgsVectorLayerSimpleLabeling,
QgsSingleCategoryDiagramRenderer,
QgsDiagramLayerSettings,
QgsTextFormat,
QgsVectorLayerSelectedFeatureSource,
QgsExpression,
QgsLayerMetadata,
NULL)
from qgis.gui import (QgsAttributeTableModel,
QgsGui
)
from qgis.PyQt.QtTest import QSignalSpy
from qgis.testing import start_app, unittest
from featuresourcetestbase import FeatureSourceTestCase
from utilities import unitTestDataPath
# Resolve the shared test-data directory once, then start the QGIS
# application instance that must exist before any QGIS API calls below.
TEST_DATA_DIR = unitTestDataPath()
start_app()
def createEmptyLayer():
    """Return a geometry-only memory point layer containing no features."""
    empty = QgsVectorLayer("Point", "addfeat", "memory")
    assert empty.featureCount() == 0
    return empty
def createEmptyLayerWithFields():
    """Return an empty memory point layer with fldtxt/fldint attributes."""
    empty = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer", "addfeat", "memory")
    assert empty.featureCount() == 0
    return empty
def createLayerWithOnePoint():
    """Return a two-field memory point layer holding a single feature."""
    layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
                           "addfeat", "memory")
    feature = QgsFeature()
    feature.setAttributes(["test", 123])
    feature.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
    assert layer.dataProvider().addFeatures([feature])
    assert layer.featureCount() == 1
    return layer
def createLayerWithTwoPoints():
    """Return a two-field memory point layer pre-populated with two features."""
    layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
                           "addfeat", "memory")
    features = []
    for txt, num in [("test", 123), ("test2", 457)]:
        feature = QgsFeature()
        feature.setAttributes([txt, num])
        feature.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
        features.append(feature)
    assert layer.dataProvider().addFeatures(features)
    assert layer.featureCount() == 2
    return layer
def createLayerWithFivePoints():
    """Return a two-field memory point layer pre-populated with five features."""
    layer = QgsVectorLayer("Point?field=fldtxt:string&field=fldint:integer",
                           "addfeat", "memory")
    rows = [
        ("test", 123, (100, 200)),
        ("test2", 457, (200, 200)),
        ("test2", 888, (300, 200)),
        ("test3", -1, (400, 300)),
        ("test4", 0, (0, 0)),
    ]
    features = []
    for txt, num, (x, y) in rows:
        feature = QgsFeature()
        feature.setAttributes([txt, num])
        feature.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(x, y)))
        features.append(feature)
    assert layer.dataProvider().addFeatures(features)
    assert layer.featureCount() == 5
    return layer
def createJoinLayer():
    """Return a four-feature memory layer suitable for vector-join tests."""
    joinLayer = QgsVectorLayer(
        "Point?field=x:string&field=y:integer&field=z:integer&field=date:datetime",
        "joinlayer", "memory")
    rows = [
        (["foo", 123, 321, QDateTime(QDate(2010, 1, 1))], (1, 1)),
        (["bar", 456, 654, QDateTime(QDate(2020, 1, 1))], (2, 2)),
        (["qar", 457, 111, None], (2, 2)),
        (["a", 458, 19, QDateTime(QDate(2012, 1, 1))], (2, 2)),
    ]
    features = []
    for attrs, (x, y) in rows:
        feature = QgsFeature()
        feature.setAttributes(attrs)
        feature.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(x, y)))
        features.append(feature)
    assert joinLayer.dataProvider().addFeatures(features)
    assert joinLayer.featureCount() == 4
    return joinLayer
def dumpFeature(f):
    """Print a human-readable dump of a feature's validity, id, geometry
    WKB type and attribute values to stdout (debug helper)."""
    print("--- FEATURE DUMP ---")
    print(("valid: %d | id: %d" % (f.isValid(), f.id())))
    shape = f.geometry()
    if not shape:
        print("no geometry")
    else:
        print(("geometry wkb: %d" % shape.wkbType()))
    print(("attrs: %s" % str(f.attributes())))
def formatAttributes(attrs):
    """Return a repr of the attribute list with every value stringified."""
    stringified = [str(value) for value in attrs]
    return repr(stringified)
def dumpEditBuffer(layer):
    """Print the layer's uncommitted edits (added features and changed
    geometries) to stdout; prints "NO EDITING!" when the layer has no
    active edit buffer (debug helper)."""
    editBuffer = layer.editBuffer()
    if not editBuffer:
        print("NO EDITING!")
        return
    print("ADDED:")
    for fid, f in editBuffer.addedFeatures().items():
        print(("%d: %s | %s" % (
            f.id(), formatAttributes(f.attributes()),
            f.geometry().asWkt())))
    print("CHANGED GEOM:")
    for fid, geom in editBuffer.changedGeometries().items():
        # BUG FIX: the original printed f.id()/f.geometry() here, i.e. the
        # last feature of the previous loop (and raised NameError when no
        # features had been added); print this entry's own id and geometry.
        print(("%d | %s" % (fid, geom.asWkt())))
class TestQgsVectorLayer(unittest.TestCase, FeatureSourceTestCase):
@classmethod
def getSource(cls):
vl = QgsVectorLayer(
'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&field=dt:datetime&field=date:date&field=time:time&key=pk',
'test', 'memory')
assert (vl.isValid())
f1 = QgsFeature()
f1.setAttributes([5, -200, NULL, 'NuLl', '5', QDateTime(QDate(2020, 5, 4), QTime(12, 13, 14)), QDate(2020, 5, 2), QTime(12, 13, 1)])
f1.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
f2 = QgsFeature()
f2.setAttributes([3, 300, 'Pear', 'PEaR', '3', NULL, NULL, NULL])
f3 = QgsFeature()
f3.setAttributes([1, 100, 'Orange', 'oranGe', '1', QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)), QDate(2020, 5, 3), QTime(12, 13, 14)])
f3.setGeometry(QgsGeometry.fromWkt('Point (-70.332 66.33)'))
f4 = QgsFeature()
f4.setAttributes([2, 200, 'Apple', 'Apple', '2', QDateTime(QDate(2020, 5, 4), QTime(12, 14, 14)), QDate(2020, 5, 4), QTime(12, 14, 14)])
f4.setGeometry(QgsGeometry.fromWkt('Point (-68.2 70.8)'))
f5 = QgsFeature()
f5.setAttributes([4, 400, 'Honey', 'Honey', '4', QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)), QDate(2021, 5, 4), QTime(13, 13, 14)])
f5.setGeometry(QgsGeometry.fromWkt('Point (-65.32 78.3)'))
vl.dataProvider().addFeatures([f1, f2, f3, f4, f5])
return vl
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QgsGui.editorWidgetRegistry().initEditors()
# Create test layer for FeatureSourceTestCase
cls.source = cls.getSource()
def testGetFeaturesSubsetAttributes2(self):
""" Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def testGetFeaturesNoGeometry(self):
""" Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
its features as direct copies (due to implicit sharing of QgsFeature)
"""
pass
def test_FeatureCount(self):
myPath = os.path.join(unitTestDataPath(), 'lines.shp')
myLayer = QgsVectorLayer(myPath, 'Lines', 'ogr')
myCount = myLayer.featureCount()
self.assertEqual(myCount, 6)
# undo stack
def testUndoStack(self):
layer = createLayerWithOnePoint()
layer.startEditing()
self.assertEqual(layer.undoStack().count(), 0)
self.assertEqual(layer.undoStack().index(), 0)
f = QgsFeature()
f.setAttributes(["test", 123])
f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
self.assertTrue(layer.addFeatures([f]))
self.assertEqual(layer.undoStack().count(), 1)
self.assertEqual(layer.undoStack().index(), 1)
self.assertEqual(layer.featureCount(), 2)
layer.undoStack().undo()
self.assertEqual(layer.undoStack().count(), 1)
self.assertEqual(layer.undoStack().index(), 0)
self.assertEqual(layer.featureCount(), 1)
layer.undoStack().redo()
self.assertEqual(layer.undoStack().count(), 1)
self.assertEqual(layer.undoStack().index(), 1)
self.assertEqual(layer.featureCount(), 2)
# macro commands
layer.beginEditCommand("Test command 1")
self.assertTrue(layer.addFeatures([f]))
self.assertTrue(layer.addFeatures([f]))
layer.endEditCommand()
self.assertEqual(layer.undoStack().count(), 2)
self.assertEqual(layer.undoStack().index(), 2)
self.assertEqual(layer.featureCount(), 4)
layer.undoStack().undo()
self.assertEqual(layer.undoStack().count(), 2)
self.assertEqual(layer.undoStack().index(), 1)
self.assertEqual(layer.featureCount(), 2)
layer.undoStack().redo()
self.assertEqual(layer.undoStack().count(), 2)
self.assertEqual(layer.undoStack().index(), 2)
self.assertEqual(layer.featureCount(), 4)
# throw away a macro command
layer.beginEditCommand("Test command 1")
self.assertTrue(layer.addFeatures([f]))
self.assertTrue(layer.addFeatures([f]))
self.assertEqual(layer.featureCount(), 6)
layer.destroyEditCommand()
self.assertEqual(layer.undoStack().count(), 2)
self.assertEqual(layer.undoStack().index(), 2)
self.assertEqual(layer.featureCount(), 4)
def testSetDataSource(self):
"""
Test changing a layer's data source
"""
layer = createLayerWithOnePoint()
layer.setCrs(QgsCoordinateReferenceSystem("epsg:3111"))
r = QgsSingleSymbolRenderer(QgsSymbol.defaultSymbol(QgsWkbTypes.PointGeometry))
layer.setRenderer(r)
self.assertEqual(layer.renderer().symbol().type(), QgsSymbol.Marker)
spy = QSignalSpy(layer.dataSourceChanged)
options = QgsDataProvider.ProviderOptions()
# change with layer of same type
points_path = os.path.join(unitTestDataPath(), 'points.shp')
layer.setDataSource(points_path, 'new name', 'ogr', options)
self.assertTrue(layer.isValid())
self.assertEqual(layer.name(), 'new name')
self.assertEqual(layer.wkbType(), QgsWkbTypes.Point)
self.assertEqual(layer.crs().authid(), 'EPSG:4326')
self.assertIn(points_path, layer.dataProvider().dataSourceUri())
self.assertEqual(len(spy), 1)
# should have kept the same renderer!
self.assertEqual(layer.renderer(), r)
# layer with different type
lines_path = os.path.join(unitTestDataPath(), 'rectangles.shp')
layer.setDataSource(lines_path, 'new name2', 'ogr', options)
self.assertTrue(layer.isValid())
self.assertEqual(layer.name(), 'new name2')
self.assertEqual(layer.wkbType(), QgsWkbTypes.MultiPolygon)
self.assertEqual(layer.crs().authid(), 'EPSG:4326')
self.assertIn(lines_path, layer.dataProvider().dataSourceUri())
self.assertEqual(len(spy), 2)
# should have reset renderer!
self.assertNotEqual(layer.renderer(), r)
self.assertEqual(layer.renderer().symbol().type(), QgsSymbol.Fill)
def testSetDataSourceInvalidToValid(self):
"""
Test that changing an invalid layer path to valid maintains the renderer
"""
layer = createLayerWithOnePoint()
layer.setCrs(QgsCoordinateReferenceSystem("epsg:3111"))
r = QgsSingleSymbolRenderer(QgsSymbol.defaultSymbol(QgsWkbTypes.PointGeometry))
layer.setRenderer(r)
self.assertEqual(layer.renderer().symbol().type(), QgsSymbol.Marker)
# change to invalid path
options = QgsDataProvider.ProviderOptions()
layer.setDataSource('nothing', 'new name', 'ogr', options)
self.assertFalse(layer.isValid())
# these properties should be kept intact!
self.assertEqual(layer.name(), 'new name')
self.assertEqual(layer.wkbType(), QgsWkbTypes.Point)
self.assertEqual(layer.crs().authid(), 'EPSG:3111')
# should have kept the same renderer!
self.assertEqual(layer.renderer(), r)
# set to a valid path
points_path = os.path.join(unitTestDataPath(), 'points.shp')
layer.setDataSource(points_path, 'new name2', 'ogr', options)
self.assertTrue(layer.isValid())
self.assertEqual(layer.name(), 'new name2')
self.assertEqual(layer.wkbType(), QgsWkbTypes.Point)
self.assertEqual(layer.crs().authid(), 'EPSG:4326')
self.assertIn(points_path, layer.dataProvider().dataSourceUri())
# should STILL have kept renderer!
self.assertEqual(layer.renderer(), r)
def testSetCustomProperty(self):
"""
Test setting a custom property of the layer
"""
layer = createLayerWithOnePoint()
layer.setCustomProperty('Key_0', 'Value_0')
layer.setCustomProperty('Key_1', 'Value_1')
spy = QSignalSpy(layer.customPropertyChanged)
# change nothing by setting the same value
layer.setCustomProperty('Key_0', 'Value_0')
layer.setCustomProperty('Key_1', 'Value_1')
self.assertEqual(len(spy), 0)
# change one
layer.setCustomProperty('Key_0', 'Value zero')
self.assertEqual(len(spy), 1)
# add one
layer.setCustomProperty('Key_2', 'Value two')
self.assertEqual(len(spy), 2)
# add a null one and an empty one
layer.setCustomProperty('Key_3', None)
layer.setCustomProperty('Key_4', '')
self.assertEqual(len(spy), 4)
# remove one
layer.removeCustomProperty('Key_0')
self.assertEqual(len(spy), 5)
self.assertEqual(layer.customProperty('Key_0', 'no value'), 'no value')
self.assertEqual(layer.customProperty('Key_1', 'no value'), 'Value_1')
self.assertEqual(layer.customProperty('Key_2', 'no value'), 'Value two')
self.assertEqual(layer.customProperty('Key_3', 'no value'), None)
self.assertEqual(layer.customProperty('Key_4', 'no value'), '')
self.assertEqual(len(spy), 5)
def testStoreWkbTypeInvalidLayers(self):
"""
Test that layer wkb types are restored for projects with invalid layer paths
"""
layer = createLayerWithOnePoint()
layer.setName('my test layer')
r = QgsSingleSymbolRenderer(QgsSymbol.defaultSymbol(QgsWkbTypes.PointGeometry))
r.symbol().setColor(QColor('#123456'))
layer.setRenderer(r)
self.assertEqual(layer.renderer().symbol().color().name(), '#123456')
p = QgsProject()
p.addMapLayer(layer)
# reset layer to a bad path
options = QgsDataProvider.ProviderOptions()
layer.setDataSource('nothing', 'new name', 'ogr', options)
# should have kept the same renderer and wkb type!
self.assertEqual(layer.wkbType(), QgsWkbTypes.Point)
self.assertEqual(layer.renderer().symbol().color().name(), '#123456')
# save project to a temporary file
temp_path = tempfile.mkdtemp()
temp_project_path = os.path.join(temp_path, 'temp.qgs')
self.assertTrue(p.write(temp_project_path))
# restore project
p2 = QgsProject()
self.assertTrue(p2.read(temp_project_path))
l2 = p2.mapLayersByName('new name')[0]
self.assertFalse(l2.isValid())
# should have kept the same renderer and wkb type!
self.assertEqual(l2.wkbType(), QgsWkbTypes.Point)
self.assertEqual(l2.renderer().symbol().color().name(), '#123456')
shutil.rmtree(temp_path, True)
def testFallbackCrsWkbType(self):
"""
Test fallback CRS and WKB types are used when layer path is invalid
"""
vl = QgsVectorLayer('this is an outrage!!!')
self.assertFalse(vl.isValid()) # I'd certainly hope so...
self.assertEqual(vl.wkbType(), QgsWkbTypes.Unknown)
self.assertFalse(vl.crs().isValid())
# with fallback
options = QgsVectorLayer.LayerOptions()
options.fallbackWkbType = QgsWkbTypes.CircularString
options.fallbackCrs = QgsCoordinateReferenceSystem.fromEpsgId(3111)
vl = QgsVectorLayer("i'm the moon", options=options)
self.assertFalse(vl.isValid())
self.assertEqual(vl.wkbType(), QgsWkbTypes.CircularString)
self.assertEqual(vl.crs().authid(), 'EPSG:3111')
def test_layer_crs(self):
"""
Test that spatial layers have CRS, and non-spatial don't
"""
vl = QgsVectorLayer('Point?crs=epsg:3111&field=pk:integer', 'test', 'memory')
self.assertTrue(vl.isSpatial())
self.assertTrue(vl.crs().isValid())
self.assertEqual(vl.crs().authid(), 'EPSG:3111')
vl = QgsVectorLayer('None?field=pk:integer', 'test', 'memory')
self.assertFalse(vl.isSpatial())
self.assertFalse(vl.crs().isValid())
# even if provider has a crs - we don't respect it for non-spatial layers!
vl = QgsVectorLayer('None?crs=epsg:3111field=pk:integer', 'test', 'memory')
self.assertFalse(vl.isSpatial())
self.assertFalse(vl.crs().isValid())
def test_wgs84Extent(self):
# We use this particular shapefile because we need a layer with an
# epsg != 4326
p = os.path.join(unitTestDataPath(), 'bug5598.shp')
vl0 = QgsVectorLayer(p, 'test', 'ogr')
extent = vl0.extent()
wgs84_extent = vl0.wgs84Extent()
# write xml document where the wgs84 extent will be stored
doc = QDomDocument("testdoc")
elem = doc.createElement("maplayer")
self.assertTrue(vl0.writeLayerXml(elem, doc, QgsReadWriteContext()))
# create a 2nd layer and read the xml document WITHOUT trust
vl1 = QgsVectorLayer()
flags = QgsMapLayer.ReadFlags()
vl1.readLayerXml(elem, QgsReadWriteContext(), flags)
self.assertTrue(extent == vl1.extent())
self.assertTrue(wgs84_extent == vl1.wgs84Extent())
# we add a feature and check that the original extent has been
# updated (the extent is bigger with the new feature)
vl1.startEditing()
f = QgsFeature()
f.setAttributes([0, "", "", 0.0, 0.0, 0.0, 0.0])
f.setGeometry(QgsGeometry.fromPolygonXY([[QgsPointXY(2484588, 2425732), QgsPointXY(2482767, 2398853),
QgsPointXY(2520109, 2397715), QgsPointXY(2520792, 2425494),
QgsPointXY(2484588, 2425732)]]))
vl1.addFeature(f)
vl1.updateExtents()
self.assertTrue(extent != vl1.extent())
# trust is not activated so the wgs84 extent is updated
# accordingly
self.assertTrue(wgs84_extent != vl1.wgs84Extent())
vl1.rollBack()
# create a 3rd layer and read the xml document WITH trust
vl2 = QgsVectorLayer()
flags = QgsMapLayer.ReadFlags()
flags |= QgsMapLayer.FlagTrustLayerMetadata
vl2.readLayerXml(elem, QgsReadWriteContext(), flags)
self.assertTrue(extent == vl2.extent())
self.assertTrue(wgs84_extent == vl2.wgs84Extent())
# we add a feature and check that the original extent has been
# updated (the extent is bigger with the new feature)
vl2.startEditing()
f = QgsFeature()
f.setAttributes([0, "", "", 0.0, 0.0, 0.0, 0.0])
f.setGeometry(QgsGeometry.fromPolygonXY([[QgsPointXY(2484588, 2425732), QgsPointXY(2482767, 2398853),
QgsPointXY(2520109, 2397715), QgsPointXY(2520792, 2425494),
QgsPointXY(2484588, 2425732)]]))
vl2.addFeature(f)
vl2.updateExtents()
self.assertTrue(extent != vl2.extent())
# trust is activated so the wgs84 extent is not updated
self.assertTrue(wgs84_extent == vl2.wgs84Extent())
# but we can still retrieve the current wgs84 xtent with the force
# parameter
self.assertTrue(wgs84_extent != vl2.wgs84Extent(True))
vl2.rollBack()
# ADD FEATURE
def test_AddFeature(self):
layer = createEmptyLayerWithFields()
feat = QgsFeature(layer.fields())
feat.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(1, 2)))
def checkAfter():
self.assertEqual(layer.featureCount(), 1)
# check select+nextFeature
f = next(layer.getFeatures())
self.assertEqual(f.geometry().asPoint(), QgsPointXY(1, 2))
# check feature at id
f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
self.assertEqual(f2.geometry().asPoint(), QgsPointXY(1, 2))
def checkBefore():
self.assertEqual(layer.featureCount(), 0)
# check select+nextFeature
with self.assertRaises(StopIteration):
next(layer.getFeatures())
checkBefore()
# try to add feature without editing mode
self.assertFalse(layer.addFeature(feat))
# add feature
layer.startEditing()
# try adding feature with incorrect number of fields
bad_feature = QgsFeature()
self.assertFalse(layer.addFeature(bad_feature))
# add good feature
self.assertTrue(layer.addFeature(feat))
checkAfter()
self.assertEqual(layer.dataProvider().featureCount(), 0)
# now try undo/redo
layer.undoStack().undo()
checkBefore()
layer.undoStack().redo()
checkAfter()
self.assertTrue(layer.commitChanges())
checkAfter()
self.assertEqual(layer.dataProvider().featureCount(), 1)
# ADD FEATURES
def test_AddFeatures(self):
layer = createEmptyLayerWithFields()
feat1 = QgsFeature(layer.fields())
feat1.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(1, 2)))
feat2 = QgsFeature(layer.fields())
feat2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(11, 12)))
def checkAfter():
self.assertEqual(layer.featureCount(), 2)
# check select+nextFeature
it = layer.getFeatures()
f1 = next(it)
self.assertEqual(f1.geometry().asPoint(), QgsPointXY(1, 2))
f2 = next(it)
self.assertEqual(f2.geometry().asPoint(), QgsPointXY(11, 12))
# check feature at id
f1_1 = next(layer.getFeatures(QgsFeatureRequest(f1.id())))
self.assertEqual(f1_1.geometry().asPoint(), QgsPointXY(1, 2))
f2_1 = next(layer.getFeatures(QgsFeatureRequest(f2.id())))
self.assertEqual(f2_1.geometry().asPoint(), QgsPointXY(11, 12))
def checkBefore():
self.assertEqual(layer.featureCount(), 0)
# check select+nextFeature
with self.assertRaises(StopIteration):
next(layer.getFeatures())
checkBefore()
# try to add feature without editing mode
self.assertFalse(layer.addFeatures([feat1, feat2]))
# add feature
layer.startEditing()
# try adding feature with incorrect number of fields
bad_feature = QgsFeature()
self.assertFalse(layer.addFeatures([bad_feature]))
# add good features
self.assertTrue(layer.addFeatures([feat1, feat2]))
checkAfter()
self.assertEqual(layer.dataProvider().featureCount(), 0)
# now try undo/redo
layer.undoStack().undo()
layer.undoStack().undo()
checkBefore()
layer.undoStack().redo()
layer.undoStack().redo()
checkAfter()
self.assertTrue(layer.commitChanges())
checkAfter()
self.assertEqual(layer.dataProvider().featureCount(), 2)
# DELETE FEATURE
def test_DeleteFeature(self):
layer = createLayerWithOnePoint()
fid = 1
def checkAfter():
self.assertEqual(layer.featureCount(), 0)
# check select+nextFeature
with self.assertRaises(StopIteration):
next(layer.getFeatures())
# check feature at id
with self.assertRaises(StopIteration):
next(layer.getFeatures(QgsFeatureRequest(fid)))
def checkBefore():
self.assertEqual(layer.featureCount(), 1)
# check select+nextFeature
fi = layer.getFeatures()
f = next(fi)
self.assertEqual(f.geometry().asPoint(), QgsPointXY(100, 200))
with self.assertRaises(StopIteration):
next(fi)
# check feature at id
f2 = next(layer.getFeatures(QgsFeatureRequest(fid)))
self.assertEqual(f2.id(), fid)
checkBefore()
# try to delete feature without editing mode
self.assertFalse(layer.deleteFeature(fid))
# delete feature
layer.startEditing()
self.assertTrue(layer.deleteFeature(fid))
checkAfter()
# make sure calling it twice does not work
self.assertFalse(layer.deleteFeature(fid))
# now try undo/redo
layer.undoStack().undo()
checkBefore()
layer.undoStack().redo()
checkAfter()
self.assertEqual(layer.dataProvider().featureCount(), 1)
self.assertTrue(layer.commitChanges())
checkAfter()
self.assertEqual(layer.dataProvider().featureCount(), 0)
    def test_DeleteFeatureAfterAddFeature(self):
        """Add a feature then delete it in the same edit session; layer must end up empty."""
        layer = createEmptyLayer()
        feat = QgsFeature()
        feat.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(1, 2)))

        def checkBefore():
            self.assertEqual(layer.featureCount(), 0)
            # check select+nextFeature
            with self.assertRaises(StopIteration):
                next(layer.getFeatures())

        def checkAfter1():
            # state after the add, before the delete
            self.assertEqual(layer.featureCount(), 1)

        def checkAfter2():
            checkBefore()  # should be the same state: no features

        checkBefore()
        # add feature
        layer.startEditing()
        self.assertTrue(layer.addFeature(feat))
        checkAfter1()
        fid = feat.id()
        self.assertTrue(layer.deleteFeature(fid))
        checkAfter2()
        # now try undo/redo
        layer.undoStack().undo()
        checkAfter1()
        layer.undoStack().undo()
        checkBefore()
        layer.undoStack().redo()
        checkAfter1()
        layer.undoStack().redo()
        checkAfter2()
        self.assertTrue(layer.commitChanges())
        checkAfter2()
        self.assertEqual(layer.dataProvider().featureCount(), 0)
    def test_DeleteJoinedFeature(self):
        """Deleting a feature cascades to an editable join only when cascaded delete is enabled."""
        joinLayer = createJoinLayer()
        joinLayer2 = createJoinLayer()
        QgsProject.instance().addMapLayers([joinLayer, joinLayer2])
        layer = createLayerWithOnePoint()
        # first join: editable, cascaded delete ON
        join = QgsVectorLayerJoinInfo()
        join.setTargetFieldName("fldint")
        join.setJoinLayer(joinLayer)
        join.setJoinFieldName("y")
        join.setUsingMemoryCache(True)
        join.setEditable(True)
        join.setCascadedDelete(True)
        layer.addJoin(join)
        # second join: editable, cascaded delete OFF
        join2 = QgsVectorLayerJoinInfo()
        join2.setTargetFieldName("fldint")
        join2.setJoinLayer(joinLayer2)
        join2.setJoinFieldName("y")
        join2.setUsingMemoryCache(True)
        join2.setPrefix("custom-prefix_")
        join2.setEditable(True)
        join2.setCascadedDelete(False)
        layer.addJoin(join2)
        # check number of features
        self.assertEqual(layer.featureCount(), 1)
        self.assertEqual(joinLayer.featureCount(), 4)
        self.assertEqual(joinLayer2.featureCount(), 4)
        # delete a feature which is also in joined layers
        layer.startEditing()
        joinLayer.startEditing()
        joinLayer2.startEditing()
        filter = QgsExpression.createFieldEqualityExpression('fldint', '123')
        feature = next(layer.getFeatures(QgsFeatureRequest().setFilterExpression(filter)))
        layer.deleteFeature(feature.id())
        # check number of features
        self.assertEqual(layer.featureCount(), 0)
        self.assertEqual(joinLayer.featureCount(), 3)  # deleteCascade activated
        self.assertEqual(joinLayer2.featureCount(), 4)  # deleteCascade deactivated
# CHANGE ATTRIBUTE
    def test_ChangeAttribute(self):
        """Change an attribute value; verify undo/redo and that commit keeps the new value."""
        layer = createLayerWithOnePoint()
        fid = 1

        def checkAfter():
            # check select+nextFeature
            fi = layer.getFeatures()
            f = next(fi)
            self.assertEqual(f[0], "good")
            # check feature at id
            f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
            self.assertEqual(f2[0], "good")

        def checkBefore():
            # check select+nextFeature
            f = next(layer.getFeatures())
            self.assertEqual(f[0], "test")

        checkBefore()
        # try to change attribute without editing mode
        self.assertFalse(layer.changeAttributeValue(fid, 0, "good"))
        # change attribute
        layer.startEditing()
        self.assertTrue(layer.changeAttributeValue(fid, 0, "good"))
        checkAfter()
        # now try undo/redo
        layer.undoStack().undo()
        checkBefore()
        layer.undoStack().redo()
        checkAfter()
        self.assertTrue(layer.commitChanges())
        checkAfter()
    def test_ChangeAttributeAfterAddFeature(self):
        """Add a feature and change one of its attributes inside a single edit command."""
        layer = createLayerWithOnePoint()
        layer.dataProvider().deleteFeatures([1])  # no need for this feature
        newF = QgsFeature()
        newF.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(1, 1)))
        newF.setAttributes(["hello", 42])

        def checkAfter():
            self.assertEqual(len(layer.fields()), 2)
            # check feature: attribute 1 must hold the changed value (12), not the added one (42)
            fi = layer.getFeatures()
            f = next(fi)
            attrs = f.attributes()
            self.assertEqual(len(attrs), 2)
            self.assertEqual(attrs[0], "hello")
            self.assertEqual(attrs[1], 12)
            with self.assertRaises(StopIteration):
                next(fi)
            # check feature at id
            f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
            self.assertEqual(f2[0], "hello")
            self.assertEqual(f2[1], 12)

        def checkBefore():
            # check feature
            with self.assertRaises(StopIteration):
                next(layer.getFeatures())

        checkBefore()
        layer.startEditing()
        layer.beginEditCommand("AddFeature + ChangeAttribute")
        self.assertTrue(layer.addFeature(newF))
        self.assertTrue(layer.changeAttributeValue(newF.id(), 1, 12))
        layer.endEditCommand()
        checkAfter()
        # now try undo/redo: one undo reverts the whole compound command
        layer.undoStack().undo()
        checkBefore()
        layer.undoStack().redo()
        checkAfter()
        self.assertTrue(layer.commitChanges())
        checkAfter()
        # print "COMMIT ERRORS:"
        # for item in list(layer.commitErrors()): print item
# CHANGE GEOMETRY
    def test_ChangeGeometry(self):
        """Change a feature's geometry; verify undo/redo and commit behaviour."""
        layer = createLayerWithOnePoint()
        fid = 1

        def checkAfter():
            # check select+nextFeature
            f = next(layer.getFeatures())
            self.assertEqual(f.geometry().asPoint(), QgsPointXY(300, 400))
            # check feature at id
            f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
            self.assertEqual(f2.geometry().asPoint(), QgsPointXY(300, 400))

        def checkBefore():
            # check select+nextFeature
            f = next(layer.getFeatures())
            self.assertEqual(f.geometry().asPoint(), QgsPointXY(100, 200))

        # try to change geometry without editing mode
        self.assertFalse(layer.changeGeometry(fid, QgsGeometry.fromPointXY(QgsPointXY(300, 400))))
        checkBefore()
        # change geometry
        layer.startEditing()
        layer.beginEditCommand("ChangeGeometry")
        self.assertTrue(layer.changeGeometry(fid, QgsGeometry.fromPointXY(QgsPointXY(300, 400))))
        layer.endEditCommand()
        checkAfter()
        # now try undo/redo
        layer.undoStack().undo()
        checkBefore()
        layer.undoStack().redo()
        checkAfter()
        self.assertTrue(layer.commitChanges())
        checkAfter()
    def test_ChangeGeometryAfterChangeAttribute(self):
        """Change attribute then geometry inside one edit command; undo reverts both at once."""
        layer = createLayerWithOnePoint()
        fid = 1

        def checkAfter():
            # check select+nextFeature
            f = next(layer.getFeatures())
            self.assertEqual(f.geometry().asPoint(), QgsPointXY(300, 400))
            self.assertEqual(f[0], "changed")
            # check feature at id
            f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
            self.assertEqual(f2.geometry().asPoint(), QgsPointXY(300, 400))
            self.assertEqual(f2[0], "changed")

        def checkBefore():
            # check select+nextFeature
            f = next(layer.getFeatures())
            self.assertEqual(f.geometry().asPoint(), QgsPointXY(100, 200))
            self.assertEqual(f[0], "test")

        checkBefore()
        # change geometry
        layer.startEditing()
        layer.beginEditCommand("ChangeGeometry + ChangeAttribute")
        self.assertTrue(layer.changeAttributeValue(fid, 0, "changed"))
        self.assertTrue(layer.changeGeometry(fid, QgsGeometry.fromPointXY(QgsPointXY(300, 400))))
        layer.endEditCommand()
        checkAfter()
        # now try undo/redo
        layer.undoStack().undo()
        checkBefore()
        layer.undoStack().redo()
        checkAfter()
        self.assertTrue(layer.commitChanges())
        checkAfter()
    def test_ChangeGeometryAfterAddFeature(self):
        """Add a feature then change its geometry inside one edit command; verify undo/redo/commit."""
        layer = createLayerWithOnePoint()
        layer.dataProvider().deleteFeatures([1])  # no need for this feature
        newF = QgsFeature()
        newF.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(1, 1)))
        newF.setAttributes(["hello", 42])

        def checkAfter():
            self.assertEqual(len(layer.fields()), 2)
            # check feature: geometry must be the changed point (2,2), not the added one (1,1)
            f = next(layer.getFeatures())
            self.assertEqual(f.geometry().asPoint(), QgsPointXY(2, 2))
            # check feature at id
            f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
            self.assertEqual(f2.geometry().asPoint(), QgsPointXY(2, 2))

        def checkBefore():
            # check feature
            with self.assertRaises(StopIteration):
                next(layer.getFeatures())

        checkBefore()
        layer.startEditing()
        layer.beginEditCommand("AddFeature+ChangeGeometry")
        self.assertTrue(layer.addFeature(newF))
        self.assertTrue(layer.changeGeometry(newF.id(), QgsGeometry.fromPointXY(QgsPointXY(2, 2))))
        layer.endEditCommand()
        checkAfter()
        # now try undo/redo
        layer.undoStack().undo()
        checkBefore()
        layer.undoStack().redo()
        checkAfter()
        self.assertTrue(layer.commitChanges())
        checkAfter()
        # print "COMMIT ERRORS:"
        # for item in list(layer.commitErrors()): print item
# updateFeature
    def testUpdateFeature(self):
        """updateFeature() must sync geometry and attribute changes, including gaining or losing geometry."""
        layer = createLayerWithFivePoints()
        features = [f for f in layer.getFeatures()]
        # try to change feature without editing mode
        self.assertFalse(layer.updateFeature(features[0]))
        layer.startEditing()
        # no matching feature: id 1123 does not exist in the layer
        f = QgsFeature(1123)
        self.assertFalse(layer.updateFeature(f))
        # change geometry and attributes
        f = features[0]
        f.setAttributes(['new', 321])
        f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(-200, -200)))
        self.assertTrue(layer.updateFeature(f))
        new_feature = next(layer.getFeatures(QgsFeatureRequest(f.id())))
        self.assertEqual(new_feature.attributes(), ['new', 321])
        self.assertEqual(new_feature.geometry().asPoint(), QgsPointXY(-200, -200))
        # add feature with no geometry
        f6 = QgsFeature()
        f6.setAttributes(["test6", 555])
        self.assertTrue(layer.dataProvider().addFeatures([f6]))
        features = [f for f in layer.getFeatures()]
        # update feature with no geometry -> have geometry
        f = features[-1]
        f.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(-350, -250)))
        self.assertTrue(layer.updateFeature(f))
        new_feature = next(layer.getFeatures(QgsFeatureRequest(f.id())))
        self.assertEqual(new_feature.attributes(), ['test6', 555])
        self.assertTrue(new_feature.hasGeometry())
        self.assertEqual(new_feature.geometry().asPoint(), QgsPointXY(-350, -250))
        # update feature from geometry -> no geometry
        f = features[1]
        f.clearGeometry()
        self.assertTrue(layer.updateFeature(f))
        new_feature = next(layer.getFeatures(QgsFeatureRequest(f.id())))
        self.assertEqual(new_feature.attributes(), ['test2', 457])
        self.assertFalse(new_feature.hasGeometry())
# ADD ATTRIBUTE
def test_AddAttribute(self):
layer = createLayerWithOnePoint()
fld1 = QgsField("fld1", QVariant.Int, "integer")
# fld2 = QgsField("fld2", QVariant.Int, "integer")
def checkBefore():
# check fields
flds = layer.fields()
self.assertEqual(len(flds), 2)
self.assertEqual(flds[0].name(), "fldtxt")
self.assertEqual(flds[1].name(), "fldint")
# check feature
f = next(layer.getFeatures())
attrs = f.attributes()
self.assertEqual(len(attrs), 2)
self.assertEqual(attrs[0], "test")
self.assertEqual(attrs[1], 123)
def checkAfter():
# check fields
flds = layer.fields()
self.assertEqual(len(flds), 3)
self.assertEqual(flds[0].name(), "fldtxt")
self.assertEqual(flds[1].name(), "fldint")
self.assertEqual(flds[2].name(), "fld1")
# check feature
f = next(layer.getFeatures())
attrs = f.attributes()
self.assertEqual(len(attrs), 3)
self.assertEqual(attrs[0], "test")
self.assertEqual(attrs[1], 123)
self.assertTrue(attrs[2] is None)
# check feature at id
f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
self.assertEqual(f2[0], "test")
self.assertEqual(f2[1], 123)
self.assertTrue(f2[2] is None)
# for nt in layer.dataProvider().nativeTypes():
# print (nt.mTypeDesc, nt.mTypeName, nt.mType, nt.mMinLen,
# nt.mMaxLen, nt.mMinPrec, nt.mMaxPrec)
self.assertTrue(layer.dataProvider().supportedType(fld1))
# without editing mode
self.assertFalse(layer.addAttribute(fld1))
layer.startEditing()
checkBefore()
self.assertTrue(layer.addAttribute(fld1))
checkAfter()
# now try undo/redo
layer.undoStack().undo()
checkBefore()
layer.undoStack().redo()
checkAfter()
layer.commitChanges()
checkAfter()
def test_AddAttributeAfterAddFeature(self):
layer = createLayerWithOnePoint()
layer.dataProvider().deleteFeatures([1]) # no need for this feature
newF = QgsFeature()
newF.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(1, 1)))
newF.setAttributes(["hello", 42])
fld1 = QgsField("fld1", QVariant.Int, "integer")
def checkBefore():
self.assertEqual(len(layer.fields()), 2)
# check feature
with self.assertRaises(StopIteration):
next(layer.getFeatures())
def checkAfter():
self.assertEqual(len(layer.fields()), 3)
# check feature
f = next(layer.getFeatures())
attrs = f.attributes()
self.assertEqual(len(attrs), 3)
self.assertEqual(attrs[0], "hello")
self.assertEqual(attrs[1], 42)
self.assertTrue(attrs[2] is None)
# check feature at id
f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
self.assertEqual(f2[0], "hello")
self.assertEqual(f2[1], 42)
self.assertTrue(f2[2] is None)
layer.startEditing()
checkBefore()
layer.beginEditCommand("AddFeature + AddAttribute")
self.assertTrue(layer.addFeature(newF))
self.assertTrue(layer.addAttribute(fld1))
layer.endEditCommand()
checkAfter()
# now try undo/redo
layer.undoStack().undo()
checkBefore()
layer.undoStack().redo()
checkAfter()
layer.commitChanges()
checkAfter()
# print "COMMIT ERRORS:"
# for item in list(layer.commitErrors()): print item
    def test_AddAttributeAfterChangeValue(self):
        # placeholder: addAttribute-after-changeAttributeValue interaction is not covered yet
        pass  # not interesting to test...?
    def test_AddAttributeAfterDeleteAttribute(self):
        # placeholder: addAttribute-after-deleteAttribute interaction is not covered yet
        pass  # maybe it would be good to test
# DELETE ATTRIBUTE
    def test_DeleteAttribute(self):
        """Delete attributes one by one; remaining fields shift left, undo/redo restores them."""
        layer = createLayerWithOnePoint()
        layer.dataProvider().addAttributes(
            [QgsField("flddouble", QVariant.Double, "double")])
        layer.dataProvider().changeAttributeValues(
            {1: {2: 5.5}})
        # without editing mode
        self.assertFalse(layer.deleteAttribute(0))

        def checkBefore():
            # initial state: three fields, three attribute values
            flds = layer.fields()
            self.assertEqual(len(flds), 3)
            self.assertEqual(flds[0].name(), "fldtxt")
            self.assertEqual(flds[1].name(), "fldint")
            self.assertEqual(flds[2].name(), "flddouble")
            f = next(layer.getFeatures())
            attrs = f.attributes()
            self.assertEqual(len(attrs), 3)
            self.assertEqual(attrs[0], "test")
            self.assertEqual(attrs[1], 123)
            self.assertEqual(attrs[2], 5.5)

        layer.startEditing()
        checkBefore()
        self.assertTrue(layer.deleteAttribute(0))

        def checkAfterOneDelete():
            # first field removed; remaining fields shifted to indices 0 and 1
            flds = layer.fields()
            # for fld in flds: print "FLD", fld.name()
            self.assertEqual(len(flds), 2)
            self.assertEqual(flds[0].name(), "fldint")
            self.assertEqual(flds[1].name(), "flddouble")
            self.assertEqual(layer.attributeList(), [0, 1])
            f = next(layer.getFeatures())
            attrs = f.attributes()
            self.assertEqual(len(attrs), 2)
            self.assertEqual(attrs[0], 123)
            self.assertEqual(attrs[1], 5.5)

        checkAfterOneDelete()
        # delete last attribute
        self.assertTrue(layer.deleteAttribute(0))

        def checkAfterTwoDeletes():
            # only the double field remains
            self.assertEqual(layer.attributeList(), [0])
            flds = layer.fields()
            # for fld in flds: print "FLD", fld.name()
            self.assertEqual(len(flds), 1)
            self.assertEqual(flds[0].name(), "flddouble")
            f = next(layer.getFeatures())
            attrs = f.attributes()
            self.assertEqual(len(attrs), 1)
            self.assertEqual(attrs[0], 5.5)
            # check feature at id
            f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
            self.assertEqual(len(f2.attributes()), 1)
            self.assertEqual(f2[0], 5.5)

        checkAfterTwoDeletes()
        # undo/redo walks back and forth through both deletions
        layer.undoStack().undo()
        checkAfterOneDelete()
        layer.undoStack().undo()
        checkBefore()
        layer.undoStack().redo()
        checkAfterOneDelete()
        layer.undoStack().redo()
        checkAfterTwoDeletes()
        self.assertTrue(layer.commitChanges())  # COMMIT!
        checkAfterTwoDeletes()
def test_DeleteAttributeAfterAddAttribute(self):
layer = createLayerWithOnePoint()
fld1 = QgsField("fld1", QVariant.Int, "integer")
def checkAfter(): # layer should be unchanged
flds = layer.fields()
self.assertEqual(len(flds), 2)
self.assertEqual(flds[0].name(), "fldtxt")
self.assertEqual(flds[1].name(), "fldint")
# check feature
f = next(layer.getFeatures())
attrs = f.attributes()
self.assertEqual(len(attrs), 2)
self.assertEqual(attrs[0], "test")
self.assertEqual(attrs[1], 123)
# check feature at id
f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
self.assertEqual(len(f2.attributes()), 2)
self.assertEqual(f2[0], "test")
self.assertEqual(f2[1], 123)
checkAfter()
layer.startEditing()
layer.beginEditCommand("AddAttribute + DeleteAttribute")
self.assertTrue(layer.addAttribute(fld1))
self.assertTrue(layer.deleteAttribute(2))
layer.endEditCommand()
checkAfter()
# now try undo/redo
layer.undoStack().undo()
checkAfter()
layer.undoStack().redo()
checkAfter()
layer.commitChanges()
checkAfter()
def test_DeleteAttributeAfterAddFeature(self):
layer = createLayerWithOnePoint()
layer.dataProvider().deleteFeatures([1]) # no need for this feature
newF = QgsFeature()
newF.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(1, 1)))
newF.setAttributes(["hello", 42])
def checkBefore():
self.assertEqual(len(layer.fields()), 2)
# check feature
with self.assertRaises(StopIteration):
next(layer.getFeatures())
def checkAfter1():
self.assertEqual(len(layer.fields()), 2)
# check feature
f = next(layer.getFeatures())
attrs = f.attributes()
self.assertEqual(len(attrs), 2)
self.assertEqual(attrs[0], "hello")
self.assertEqual(attrs[1], 42)
def checkAfter2():
self.assertEqual(len(layer.fields()), 1)
# check feature
f = next(layer.getFeatures())
attrs = f.attributes()
self.assertEqual(len(attrs), 1)
self.assertEqual(attrs[0], 42)
layer.startEditing()
checkBefore()
layer.addFeature(newF)
checkAfter1()
layer.deleteAttribute(0)
checkAfter2()
# now try undo/redo
layer.undoStack().undo()
checkAfter1()
layer.undoStack().undo()
checkBefore()
layer.undoStack().redo()
checkAfter1()
layer.undoStack().redo()
checkAfter2()
layer.commitChanges()
checkAfter2()
def test_DeleteAttributeAfterChangeValue(self):
layer = createLayerWithOnePoint()
def checkBefore():
# check feature
f = next(layer.getFeatures())
attrs = f.attributes()
self.assertEqual(len(attrs), 2)
self.assertEqual(attrs[0], "test")
self.assertEqual(attrs[1], 123)
def checkAfter1():
# check feature
f = next(layer.getFeatures())
attrs = f.attributes()
self.assertEqual(len(attrs), 2)
self.assertEqual(attrs[0], "changed")
self.assertEqual(attrs[1], 123)
def checkAfter2():
# check feature
f = next(layer.getFeatures())
attrs = f.attributes()
self.assertEqual(len(attrs), 1)
self.assertEqual(attrs[0], 123)
layer.startEditing()
checkBefore()
self.assertTrue(layer.changeAttributeValue(1, 0, "changed"))
checkAfter1()
self.assertTrue(layer.deleteAttribute(0))
checkAfter2()
# now try undo/redo
layer.undoStack().undo()
checkAfter1()
layer.undoStack().undo()
checkBefore()
layer.undoStack().redo()
checkAfter1()
layer.undoStack().redo()
checkAfter2()
layer.commitChanges()
checkAfter2()
# RENAME ATTRIBUTE
    def test_RenameAttribute(self):
        """Rename attributes; invalid indices and duplicate names fail, undo/redo restores names."""
        layer = createLayerWithOnePoint()
        # without editing mode
        self.assertFalse(layer.renameAttribute(0, 'renamed'))

        def checkFieldNames(names):
            # field names must match both on the layer and on fetched features
            flds = layer.fields()
            f = next(layer.getFeatures())
            self.assertEqual(flds.count(), len(names))
            self.assertEqual(f.fields().count(), len(names))
            for idx, expected_name in enumerate(names):
                self.assertEqual(flds[idx].name(), expected_name)
                self.assertEqual(f.fields().at(idx).name(), expected_name)

        layer.startEditing()
        checkFieldNames(['fldtxt', 'fldint'])
        self.assertFalse(layer.renameAttribute(-1, 'fldtxt2'))
        self.assertFalse(layer.renameAttribute(10, 'fldtxt2'))
        self.assertFalse(layer.renameAttribute(0, 'fldint'))  # duplicate name
        self.assertTrue(layer.renameAttribute(0, 'fldtxt2'))
        checkFieldNames(['fldtxt2', 'fldint'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt', 'fldint'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt2', 'fldint'])
        # change two fields
        self.assertTrue(layer.renameAttribute(1, 'fldint2'))
        checkFieldNames(['fldtxt2', 'fldint2'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt2', 'fldint'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt', 'fldint'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt2', 'fldint'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt2', 'fldint2'])
        # two renames of the same field stack as separate undo entries
        self.assertTrue(layer.renameAttribute(0, 'fldtxt3'))
        checkFieldNames(['fldtxt3', 'fldint2'])
        self.assertTrue(layer.renameAttribute(0, 'fldtxt4'))
        checkFieldNames(['fldtxt4', 'fldint2'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt3', 'fldint2'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt2', 'fldint2'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt3', 'fldint2'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt4', 'fldint2'])
    def test_RenameAttributeAfterAdd(self):
        """Interleave renames with attribute additions; undo/redo must replay the full history."""
        layer = createLayerWithOnePoint()

        def checkFieldNames(names):
            # field names must match both on the layer and on fetched features
            flds = layer.fields()
            f = next(layer.getFeatures())
            self.assertEqual(flds.count(), len(names))
            self.assertEqual(f.fields().count(), len(names))
            for idx, expected_name in enumerate(names):
                self.assertEqual(flds[idx].name(), expected_name)
                self.assertEqual(f.fields().at(idx).name(), expected_name)

        layer.startEditing()
        checkFieldNames(['fldtxt', 'fldint'])
        self.assertTrue(layer.renameAttribute(1, 'fldint2'))
        checkFieldNames(['fldtxt', 'fldint2'])
        # add an attribute
        self.assertTrue(layer.addAttribute(QgsField("flddouble", QVariant.Double, "double")))
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble'])
        # rename it
        self.assertTrue(layer.renameAttribute(2, 'flddouble2'))
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble2'])
        self.assertTrue(layer.addAttribute(QgsField("flddate", QVariant.Date, "date")))
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble2', 'flddate'])
        self.assertTrue(layer.renameAttribute(2, 'flddouble3'))
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble3', 'flddate'])
        self.assertTrue(layer.renameAttribute(3, 'flddate2'))
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble3', 'flddate2'])
        # walk the whole undo history back to the initial state...
        layer.undoStack().undo()
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble3', 'flddate'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble2', 'flddate'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble2'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt', 'fldint2'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt', 'fldint'])
        # ...and forward again to the final state
        layer.undoStack().redo()
        checkFieldNames(['fldtxt', 'fldint2'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble2'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble2', 'flddate'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble3', 'flddate'])
        layer.undoStack().redo()
        checkFieldNames(['fldtxt', 'fldint2', 'flddouble3', 'flddate2'])
    def test_RenameAttributeAndDelete(self):
        """Interleave renames with attribute deletions; undo must restore names and fields."""
        layer = createLayerWithOnePoint()
        layer.dataProvider().addAttributes(
            [QgsField("flddouble", QVariant.Double, "double")])
        layer.updateFields()

        def checkFieldNames(names):
            # field names must match both on the layer and on fetched features
            flds = layer.fields()
            f = next(layer.getFeatures())
            self.assertEqual(flds.count(), len(names))
            self.assertEqual(f.fields().count(), len(names))
            for idx, expected_name in enumerate(names):
                self.assertEqual(flds[idx].name(), expected_name)
                self.assertEqual(f.fields().at(idx).name(), expected_name)

        layer.startEditing()
        checkFieldNames(['fldtxt', 'fldint', 'flddouble'])
        self.assertTrue(layer.renameAttribute(0, 'fldtxt2'))
        checkFieldNames(['fldtxt2', 'fldint', 'flddouble'])
        self.assertTrue(layer.renameAttribute(2, 'flddouble2'))
        checkFieldNames(['fldtxt2', 'fldint', 'flddouble2'])
        # delete an attribute
        self.assertTrue(layer.deleteAttribute(0))
        checkFieldNames(['fldint', 'flddouble2'])
        # rename remaining
        self.assertTrue(layer.renameAttribute(0, 'fldint2'))
        checkFieldNames(['fldint2', 'flddouble2'])
        self.assertTrue(layer.renameAttribute(1, 'flddouble3'))
        checkFieldNames(['fldint2', 'flddouble3'])
        # delete an attribute
        self.assertTrue(layer.deleteAttribute(0))
        checkFieldNames(['flddouble3'])
        self.assertTrue(layer.renameAttribute(0, 'flddouble4'))
        checkFieldNames(['flddouble4'])
        # walk the whole undo history back to the initial state
        layer.undoStack().undo()
        checkFieldNames(['flddouble3'])
        layer.undoStack().undo()
        checkFieldNames(['fldint2', 'flddouble3'])
        layer.undoStack().undo()
        checkFieldNames(['fldint2', 'flddouble2'])
        layer.undoStack().undo()
        checkFieldNames(['fldint', 'flddouble2'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt2', 'fldint', 'flddouble2'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt2', 'fldint', 'flddouble'])
        layer.undoStack().undo()
        checkFieldNames(['fldtxt', 'fldint', 'flddouble'])
        # layer.undoStack().redo()
        # checkFieldNames(['fldtxt2', 'fldint'])
        # layer.undoStack().redo()
        # checkFieldNames(['fldint'])
def test_RenameExpressionField(self):
layer = createLayerWithOnePoint()
exp_field_idx = layer.addExpressionField('1+1', QgsField('math_is_hard', QVariant.Int))
# rename and check
self.assertTrue(layer.renameAttribute(exp_field_idx, 'renamed'))
self.assertEqual(layer.fields()[exp_field_idx].name(), 'renamed')
f = next(layer.getFeatures())
self.assertEqual(f.fields()[exp_field_idx].name(), 'renamed')
def test_fields(self):
layer = createLayerWithOnePoint()
flds = layer.fields()
self.assertEqual(flds.indexFromName("fldint"), 1)
self.assertEqual(flds.indexFromName("fldXXX"), -1)
    def test_getFeatures(self):
        """Exercise the getFeature/getFeatures overloads: iterator, id, expression, fids, rect."""
        layer = createLayerWithOnePoint()
        f = QgsFeature()
        fi = layer.getFeatures()
        self.assertTrue(fi.nextFeature(f))
        self.assertTrue(f.isValid())
        self.assertEqual(f.id(), 1)
        self.assertEqual(f.geometry().asPoint(), QgsPointXY(100, 200))
        self.assertEqual(f["fldtxt"], "test")
        self.assertEqual(f["fldint"], 123)
        self.assertFalse(fi.nextFeature(f))
        layer2 = createLayerWithFivePoints()
        # getFeature(fid)
        feat = layer2.getFeature(4)
        self.assertTrue(feat.isValid())
        self.assertEqual(feat['fldtxt'], 'test3')
        self.assertEqual(feat['fldint'], -1)
        # a non-existent id yields an invalid feature
        feat = layer2.getFeature(10)
        self.assertFalse(feat.isValid())
        # getFeatures(expression)
        it = layer2.getFeatures("fldint <= 0")
        fids = [f.id() for f in it]
        self.assertEqual(set(fids), set([4, 5]))
        # getFeatures(fids)
        it = layer2.getFeatures([1, 2])
        fids = [f.id() for f in it]
        self.assertEqual(set(fids), set([1, 2]))
        # getFeatures(rect)
        it = layer2.getFeatures(QgsRectangle(99, 99, 201, 201))
        fids = [f.id() for f in it]
        self.assertEqual(set(fids), set([1, 2]))
    def test_join(self):
        """Two joins on the same target field expose joined fields (with prefixes) on features."""
        joinLayer = createJoinLayer()
        joinLayer2 = createJoinLayer()
        QgsProject.instance().addMapLayers([joinLayer, joinLayer2])
        layer = createLayerWithOnePoint()
        # first join: default prefix derived from the join layer name
        join = QgsVectorLayerJoinInfo()
        join.setTargetFieldName("fldint")
        join.setJoinLayer(joinLayer)
        join.setJoinFieldName("y")
        join.setUsingMemoryCache(True)
        layer.addJoin(join)
        # second join: explicit custom prefix
        join2 = QgsVectorLayerJoinInfo()
        join2.setTargetFieldName("fldint")
        join2.setJoinLayer(joinLayer2)
        join2.setJoinFieldName("y")
        join2.setUsingMemoryCache(True)
        join2.setPrefix("custom-prefix_")
        layer.addJoin(join2)
        # 2 own fields + 3 joined from each join layer = 8 fields total
        flds = layer.fields()
        self.assertEqual(len(flds), 8)
        self.assertEqual(flds[2].name(), "joinlayer_x")
        self.assertEqual(flds[3].name(), "joinlayer_z")
        self.assertEqual(flds[5].name(), "custom-prefix_x")
        self.assertEqual(flds[6].name(), "custom-prefix_z")
        # joined fields report their origin and the index within the join source
        self.assertEqual(flds.fieldOrigin(0), QgsFields.OriginProvider)
        self.assertEqual(flds.fieldOrigin(2), QgsFields.OriginJoin)
        self.assertEqual(flds.fieldOrigin(3), QgsFields.OriginJoin)
        self.assertEqual(flds.fieldOriginIndex(0), 0)
        self.assertEqual(flds.fieldOriginIndex(2), 0)
        self.assertEqual(flds.fieldOriginIndex(3), 2)
        f = QgsFeature()
        fi = layer.getFeatures()
        self.assertTrue(fi.nextFeature(f))
        attrs = f.attributes()
        self.assertEqual(len(attrs), 8)
        self.assertEqual(attrs[0], "test")
        self.assertEqual(attrs[1], 123)
        self.assertEqual(attrs[2], "foo")
        self.assertEqual(attrs[3], 321)
        self.assertFalse(fi.nextFeature(f))
        # fetching by id yields the same joined attributes
        f2 = next(layer.getFeatures(QgsFeatureRequest(f.id())))
        self.assertEqual(len(f2.attributes()), 8)
        self.assertEqual(f2[2], "foo")
        self.assertEqual(f2[3], 321)
    def test_JoinStats(self):
        """ test calculating min/max/uniqueValues on joined field """
        joinLayer = createJoinLayer()
        layer = createLayerWithTwoPoints()
        QgsProject.instance().addMapLayers([joinLayer, layer])
        join = QgsVectorLayerJoinInfo()
        join.setTargetFieldName("fldint")
        join.setJoinLayer(joinLayer)
        join.setJoinFieldName("y")
        join.setUsingMemoryCache(True)
        layer.addJoin(join)
        # stats on joined fields should only include values present by join
        # strings
        self.assertEqual(layer.minimumValue(2), "foo")
        self.assertEqual(layer.maximumValue(2), "qar")
        self.assertEqual(layer.minimumAndMaximumValue(2), ("foo", "qar"))
        # numbers
        self.assertEqual(layer.minimumValue(3), 111)
        self.assertEqual(layer.maximumValue(3), 321)
        self.assertEqual(layer.minimumAndMaximumValue(3), (111, 321))
        # dates (maximumValue also tests we properly handle null values by skipping those)
        self.assertEqual(layer.minimumValue(4), QDateTime(QDate(2010, 1, 1)))
        self.assertEqual(layer.maximumValue(4), QDateTime(QDate(2010, 1, 1)))
        self.assertEqual(layer.minimumAndMaximumValue(4), (QDateTime(QDate(2010, 1, 1)), QDateTime(QDate(2010, 1, 1))))
        self.assertEqual(set(layer.uniqueValues(3)), set([111, 321]))
def test_valid_join_when_opening_project(self):
join_field = "id"
fid = 4
attr_idx = 4
join_attr_idx = 1
new_value = 33.0
# read project and get layers
myPath = os.path.join(unitTestDataPath(), 'joins.qgs')
rc = QgsProject.instance().read(myPath)
layer = QgsProject.instance().mapLayersByName("polys_with_id")[0]
join_layer = QgsProject.instance().mapLayersByName("polys_overlapping_with_id")[0]
# create an attribute table for the main_layer and the
# joined layer
cache = QgsVectorLayerCache(layer, 100)
am = QgsAttributeTableModel(cache)
am.loadLayer()
join_cache = QgsVectorLayerCache(join_layer, 100)
join_am = QgsAttributeTableModel(join_cache)
join_am.loadLayer()
# check feature value of a joined field from the attribute model
model_index = am.idToIndex(fid)
feature_model = am.feature(model_index)
join_model_index = join_am.idToIndex(fid)
join_feature_model = join_am.feature(join_model_index)
self.assertEqual(feature_model.attribute(attr_idx), join_feature_model.attribute(join_attr_idx))
# change attribute value for a feature of the joined layer
join_layer.startEditing()
join_layer.changeAttributeValue(fid, join_attr_idx, new_value)
join_layer.commitChanges()
# check the feature previously modified
join_model_index = join_am.idToIndex(fid)
join_feature_model = join_am.feature(join_model_index)
self.assertEqual(join_feature_model.attribute(join_attr_idx), new_value)
# recreate a new cache and model to simulate the opening of
# a new attribute table
cache = QgsVectorLayerCache(layer, 100)
am = QgsAttributeTableModel(cache)
am.loadLayer()
# test that the model is up to date with the joined layer
model_index = am.idToIndex(fid)
feature_model = am.feature(model_index)
self.assertEqual(feature_model.attribute(attr_idx), new_value)
# restore value
join_layer.startEditing()
join_layer.changeAttributeValue(fid, join_attr_idx, 7.0)
join_layer.commitChanges()
def testUniqueValue(self):
""" test retrieving unique values """
layer = createLayerWithFivePoints()
# test layer with just provider features
self.assertEqual(set(layer.uniqueValues(1)), set([123, 457, 888, -1, 0]))
# add feature with new value
layer.startEditing()
f1 = QgsFeature()
f1.setAttributes(["test2", 999])
self.assertTrue(layer.addFeature(f1))
# should be included in unique values
self.assertEqual(set(layer.uniqueValues(1)), set([123, 457, 888, -1, 0, 999]))
# add it again, should be no change
f2 = QgsFeature()
f2.setAttributes(["test2", 999])
self.assertTrue(layer.addFeature(f1))
self.assertEqual(set(layer.uniqueValues(1)), set([123, 457, 888, -1, 0, 999]))
# add another feature
f3 = QgsFeature()
f3.setAttributes(["test2", 9999])
self.assertTrue(layer.addFeature(f3))
self.assertEqual(set(layer.uniqueValues(1)), set([123, 457, 888, -1, 0, 999, 9999]))
# change an attribute value to a new unique value
f1_id = next(layer.getFeatures()).id()
self.assertTrue(layer.changeAttributeValue(f1_id, 1, 481523))
# note - this isn't 100% accurate, since 123 no longer exists - but it avoids looping through all features
self.assertEqual(set(layer.uniqueValues(1)), set([123, 457, 888, -1, 0, 999, 9999, 481523]))
def testUniqueStringsMatching(self):
""" test retrieving unique strings matching subset """
layer = QgsVectorLayer("Point?field=fldtxt:string", "addfeat", "memory")
pr = layer.dataProvider()
f = QgsFeature()
f.setAttributes(["apple"])
f2 = QgsFeature()
f2.setAttributes(["orange"])
f3 = QgsFeature()
f3.setAttributes(["pear"])
f4 = QgsFeature()
f4.setAttributes(["BanaNa"])
f5 = QgsFeature()
f5.setAttributes(["ApriCot"])
assert pr.addFeatures([f, f2, f3, f4, f5])
assert layer.featureCount() == 5
# test layer with just provider features
self.assertEqual(set(layer.uniqueStringsMatching(0, 'N')), set(['orange', 'BanaNa']))
# add feature with new value
layer.startEditing()
f1 = QgsFeature()
f1.setAttributes(["waterMelon"])
self.assertTrue(layer.addFeature(f1))
# should be included in unique values
self.assertEqual(set(layer.uniqueStringsMatching(0, 'N')), set(['orange', 'BanaNa', 'waterMelon']))
# add it again, should be no change
f2 = QgsFeature()
f2.setAttributes(["waterMelon"])
self.assertTrue(layer.addFeature(f1))
self.assertEqual(set(layer.uniqueStringsMatching(0, 'N')), set(['orange', 'BanaNa', 'waterMelon']))
self.assertEqual(set(layer.uniqueStringsMatching(0, 'aN')), set(['orange', 'BanaNa']))
# add another feature
f3 = QgsFeature()
f3.setAttributes(["pineapple"])
self.assertTrue(layer.addFeature(f3))
self.assertEqual(set(layer.uniqueStringsMatching(0, 'n')), set(['orange', 'BanaNa', 'waterMelon', 'pineapple']))
# change an attribute value to a new unique value
f = QgsFeature()
f1_id = next(layer.getFeatures()).id()
self.assertTrue(layer.changeAttributeValue(f1_id, 0, 'coconut'))
# note - this isn't 100% accurate, since orange no longer exists - but it avoids looping through all features
self.assertEqual(set(layer.uniqueStringsMatching(0, 'n')),
set(['orange', 'BanaNa', 'waterMelon', 'pineapple', 'coconut']))
def test_subsetString(self):
    """Setting a subset string emits subsetStringChanged and filters features."""
    emissions = []
    layer = QgsVectorLayer(os.path.join(unitTestDataPath(), 'lines.shp'), 'test', 'ogr')
    # record every emission of the signal
    layer.subsetStringChanged.connect(lambda: emissions.append(True))
    layer.setSubsetString("\"Name\" = 'Highway'")
    self.assertTrue(bool(emissions))
    # only the two Highway features remain visible
    self.assertEqual(layer.featureCount(), 2)
def testMinValue(self):
    """Test retrieving minimum values.

    Checks QgsVectorLayer.minimumValue() against provider features,
    uncommitted additions and attribute changes.
    """
    layer = createLayerWithFivePoints()
    # test layer with just provider features
    self.assertEqual(layer.minimumValue(1), -1)
    # add feature with new value
    layer.startEditing()
    f1 = QgsFeature()
    f1.setAttributes(["test2", -999])
    self.assertTrue(layer.addFeature(f1))
    # should be new minimum value
    self.assertEqual(layer.minimumValue(1), -999)
    # add it again, should be no change
    f2 = QgsFeature()
    f2.setAttributes(["test2", -999])
    # bugfix: previously re-added f1 here, leaving the duplicate-valued f2 unused
    self.assertTrue(layer.addFeature(f2))
    self.assertEqual(layer.minimumValue(1), -999)
    # add another feature
    f3 = QgsFeature()
    f3.setAttributes(["test2", -1000])
    self.assertTrue(layer.addFeature(f3))
    self.assertEqual(layer.minimumValue(1), -1000)
    # change an attribute value to a new minimum value
    f1_id = next(layer.getFeatures()).id()
    self.assertTrue(layer.changeAttributeValue(f1_id, 1, -1001))
    self.assertEqual(layer.minimumValue(1), -1001)
def testMaxValue(self):
    """Test retrieving maximum values.

    Checks QgsVectorLayer.maximumValue() against provider features,
    uncommitted additions and attribute changes.
    """
    layer = createLayerWithFivePoints()
    # test layer with just provider features
    self.assertEqual(layer.maximumValue(1), 888)
    # add feature with new value
    layer.startEditing()
    f1 = QgsFeature()
    f1.setAttributes(["test2", 999])
    self.assertTrue(layer.addFeature(f1))
    # should be new maximum value
    self.assertEqual(layer.maximumValue(1), 999)
    # add it again, should be no change
    f2 = QgsFeature()
    f2.setAttributes(["test2", 999])
    # bugfix: previously re-added f1 here, leaving the duplicate-valued f2 unused
    self.assertTrue(layer.addFeature(f2))
    self.assertEqual(layer.maximumValue(1), 999)
    # add another feature
    f3 = QgsFeature()
    f3.setAttributes(["test2", 1000])
    self.assertTrue(layer.addFeature(f3))
    self.assertEqual(layer.maximumValue(1), 1000)
    # change an attribute value to a new maximum value
    f1_id = next(layer.getFeatures()).id()
    self.assertTrue(layer.changeAttributeValue(f1_id, 1, 1001))
    self.assertEqual(layer.maximumValue(1), 1001)
def testMinAndMaxValue(self):
    """Test retrieving minimum and maximum values at once.

    Checks QgsVectorLayer.minimumAndMaximumValue(), which returns a
    (min, max) tuple, across provider features, uncommitted additions
    and attribute changes.
    """
    layer = createLayerWithFivePoints()
    # test layer with just provider features
    self.assertEqual(layer.minimumAndMaximumValue(1), (-1, 888))
    # add feature with new value
    layer.startEditing()
    f1 = QgsFeature()
    f1.setAttributes(["test2", 999])
    self.assertTrue(layer.addFeature(f1))
    # should be new maximum value
    self.assertEqual(layer.minimumAndMaximumValue(1), (-1, 999))
    # add it again, should be no change
    f2 = QgsFeature()
    f2.setAttributes(["test2", 999])
    # bugfix: previously re-added f1 here, leaving the duplicate-valued f2 unused
    self.assertTrue(layer.addFeature(f2))
    self.assertEqual(layer.minimumAndMaximumValue(1), (-1, 999))
    # add another feature
    f3 = QgsFeature()
    f3.setAttributes(["test2", 1000])
    self.assertTrue(layer.addFeature(f3))
    self.assertEqual(layer.minimumAndMaximumValue(1), (-1, 1000))
    # add feature with new minimum value
    layer.startEditing()  # NOTE: already editing; kept as a harmless no-op
    f1 = QgsFeature()
    f1.setAttributes(["test2", -999])
    self.assertTrue(layer.addFeature(f1))
    # should be new minimum value
    self.assertEqual(layer.minimumAndMaximumValue(1), (-999, 1000))
    # add it again, should be no change
    f2 = QgsFeature()
    f2.setAttributes(["test2", -999])
    # bugfix: previously re-added f1 here, leaving the duplicate-valued f2 unused
    self.assertTrue(layer.addFeature(f2))
    self.assertEqual(layer.minimumAndMaximumValue(1), (-999, 1000))
    # add another feature
    f3 = QgsFeature()
    f3.setAttributes(["test2", -1000])
    self.assertTrue(layer.addFeature(f3))
    self.assertEqual(layer.minimumAndMaximumValue(1), (-1000, 1000))
    # change an attribute value to a new maximum value
    it = layer.getFeatures()
    f1_id = next(it).id()
    self.assertTrue(layer.changeAttributeValue(f1_id, 1, 1001))
    self.assertEqual(layer.minimumAndMaximumValue(1), (-1000, 1001))
    f1_id = next(it).id()
    self.assertTrue(layer.changeAttributeValue(f1_id, 1, -1001))
    self.assertEqual(layer.minimumAndMaximumValue(1), (-1001, 1001))
def testMinMaxInVirtualField(self):
    """
    Test minimum and maximum values in a virtual field
    """
    layer = QgsVectorLayer("Point?field=fldstr:string", "layer", "memory")
    provider = layer.dataProvider()
    # the source field holds ISO date strings (one NULL row)
    feats = []
    for value in ['2010-01-01', None, '2020-01-01']:
        feat = QgsFeature()
        feat.setFields(layer.fields())
        feat.setAttributes([value])
        feats.append(feat)
    self.assertTrue(provider.addFeatures(feats))
    # virtual field converting the string to an actual date
    layer.addExpressionField('to_date("fldstr")', QgsField('virtual', QVariant.Date))
    # virtual field is appended after the provider field
    self.assertEqual(len(layer.getFeature(1).attributes()), 2)
    self.assertEqual(layer.minimumValue(1), QDate(2010, 1, 1))
    self.assertEqual(layer.maximumValue(1), QDate(2020, 1, 1))
    self.assertEqual(layer.minimumAndMaximumValue(1), (QDate(2010, 1, 1), QDate(2020, 1, 1)))
def test_InvalidOperations(self):
    """Edit operations on invalid targets must fail (return False) rather than crash."""
    layer = createLayerWithOnePoint()
    layer.startEditing()
    # ADD FEATURE
    newF1 = QgsFeature()
    self.assertFalse(layer.addFeature(newF1))  # need attributes like the layer has)
    # DELETE FEATURE
    # negative ids denote uncommitted (edit-buffer) features; -333 was never added
    self.assertFalse(layer.deleteFeature(-333))
    # we do not check for existence of the feature id if it's
    # not newly added feature
    # self.assertFalse(layer.deleteFeature(333))
    # CHANGE GEOMETRY
    self.assertFalse(layer.changeGeometry(
        -333, QgsGeometry.fromPointXY(QgsPointXY(1, 1))))
    # CHANGE VALUE
    self.assertFalse(layer.changeAttributeValue(-333, 0, 1))  # bad feature id
    self.assertFalse(layer.changeAttributeValue(1, -1, 1))    # bad field index
    # ADD ATTRIBUTE
    self.assertFalse(layer.addAttribute(QgsField()))  # default-constructed field is invalid
    # DELETE ATTRIBUTE
    self.assertFalse(layer.deleteAttribute(-1))
def onBlendModeChanged(self, mode):
    """Slot for blendModeChanged/featureBlendModeChanged; stores the mode for assertions."""
    self.blendModeTest = mode
def test_setBlendMode(self):
    """setBlendMode() must emit blendModeChanged and update blendMode()."""
    layer = createLayerWithOnePoint()
    self.blendModeTest = 0
    layer.blendModeChanged.connect(self.onBlendModeChanged)
    mode = QPainter.CompositionMode_Screen
    layer.setBlendMode(mode)
    # the connected slot received the new mode, and the getter reflects it
    self.assertEqual(self.blendModeTest, mode)
    self.assertEqual(layer.blendMode(), mode)
def test_setFeatureBlendMode(self):
    """setFeatureBlendMode() must emit featureBlendModeChanged and update featureBlendMode()."""
    layer = createLayerWithOnePoint()
    self.blendModeTest = 0
    layer.featureBlendModeChanged.connect(self.onBlendModeChanged)
    mode = QPainter.CompositionMode_Screen
    layer.setFeatureBlendMode(mode)
    # the connected slot received the new mode, and the getter reflects it
    self.assertEqual(self.blendModeTest, mode)
    self.assertEqual(layer.featureBlendMode(), mode)
def test_ExpressionField(self):
    """Add/update/remove a virtual (expression) field and check feature values and field lists."""
    layer = createLayerWithOnePoint()
    cnt = layer.fields().count()
    idx = layer.addExpressionField('5', QgsField('test', QVariant.LongLong))
    fet = next(layer.getFeatures())
    # constant expression evaluates for every feature
    self.assertEqual(fet[idx], 5)
    # check fields
    self.assertEqual(layer.fields().count(), cnt + 1)
    self.assertEqual(fet.fields(), layer.fields())
    # retrieve single feature and check fields
    fet = next(layer.getFeatures(QgsFeatureRequest().setFilterFid(1)))
    self.assertEqual(fet.fields(), layer.fields())
    # updating the expression must be reflected in subsequent reads
    layer.updateExpressionField(idx, '9')
    self.assertEqual(next(layer.getFeatures())[idx], 9)
    layer.removeExpressionField(idx)
    self.assertEqual(layer.fields().count(), cnt)
    # expression field which references itself
    idx = layer.addExpressionField('sum(test2)', QgsField('test2', QVariant.LongLong))
    fet = next(layer.getFeatures())
    self.assertEqual(fet['test2'], 0)
def test_ExpressionFieldEllipsoidLengthCalculation(self):
    """$length in a virtual field must use the project ellipsoid and distance units.

    NOTE(review): mutates global QgsProject state (CRS, ellipsoid, units);
    relies on test isolation/ordering — confirm no later test assumes defaults.
    """
    # create a temporary layer
    temp_layer = QgsVectorLayer("LineString?crs=epsg:3111&field=pk:int", "vl", "memory")
    self.assertTrue(temp_layer.isValid())
    f1 = QgsFeature(temp_layer.dataProvider().fields(), 1)
    f1.setAttribute("pk", 1)
    f1.setGeometry(QgsGeometry.fromPolylineXY([QgsPointXY(2484588, 2425722), QgsPointXY(2482767, 2398853)]))
    temp_layer.dataProvider().addFeatures([f1])
    # set project CRS and ellipsoid
    srs = QgsCoordinateReferenceSystem.fromEpsgId(3111)
    QgsProject.instance().setCrs(srs)
    QgsProject.instance().setEllipsoid("WGS84")
    QgsProject.instance().setDistanceUnits(QgsUnitTypes.DistanceMeters)
    idx = temp_layer.addExpressionField('$length', QgsField('length', QVariant.Double))  # NOQA
    # check value
    f = next(temp_layer.getFeatures())
    expected = 26932.156
    self.assertAlmostEqual(f['length'], expected, 3)
    # change project length unit, check calculation respects unit
    QgsProject.instance().setDistanceUnits(QgsUnitTypes.DistanceFeet)
    f = next(temp_layer.getFeatures())
    expected = 88360.0918635
    self.assertAlmostEqual(f['length'], expected, 3)
def test_ExpressionFieldEllipsoidAreaCalculation(self):
    """$area in a virtual field must use the project ellipsoid and area units.

    NOTE(review): mutates global QgsProject state (CRS, ellipsoid, units);
    relies on test isolation/ordering — confirm no later test assumes defaults.
    """
    # create a temporary layer
    temp_layer = QgsVectorLayer("Polygon?crs=epsg:3111&field=pk:int", "vl", "memory")
    self.assertTrue(temp_layer.isValid())
    f1 = QgsFeature(temp_layer.dataProvider().fields(), 1)
    f1.setAttribute("pk", 1)
    f1.setGeometry(QgsGeometry.fromPolygonXY([[QgsPointXY(2484588, 2425722), QgsPointXY(2482767, 2398853),
                                               QgsPointXY(2520109, 2397715), QgsPointXY(2520792, 2425494),
                                               QgsPointXY(2484588, 2425722)]]))
    temp_layer.dataProvider().addFeatures([f1])
    # set project CRS and ellipsoid
    srs = QgsCoordinateReferenceSystem.fromEpsgId(3111)
    QgsProject.instance().setCrs(srs)
    QgsProject.instance().setEllipsoid("WGS84")
    QgsProject.instance().setAreaUnits(QgsUnitTypes.AreaSquareMeters)
    idx = temp_layer.addExpressionField('$area', QgsField('area', QVariant.Double))  # NOQA
    # check value
    f = next(temp_layer.getFeatures())
    expected = 1005755617.8191342
    self.assertAlmostEqual(f['area'], expected, delta=1.0)
    # change project area unit, check calculation respects unit
    QgsProject.instance().setAreaUnits(QgsUnitTypes.AreaSquareMiles)
    f = next(temp_layer.getFeatures())
    expected = 388.3244150061589
    self.assertAlmostEqual(f['area'], expected, 3)
def test_ExpressionFilter(self):
    """Feature-request expression filters must evaluate against virtual fields."""
    layer = createLayerWithOnePoint()
    idx = layer.addExpressionField('5', QgsField('test', QVariant.LongLong))  # NOQA
    # filter that matches nothing
    no_match = layer.getFeatures(QgsFeatureRequest().setFilterExpression('"test" = 6'))
    self.assertEqual(len(list(no_match)), 0)
    # filter that matches the single feature
    match = layer.getFeatures(QgsFeatureRequest().setFilterExpression('"test" = 5'))
    self.assertEqual(len(list(match)), 1)
def testSelectByIds(self):
    """ Test selecting by ID"""
    layer = QgsVectorLayer(os.path.join(unitTestDataPath(), 'points.shp'), 'Points', 'ogr')
    # SetSelection: replaces any prior selection
    layer.selectByIds([1, 3, 5, 7], QgsVectorLayer.SetSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([1, 3, 5, 7]))
    # check that existing selection is cleared
    layer.selectByIds([2, 4, 6], QgsVectorLayer.SetSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([2, 4, 6]))
    # AddToSelection: union with prior selection
    layer.selectByIds([3, 5], QgsVectorLayer.AddToSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([2, 3, 4, 5, 6]))
    layer.selectByIds([1], QgsVectorLayer.AddToSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([1, 2, 3, 4, 5, 6]))
    # IntersectSelection: keep only ids present in both
    layer.selectByIds([1, 3, 5, 6], QgsVectorLayer.IntersectSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([1, 3, 5, 6]))
    layer.selectByIds([1, 2, 5, 6], QgsVectorLayer.IntersectSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([1, 5, 6]))
    # RemoveFromSelection: set difference
    layer.selectByIds([2, 6, 7], QgsVectorLayer.RemoveFromSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([1, 5]))
    layer.selectByIds([1, 5], QgsVectorLayer.RemoveFromSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([]))
def testSelectByExpression(self):
    """ Test selecting by expression """
    layer = QgsVectorLayer(os.path.join(unitTestDataPath(), 'points.shp'), 'Points', 'ogr')
    # SetSelection: replaces any prior selection
    layer.selectByExpression('"Class"=\'B52\' and "Heading" > 10 and "Heading" <70', QgsVectorLayer.SetSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([10, 11]))
    # check that existing selection is cleared
    layer.selectByExpression('"Class"=\'Biplane\'', QgsVectorLayer.SetSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([1, 5, 6, 7, 8]))
    # SetSelection no matching
    layer.selectByExpression('"Class"=\'A380\'', QgsVectorLayer.SetSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([]))
    # AddToSelection: union with prior selection
    layer.selectByExpression('"Importance"=3', QgsVectorLayer.AddToSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([0, 2, 3, 4, 14]))
    layer.selectByExpression('"Importance"=4', QgsVectorLayer.AddToSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([0, 2, 3, 4, 13, 14]))
    # IntersectSelection: keep only features also matching the expression
    layer.selectByExpression('"Heading"<100', QgsVectorLayer.IntersectSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([0, 2, 3, 4]))
    layer.selectByExpression('"Cabin Crew"=1', QgsVectorLayer.IntersectSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([2, 3]))
    # RemoveFromSelection: drop features matching the expression
    layer.selectByExpression('"Heading"=85', QgsVectorLayer.RemoveFromSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([3]))
    layer.selectByExpression('"Heading"=95', QgsVectorLayer.RemoveFromSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([]))
def testSelectByRect(self):
    """ Test selecting by rectangle """
    layer = QgsVectorLayer(os.path.join(unitTestDataPath(), 'points.shp'), 'Points', 'ogr')
    # SetSelection: replaces any prior selection
    layer.selectByRect(QgsRectangle(-112, 30, -94, 45), QgsVectorLayer.SetSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([2, 3, 7, 10, 11, 15]))
    # check that existing selection is cleared
    layer.selectByRect(QgsRectangle(-112, 30, -94, 37), QgsVectorLayer.SetSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([2, 3, 10, 15]))
    # SetSelection no matching
    layer.selectByRect(QgsRectangle(112, 30, 115, 45), QgsVectorLayer.SetSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([]))
    # AddToSelection: union with prior selection
    layer.selectByRect(QgsRectangle(-112, 30, -94, 37), QgsVectorLayer.AddToSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([2, 3, 10, 15]))
    layer.selectByRect(QgsRectangle(-112, 37, -94, 45), QgsVectorLayer.AddToSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([2, 3, 7, 10, 11, 15]))
    # IntersectSelection: keep only selected features inside the rectangle
    layer.selectByRect(QgsRectangle(-112, 30, -94, 37), QgsVectorLayer.IntersectSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([2, 3, 10, 15]))
    layer.selectByIds([2, 10, 13])
    layer.selectByRect(QgsRectangle(-112, 30, -94, 37), QgsVectorLayer.IntersectSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([2, 10]))
    # RemoveFromSelection: drop features inside the rectangle
    layer.selectByRect(QgsRectangle(-112, 30, -94, 45), QgsVectorLayer.SetSelection)
    layer.selectByRect(QgsRectangle(-112, 30, -94, 37), QgsVectorLayer.RemoveFromSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([7, 11]))
    layer.selectByRect(QgsRectangle(-112, 30, -94, 45), QgsVectorLayer.RemoveFromSelection)
    self.assertEqual(set(layer.selectedFeatureIds()), set([]))
def testReselect(self):
    """reselect() must restore the most recently cleared selection, and be a
    no-op both when a selection is active and after it has already restored one.
    Order of operations is significant throughout this test."""
    layer = QgsVectorLayer(os.path.join(unitTestDataPath(), 'points.shp'), 'Points', 'ogr')
    layer.selectByIds([1, 3, 5, 7], QgsVectorLayer.SetSelection)
    self.assertCountEqual(layer.selectedFeatureIds(), [1, 3, 5, 7])
    layer.reselect()  # no effect, selection has not been cleared
    self.assertCountEqual(layer.selectedFeatureIds(), [1, 3, 5, 7])
    # clear selection
    layer.removeSelection()
    self.assertCountEqual(layer.selectedFeatureIds(), [])
    # reselect should bring this back
    layer.reselect()
    self.assertCountEqual(layer.selectedFeatureIds(), [1, 3, 5, 7])
    layer.reselect()  # no change
    self.assertCountEqual(layer.selectedFeatureIds(), [1, 3, 5, 7])
    # change an existing selection
    layer.selectByIds([1, 3, 5], QgsVectorLayer.SetSelection)
    self.assertCountEqual(layer.selectedFeatureIds(), [1, 3, 5])
    layer.reselect()  # no change
    self.assertCountEqual(layer.selectedFeatureIds(), [1, 3, 5])
    layer.removeSelection()
    self.assertCountEqual(layer.selectedFeatureIds(), [])
    # reselect should bring this back
    layer.reselect()
    self.assertCountEqual(layer.selectedFeatureIds(), [1, 3, 5])
    # growing the selection must not be undone by reselect()
    layer.select(7)
    self.assertCountEqual(layer.selectedFeatureIds(), [1, 3, 5, 7])
    layer.reselect()
    self.assertCountEqual(layer.selectedFeatureIds(), [1, 3, 5, 7])
    layer.removeSelection()
    layer.select([3, 5])
    self.assertCountEqual(layer.selectedFeatureIds(), [3, 5])
    layer.reselect()
    self.assertCountEqual(layer.selectedFeatureIds(), [3, 5])
    # deselect/modifySelection also must not be reverted by reselect()
    layer.deselect([5])
    self.assertCountEqual(layer.selectedFeatureIds(), [3])
    layer.reselect()
    self.assertCountEqual(layer.selectedFeatureIds(), [3])
    layer.modifySelection([5], [3])
    self.assertCountEqual(layer.selectedFeatureIds(), [5])
    layer.reselect()
    self.assertCountEqual(layer.selectedFeatureIds(), [5])
def testAggregate(self):
    """ Test aggregate calculation """

    def make_features(fields, values):
        # build one single-attribute feature per value
        result = []
        for value in values:
            feat = QgsFeature()
            feat.setFields(fields)
            feat.setAttributes([value])
            result.append(feat)
        return result

    layer = QgsVectorLayer("Point?field=fldint:integer", "layer", "memory")
    pr = layer.dataProvider()
    # one NULL value to exercise CountMissing and NULL-skipping aggregates
    self.assertTrue(pr.addFeatures(make_features(layer.fields(), [4, 2, 3, 2, 5, None, 8])))

    expectations = [
        (QgsAggregateCalculator.Count, 6),
        (QgsAggregateCalculator.Sum, 24),
        (QgsAggregateCalculator.Mean, 4),
        (QgsAggregateCalculator.StDev, 2.0816),
        (QgsAggregateCalculator.StDevSample, 2.2803),
        (QgsAggregateCalculator.Min, 2),
        (QgsAggregateCalculator.Max, 8),
        (QgsAggregateCalculator.Range, 6),
        (QgsAggregateCalculator.Median, 3.5),
        (QgsAggregateCalculator.CountDistinct, 5),
        (QgsAggregateCalculator.CountMissing, 1),
        (QgsAggregateCalculator.FirstQuartile, 2),
        (QgsAggregateCalculator.ThirdQuartile, 5.0),
        (QgsAggregateCalculator.InterQuartileRange, 3.0),
    ]
    for aggregate, expected in expectations:
        val, ok = layer.aggregate(aggregate, 'fldint')
        self.assertTrue(ok)
        if isinstance(expected, int):
            self.assertEqual(val, expected)
        else:
            self.assertAlmostEqual(val, expected, 3)

    # test with parameters
    layer = QgsVectorLayer("Point?field=fldstring:string", "layer", "memory")
    pr = layer.dataProvider()
    self.assertTrue(pr.addFeatures(
        make_features(layer.fields(), ['this', 'is', 'a', 'test', 'a', 'nice', 'test'])))
    params = QgsAggregateCalculator.AggregateParameters()
    params.delimiter = ' '
    val, ok = layer.aggregate(QgsAggregateCalculator.StringConcatenate, 'fldstring', params)
    self.assertTrue(ok)
    self.assertEqual(val, 'this is a test a nice test')
    val, ok = layer.aggregate(QgsAggregateCalculator.StringConcatenateUnique, 'fldstring', params)
    self.assertTrue(ok)
    self.assertEqual(val, 'this is a test nice')
def testAggregateInVirtualField(self):
    """
    Test aggregates in a virtual field
    """
    layer = QgsVectorLayer("Point?field=fldint:integer", "layer", "memory")
    provider = layer.dataProvider()
    feats = []
    for value in [4, 2, 3, 2, 5, None, 8]:
        feat = QgsFeature()
        feat.setFields(layer.fields())
        feat.setAttributes([value])
        feats.append(feat)
    self.assertTrue(provider.addFeatures(feats))
    layer.addExpressionField('sum(fldint*2)', QgsField('virtual', QVariant.Double))
    # every row sees the same layer-wide aggregate: 2 * (4+2+3+2+5+8) = 48
    self.assertEqual([f['virtual'] for f in layer.getFeatures()], [48] * 7)
def testAggregateFilter(self):
    """Test aggregate calculation restricted to an explicit set of feature ids."""
    layer = QgsVectorLayer("Point?field=fldint:integer", "layer", "memory")
    provider = layer.dataProvider()
    feats = []
    for value in [4, 2, 3, 2, 5, None, 8]:
        feat = QgsFeature()
        feat.setFields(layer.fields())
        feat.setAttributes([value])
        feats.append(feat)
    self.assertTrue(provider.addFeatures(feats))
    # only the first two features (values 4 and 2) contribute to the sum
    val, ok = layer.aggregate(QgsAggregateCalculator.Sum, 'fldint', fids=[1, 2])
    self.assertTrue(ok)
    self.assertEqual(val, 6.0)
def onLayerOpacityChanged(self, tr):
    """Slot for opacityChanged; stores the received opacity for assertions."""
    self.opacityTest = tr
def test_setLayerOpacity(self):
    """setOpacity() must emit opacityChanged and update opacity()."""
    vl = createLayerWithOnePoint()
    self.opacityTest = 0
    vl.opacityChanged.connect(self.onLayerOpacityChanged)
    vl.setOpacity(0.5)
    # both the signal payload and the getter reflect the new opacity
    for observed in (self.opacityTest, vl.opacity()):
        self.assertEqual(observed, 0.5)
def onRendererChanged(self):
    """Slot for rendererChanged; flags that the signal was emitted."""
    self.rendererChanged = True
def test_setRenderer(self):
    """setRenderer() must emit rendererChanged and install the renderer."""
    vl = createLayerWithOnePoint()
    self.rendererChanged = False
    vl.rendererChanged.connect(self.onRendererChanged)
    renderer = QgsSingleSymbolRenderer(QgsSymbol.defaultSymbol(QgsWkbTypes.PointGeometry))
    vl.setRenderer(renderer)
    self.assertTrue(self.rendererChanged)
    self.assertEqual(vl.renderer(), renderer)
def testGetSetAliases(self):
    """ test getting and setting aliases """
    layer = createLayerWithOnePoint()
    self.assertEqual(len(layer.attributeAliases()), 2)
    # no aliases initially; out-of-range index (2) also reports empty
    self.assertFalse(layer.attributeAlias(0))
    self.assertFalse(layer.attributeAlias(1))
    self.assertFalse(layer.attributeAlias(2))
    layer.setFieldAlias(0, "test")
    self.assertEqual(layer.attributeAlias(0), "test")
    self.assertFalse(layer.attributeAlias(1))
    self.assertFalse(layer.attributeAlias(2))
    # alias must also be visible through the field object itself
    self.assertEqual(layer.fields().at(0).alias(), "test")
    layer.setFieldAlias(1, "test2")
    self.assertEqual(layer.attributeAlias(0), "test")
    self.assertEqual(layer.attributeAlias(1), "test2")
    self.assertFalse(layer.attributeAlias(2))
    self.assertEqual(layer.fields().at(0).alias(), "test")
    self.assertEqual(layer.fields().at(1).alias(), "test2")
    # setting a None alias clears it
    layer.setFieldAlias(1, None)
    self.assertEqual(layer.attributeAlias(0), "test")
    self.assertFalse(layer.attributeAlias(1))
    self.assertFalse(layer.attributeAlias(2))
    self.assertEqual(layer.fields().at(0).alias(), "test")
    self.assertFalse(layer.fields().at(1).alias())
    # explicit removal clears it too
    layer.removeFieldAlias(0)
    self.assertFalse(layer.attributeAlias(0))
    self.assertFalse(layer.attributeAlias(1))
    self.assertFalse(layer.attributeAlias(2))
    self.assertFalse(layer.fields().at(0).alias())
    self.assertFalse(layer.fields().at(1).alias())
def testSaveRestoreAliases(self):
    """ test saving and restoring aliases from xml"""
    layer = createLayerWithOnePoint()
    # no default expressions
    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    self.assertTrue(layer.writeXml(elem, doc, QgsReadWriteContext()))
    # round-trip a layer with no aliases: restored layer has none either
    layer2 = createLayerWithOnePoint()
    self.assertTrue(layer2.readXml(elem, QgsReadWriteContext()))
    self.assertFalse(layer2.attributeAlias(0))
    self.assertFalse(layer2.attributeAlias(1))
    # set some aliases
    layer.setFieldAlias(0, "test")
    layer.setFieldAlias(1, "test2")
    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    self.assertTrue(layer.writeXml(elem, doc, QgsReadWriteContext()))
    # round-trip again: aliases survive on both the layer and its fields
    layer3 = createLayerWithOnePoint()
    self.assertTrue(layer3.readXml(elem, QgsReadWriteContext()))
    self.assertEqual(layer3.attributeAlias(0), "test")
    self.assertEqual(layer3.attributeAlias(1), "test2")
    self.assertEqual(layer3.fields().at(0).alias(), "test")
    self.assertEqual(layer3.fields().at(1).alias(), "test2")
def testGetSetDefaults(self):
    """ test getting and setting default expressions """
    layer = createLayerWithOnePoint()
    # no defaults initially; out-of-range index (2) reports an empty definition
    self.assertFalse(layer.defaultValueDefinition(0))
    self.assertFalse(layer.defaultValueDefinition(0).expression())
    self.assertFalse(layer.defaultValueDefinition(0).applyOnUpdate())
    self.assertFalse(layer.defaultValueDefinition(1))
    self.assertFalse(layer.defaultValueDefinition(2))
    layer.setDefaultValueDefinition(0, QgsDefaultValue("'test'"))
    self.assertTrue(layer.defaultValueDefinition(0))
    self.assertEqual(layer.defaultValueDefinition(0).expression(), "'test'")
    self.assertFalse(layer.defaultValueDefinition(0).applyOnUpdate())
    self.assertFalse(layer.defaultValueDefinition(1))
    self.assertFalse(layer.defaultValueDefinition(1).applyOnUpdate())
    self.assertFalse(layer.defaultValueDefinition(2))
    self.assertFalse(layer.defaultValueDefinition(2).applyOnUpdate())
    # default must also be visible through the field object itself
    self.assertEqual(layer.fields().at(0).defaultValueDefinition().expression(), "'test'")
    layer.setDefaultValueDefinition(1, QgsDefaultValue("2+2"))
    self.assertEqual(layer.defaultValueDefinition(0).expression(), "'test'")
    self.assertFalse(layer.defaultValueDefinition(0).applyOnUpdate())
    self.assertEqual(layer.defaultValueDefinition(1).expression(), "2+2")
    self.assertFalse(layer.defaultValueDefinition(1).applyOnUpdate())
    self.assertFalse(layer.defaultValueDefinition(2))
    self.assertFalse(layer.defaultValueDefinition(2).applyOnUpdate())
    self.assertEqual(layer.fields().at(0).defaultValueDefinition().expression(), "'test'")
    self.assertEqual(layer.fields().at(1).defaultValueDefinition().expression(), "2+2")
    # re-set with applyOnUpdate=True; flag must be stored alongside the expression
    layer.setDefaultValueDefinition(1, QgsDefaultValue("2+2", True))
    self.assertEqual(layer.defaultValueDefinition(0).expression(), "'test'")
    self.assertFalse(layer.defaultValueDefinition(0).applyOnUpdate())
    self.assertEqual(layer.defaultValueDefinition(1).expression(), "2+2")
    self.assertTrue(layer.defaultValueDefinition(1).applyOnUpdate())
    self.assertEqual(layer.fields().at(0).defaultValueDefinition().expression(), "'test'")
    self.assertEqual(layer.fields().at(1).defaultValueDefinition().expression(), "2+2")
def testSaveRestoreDefaults(self):
    """ test saving and restoring default expressions from xml"""
    layer = createLayerWithOnePoint()
    # no default expressions
    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    self.assertTrue(layer.writeXml(elem, doc, QgsReadWriteContext()))
    # round-trip a layer with no defaults: restored layer has none either
    layer2 = createLayerWithOnePoint()
    self.assertTrue(layer2.readXml(elem, QgsReadWriteContext()))
    self.assertFalse(layer2.defaultValueDefinition(0))
    self.assertFalse(layer2.defaultValueDefinition(1))
    # set some default expressions
    layer.setDefaultValueDefinition(0, QgsDefaultValue("'test'"))
    layer.setDefaultValueDefinition(1, QgsDefaultValue("2+2"))
    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    self.assertTrue(layer.writeXml(elem, doc, QgsReadWriteContext()))
    # round-trip again: defaults survive on both the layer and its fields
    layer3 = createLayerWithOnePoint()
    self.assertTrue(layer3.readXml(elem, QgsReadWriteContext()))
    self.assertEqual(layer3.defaultValueDefinition(0).expression(), "'test'")
    self.assertEqual(layer3.defaultValueDefinition(1).expression(), "2+2")
    self.assertEqual(layer3.fields().at(0).defaultValueDefinition().expression(), "'test'")
    self.assertEqual(layer3.fields().at(1).defaultValueDefinition().expression(), "2+2")
def testEvaluatingDefaultExpressions(self):
    """ tests calculation of default values"""
    layer = createLayerWithOnePoint()
    layer.setDefaultValueDefinition(0, QgsDefaultValue("'test'"))
    layer.setDefaultValueDefinition(1, QgsDefaultValue("2+2"))
    self.assertEqual(layer.defaultValue(0), 'test')
    self.assertEqual(layer.defaultValue(1), 4)
    # using feature
    layer.setDefaultValueDefinition(1, QgsDefaultValue('$id * 2'))
    feature = QgsFeature(4)
    feature.setValid(True)
    feature.setFields(layer.fields())
    # no feature: $id cannot be evaluated, so no default is produced
    self.assertFalse(layer.defaultValue(1))
    # with feature:
    self.assertEqual(layer.defaultValue(0, feature), 'test')
    self.assertEqual(layer.defaultValue(1, feature), 8)
    # using feature geometry
    layer.setDefaultValueDefinition(1, QgsDefaultValue('$x * 2'))
    feature.setGeometry(QgsGeometry(QgsPoint(6, 7)))
    self.assertEqual(layer.defaultValue(1, feature), 12)
    # using contexts: an explicitly passed expression context supplies @var1
    scope = QgsExpressionContextScope()
    scope.setVariable('var1', 16)
    context = QgsExpressionContext()
    context.appendScope(scope)
    layer.setDefaultValueDefinition(1, QgsDefaultValue('$id + @var1'))
    self.assertEqual(layer.defaultValue(1, feature, context), 20)
    # if no scope passed, should use a default constructed one including layer variables
    QgsExpressionContextUtils.setLayerVariable(layer, 'var2', 4)
    QgsExpressionContextUtils.setProjectVariable(QgsProject.instance(), 'var3', 8)
    layer.setDefaultValueDefinition(1, QgsDefaultValue('to_int(@var2) + to_int(@var3) + $id'))
    self.assertEqual(layer.defaultValue(1, feature), 16)
    # bad expression: evaluation failure yields no default rather than raising
    layer.setDefaultValueDefinition(1, QgsDefaultValue('not a valid expression'))
    self.assertFalse(layer.defaultValue(1))
def testApplyOnUpdateDefaultExpressions(self):
    """tests apply on update of default values"""
    layer = createLayerWithOnePoint()
    # field 0 re-evaluates on every update (applyOnUpdate=True); field 1 only on create
    layer.setDefaultValueDefinition(0, QgsDefaultValue("CONCAT('l: ', @number, ',f: ', \"fldint\" )", True))
    layer.setDefaultValueDefinition(1, QgsDefaultValue("1 * @number", False))
    QgsExpressionContextUtils.setLayerVariable(layer, 'number', 4)
    layer.startEditing()
    feature = QgsFeature()
    feature.setFields(layer.fields())
    feature.setValid(True)
    # Both default values should be set on feature create
    feature.setAttribute(1, layer.defaultValue(1, feature))
    feature.setAttribute(0, layer.defaultValue(0, feature))
    self.assertTrue(layer.addFeature(feature))
    fid = feature.id()
    self.assertEqual(layer.getFeature(fid)['fldtxt'], 'l: 4,f: 4')
    self.assertEqual(layer.getFeature(fid)['fldint'], 4)
    # ApplyOnUpdateDefaultValue should be set on changeAttributeValue
    layer.changeAttributeValue(fid, 1, 20)
    self.assertEqual(layer.getFeature(fid)['fldtxt'], 'l: 4,f: 20')
    self.assertEqual(layer.getFeature(fid)['fldint'], 20)
    # When changing the value of the "derived" attribute, only this one
    # should be updated
    QgsExpressionContextUtils.setLayerVariable(layer, 'number', 8)
    layer.changeAttributeValue(fid, 0, 0)
    self.assertEqual(layer.getFeature(fid)['fldtxt'], 'l: 8,f: 20')
    self.assertEqual(layer.getFeature(fid)['fldint'], 20)
    # Check update on geometry change
    layer.setDefaultValueDefinition(1, QgsDefaultValue("x($geometry)", True))
    layer.changeGeometry(fid, QgsGeometry.fromPointXY(QgsPointXY(300, 200)))
    self.assertEqual(layer.getFeature(fid)['fldint'], 300)
def testGetSetConstraints(self):
    """ test getting and setting field constraints """
    layer = createLayerWithOnePoint()
    # no constraints initially; out-of-range index (2) reports none
    self.assertFalse(layer.fieldConstraints(0))
    self.assertFalse(layer.fieldConstraints(1))
    self.assertFalse(layer.fieldConstraints(2))
    layer.setFieldConstraint(0, QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(layer.fieldConstraints(0), QgsFieldConstraints.ConstraintNotNull)
    self.assertFalse(layer.fieldConstraints(1))
    self.assertFalse(layer.fieldConstraints(2))
    # layer-set constraints report origin=Layer and default strength=Hard
    self.assertEqual(layer.fields().at(0).constraints().constraints(), QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(layer.fields().at(0).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginLayer)
    self.assertEqual(layer.fields().at(0).constraints().constraintStrength(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintStrengthHard)
    # multiple constraints on one field combine as flags
    layer.setFieldConstraint(1, QgsFieldConstraints.ConstraintNotNull)
    layer.setFieldConstraint(1, QgsFieldConstraints.ConstraintUnique)
    self.assertEqual(layer.fieldConstraints(0), QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(layer.fieldConstraints(1),
                     QgsFieldConstraints.ConstraintNotNull | QgsFieldConstraints.ConstraintUnique)
    self.assertFalse(layer.fieldConstraints(2))
    self.assertEqual(layer.fields().at(0).constraints().constraints(), QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(layer.fields().at(0).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginLayer)
    self.assertEqual(layer.fields().at(0).constraints().constraintStrength(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintStrengthHard)
    self.assertEqual(layer.fields().at(1).constraints().constraints(),
                     QgsFieldConstraints.ConstraintNotNull | QgsFieldConstraints.ConstraintUnique)
    self.assertEqual(layer.fields().at(1).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginLayer)
    self.assertEqual(layer.fields().at(1).constraints().constraintOrigin(QgsFieldConstraints.ConstraintUnique),
                     QgsFieldConstraints.ConstraintOriginLayer)
    self.assertEqual(layer.fields().at(1).constraints().constraintStrength(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintStrengthHard)
    self.assertEqual(layer.fields().at(1).constraints().constraintStrength(QgsFieldConstraints.ConstraintUnique),
                     QgsFieldConstraints.ConstraintStrengthHard)
    # removing constraints resets origin/strength to NotSet on that field
    layer.removeFieldConstraint(1, QgsFieldConstraints.ConstraintNotNull)
    layer.removeFieldConstraint(1, QgsFieldConstraints.ConstraintUnique)
    self.assertEqual(layer.fieldConstraints(0), QgsFieldConstraints.ConstraintNotNull)
    self.assertFalse(layer.fieldConstraints(1))
    self.assertFalse(layer.fieldConstraints(2))
    self.assertEqual(layer.fields().at(0).constraints().constraints(), QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(layer.fields().at(0).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginLayer)
    self.assertEqual(layer.fields().at(0).constraints().constraintStrength(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintStrengthHard)
    self.assertFalse(layer.fields().at(1).constraints().constraints())
    self.assertEqual(layer.fields().at(1).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginNotSet)
    self.assertEqual(layer.fields().at(1).constraints().constraintStrength(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintStrengthNotSet)
def testSaveRestoreConstraints(self):
    """Test that field constraints, their origins and their strengths
    survive a write/read round trip through layer XML.

    Bug fix: the strength assertions previously checked the original
    ``layer`` instead of the restored ``layer3``, so restoration of the
    constraint strengths was never actually verified.
    """
    layer = createLayerWithOnePoint()

    # a layer with no constraints must restore with no constraints
    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    self.assertTrue(layer.writeXml(elem, doc, QgsReadWriteContext()))
    layer2 = createLayerWithOnePoint()
    self.assertTrue(layer2.readXml(elem, QgsReadWriteContext()))
    self.assertFalse(layer2.fieldConstraints(0))
    self.assertFalse(layer2.fieldConstraints(1))

    # set some constraints: field 0 hard not-null, field 1 soft not-null + hard unique
    layer.setFieldConstraint(0, QgsFieldConstraints.ConstraintNotNull)
    layer.setFieldConstraint(1, QgsFieldConstraints.ConstraintNotNull, QgsFieldConstraints.ConstraintStrengthSoft)
    layer.setFieldConstraint(1, QgsFieldConstraints.ConstraintUnique)

    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    self.assertTrue(layer.writeXml(elem, doc, QgsReadWriteContext()))

    layer3 = createLayerWithOnePoint()
    self.assertTrue(layer3.readXml(elem, QgsReadWriteContext()))
    self.assertEqual(layer3.fieldConstraints(0), QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(layer3.fieldConstraints(1),
                     QgsFieldConstraints.ConstraintNotNull | QgsFieldConstraints.ConstraintUnique)
    self.assertEqual(layer3.fields().at(0).constraints().constraints(), QgsFieldConstraints.ConstraintNotNull)
    self.assertEqual(layer3.fields().at(0).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginLayer)
    # check the RESTORED layer3 here (was incorrectly checking `layer`)
    self.assertEqual(layer3.fields().at(0).constraints().constraintStrength(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintStrengthHard)
    self.assertEqual(layer3.fields().at(1).constraints().constraints(),
                     QgsFieldConstraints.ConstraintNotNull | QgsFieldConstraints.ConstraintUnique)
    self.assertEqual(layer3.fields().at(1).constraints().constraintOrigin(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintOriginLayer)
    self.assertEqual(layer3.fields().at(1).constraints().constraintOrigin(QgsFieldConstraints.ConstraintUnique),
                     QgsFieldConstraints.ConstraintOriginLayer)
    # check the RESTORED layer3 here as well (was incorrectly checking `layer`)
    self.assertEqual(layer3.fields().at(1).constraints().constraintStrength(QgsFieldConstraints.ConstraintNotNull),
                     QgsFieldConstraints.ConstraintStrengthSoft)
    self.assertEqual(layer3.fields().at(1).constraints().constraintStrength(QgsFieldConstraints.ConstraintUnique),
                     QgsFieldConstraints.ConstraintStrengthHard)
def testGetSetConstraintExpressions(self):
    """Exercise setting, reading and clearing field constraint expressions."""
    layer = createLayerWithOnePoint()

    # no field carries an expression initially
    for idx in (0, 1, 2):
        self.assertFalse(layer.constraintExpression(idx))

    # set an expression on field 0 only; others remain untouched
    layer.setConstraintExpression(0, '1+2')
    self.assertEqual(layer.constraintExpression(0), '1+2')
    self.assertFalse(layer.constraintExpression(1))
    self.assertFalse(layer.constraintExpression(2))
    self.assertEqual(layer.fields().at(0).constraints().constraintExpression(), '1+2')

    # set an expression plus a description on field 1
    layer.setConstraintExpression(1, '3+4', 'desc')
    self.assertEqual(layer.constraintExpression(0), '1+2')
    self.assertEqual(layer.constraintExpression(1), '3+4')
    self.assertEqual(layer.constraintDescription(1), 'desc')
    self.assertFalse(layer.constraintExpression(2))
    self.assertEqual(layer.fields().at(0).constraints().constraintExpression(), '1+2')
    self.assertEqual(layer.fields().at(1).constraints().constraintExpression(), '3+4')
    self.assertEqual(layer.fields().at(1).constraints().constraintDescription(), 'desc')

    # clearing field 1 must leave field 0's expression in place
    layer.setConstraintExpression(1, None)
    self.assertEqual(layer.constraintExpression(0), '1+2')
    self.assertFalse(layer.constraintExpression(1))
    self.assertFalse(layer.constraintExpression(2))
    self.assertEqual(layer.fields().at(0).constraints().constraintExpression(), '1+2')
    self.assertFalse(layer.fields().at(1).constraints().constraintExpression())
def testSaveRestoreConstraintExpressions(self):
    """Test that constraint expressions survive an XML write/read round trip."""
    layer = createLayerWithOnePoint()

    # round-trip an unconstrained layer: nothing should come back
    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    self.assertTrue(layer.writeXml(elem, doc, QgsReadWriteContext()))
    layer2 = createLayerWithOnePoint()
    self.assertTrue(layer2.readXml(elem, QgsReadWriteContext()))
    self.assertFalse(layer2.constraintExpression(0))
    self.assertFalse(layer2.constraintExpression(1))

    # attach expressions and round-trip again
    layer.setConstraintExpression(0, '1+2')
    layer.setConstraintExpression(1, '3+4', 'desc')
    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    self.assertTrue(layer.writeXml(elem, doc, QgsReadWriteContext()))

    layer3 = createLayerWithOnePoint()
    self.assertTrue(layer3.readXml(elem, QgsReadWriteContext()))
    self.assertEqual(layer3.constraintExpression(0), '1+2')
    self.assertEqual(layer3.constraintExpression(1), '3+4')
    self.assertEqual(layer3.constraintDescription(1), 'desc')

    constraints0 = layer3.fields().at(0).constraints()
    constraints1 = layer3.fields().at(1).constraints()
    self.assertEqual(constraints0.constraintExpression(), '1+2')
    self.assertEqual(constraints1.constraintExpression(), '3+4')
    self.assertEqual(constraints1.constraintDescription(), 'desc')
    # both fields should report a layer-origin expression constraint
    self.assertEqual(constraints0.constraints(), QgsFieldConstraints.ConstraintExpression)
    self.assertEqual(constraints1.constraints(), QgsFieldConstraints.ConstraintExpression)
    self.assertEqual(constraints0.constraintOrigin(QgsFieldConstraints.ConstraintExpression),
                     QgsFieldConstraints.ConstraintOriginLayer)
    self.assertEqual(constraints1.constraintOrigin(QgsFieldConstraints.ConstraintExpression),
                     QgsFieldConstraints.ConstraintOriginLayer)
def testGetFeatureLimitWithEdits(self):
    """ test getting features with a limit, when edits are present """
    layer = createLayerWithOnePoint()
    # now has one feature with id 0
    pr = layer.dataProvider()
    # add three more features directly to the provider
    f1 = QgsFeature(1)
    f1.setAttributes(["test", 3])
    f1.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(300, 200)))
    f2 = QgsFeature(2)
    f2.setAttributes(["test", 3])
    f2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
    f3 = QgsFeature(3)
    f3.setAttributes(["test", 3])
    f3.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
    self.assertTrue(pr.addFeatures([f1, f2, f3]))
    # sanity check: the limit is honoured with no edits pending
    req = QgsFeatureRequest().setLimit(2)
    self.assertEqual(len(list(layer.getFeatures(req))), 2)
    # now delete feature f1
    layer.startEditing()
    self.assertTrue(layer.deleteFeature(1))
    # limit must still be met from the remaining (undeleted) features
    req = QgsFeatureRequest().setLimit(2)
    self.assertEqual(len(list(layer.getFeatures(req))), 2)
    layer.rollBack()
    # change an attribute value required by filter
    layer.startEditing()
    req = QgsFeatureRequest().setFilterExpression('fldint=3').setLimit(2)
    self.assertTrue(layer.changeAttributeValue(2, 1, 4))
    # feature 2 no longer matches the expression, yet enough other
    # matching features remain to satisfy the limit of 2
    self.assertEqual(len(list(layer.getFeatures(req))), 2)
    layer.rollBack()
    layer.startEditing()
    req = QgsFeatureRequest().setFilterRect(QgsRectangle(50, 100, 150, 300)).setLimit(2)
    # move feature 2 outside the filter rect; limit should still be met
    self.assertTrue(layer.changeGeometry(2, QgsGeometry.fromPointXY(QgsPointXY(500, 600))))
    self.assertEqual(len(list(layer.getFeatures(req))), 2)
    layer.rollBack()
def testClone(self):
    """Configure a layer with (nearly) every settable property, clone it,
    and verify the clone serialises to byte-identical XML once the clone's
    layer id is patched to match the original's."""
    # init crs
    srs = QgsCoordinateReferenceSystem.fromEpsgId(3111)
    # init map layer styles
    tmplayer = createLayerWithTwoPoints()
    sym1 = QgsLineSymbol()
    sym1.setColor(Qt.magenta)
    tmplayer.setRenderer(QgsSingleSymbolRenderer(sym1))
    style0 = QgsMapLayerStyle()
    style0.readFromLayer(tmplayer)
    style1 = QgsMapLayerStyle()
    style1.readFromLayer(tmplayer)
    # init dependencies layers
    ldep = createLayerWithTwoPoints()
    dep = QgsMapLayerDependency(ldep.id())
    # init layer
    layer = createLayerWithTwoPoints()
    layer.setBlendMode(QPainter.CompositionMode_Screen)
    layer.styleManager().addStyle('style0', style0)
    layer.styleManager().addStyle('style1', style1)
    # metadata-like string properties
    layer.setName('MyName')
    layer.setShortName('MyShortName')
    layer.setMaximumScale(0.5)
    layer.setMinimumScale(1.5)
    layer.setScaleBasedVisibility(True)
    layer.setTitle('MyTitle')
    layer.setAbstract('MyAbstract')
    layer.setKeywordList('MyKeywordList')
    layer.setDataUrl('MyDataUrl')
    layer.setDataUrlFormat('MyDataUrlFormat')
    layer.setAttribution('MyAttribution')
    layer.setAttributionUrl('MyAttributionUrl')
    layer.setMetadataUrl('MyMetadataUrl')
    layer.setMetadataUrlType('MyMetadataUrlType')
    layer.setMetadataUrlFormat('MyMetadataUrlFormat')
    layer.setLegendUrl('MyLegendUrl')
    layer.setLegendUrlFormat('MyLegendUrlFormat')
    layer.setDependencies([dep])
    layer.setCrs(srs)
    layer.setCustomProperty('MyKey0', 'MyValue0')
    layer.setCustomProperty('MyKey1', 'MyValue1')
    layer.setOpacity(0.66)
    layer.setProviderEncoding('latin9')
    layer.setDisplayExpression('MyDisplayExpression')
    layer.setMapTipTemplate('MyMapTipTemplate')
    layer.setExcludeAttributesWfs(['MyExcludeAttributeWFS'])
    layer.setExcludeAttributesWms(['MyExcludeAttributeWMS'])
    layer.setFeatureBlendMode(QPainter.CompositionMode_Xor)
    # renderer
    sym = QgsLineSymbol()
    sym.setColor(Qt.magenta)
    layer.setRenderer(QgsSingleSymbolRenderer(sym))
    # geometry simplification
    simplify = layer.simplifyMethod()
    simplify.setTolerance(33.3)
    simplify.setThreshold(0.333)
    layer.setSimplifyMethod(simplify)
    layer.setFieldAlias(0, 'MyAlias0')
    layer.setFieldAlias(1, 'MyAlias1')
    # joins to two other in-memory layers
    jl0 = createLayerWithTwoPoints()
    j0 = QgsVectorLayerJoinInfo()
    j0.setJoinLayer(jl0)
    jl1 = createLayerWithTwoPoints()
    j1 = QgsVectorLayerJoinInfo()
    j1.setJoinLayer(jl1)
    layer.addJoin(j0)
    layer.addJoin(j1)
    # selection
    fids = layer.allFeatureIds()
    selected_fids = fids[0:3]
    layer.selectByIds(selected_fids)
    # attribute table configuration
    cfg = layer.attributeTableConfig()
    cfg.setSortOrder(Qt.DescendingOrder)  # by default AscendingOrder
    layer.setAttributeTableConfig(cfg)
    # labeling
    pal = QgsPalLayerSettings()
    text_format = QgsTextFormat()
    text_format.setSize(33)
    text_format.setColor(Qt.magenta)
    pal.setFormat(text_format)
    labeling = QgsVectorLayerSimpleLabeling(pal)
    layer.setLabeling(labeling)
    # diagrams
    diag_renderer = QgsSingleCategoryDiagramRenderer()
    diag_renderer.setAttributeLegend(False)  # true by default
    layer.setDiagramRenderer(diag_renderer)
    diag_settings = QgsDiagramLayerSettings()
    diag_settings.setPriority(3)
    diag_settings.setZIndex(0.33)
    layer.setDiagramLayerSettings(diag_settings)
    # edit form
    edit_form_config = layer.editFormConfig()
    edit_form_config.setUiForm("MyUiForm")
    edit_form_config.setInitFilePath("MyInitFilePath")
    layer.setEditFormConfig(edit_form_config)
    widget_setup = QgsEditorWidgetSetup("MyWidgetSetupType", {})
    layer.setEditorWidgetSetup(0, widget_setup)
    # field constraints and defaults
    layer.setConstraintExpression(0, "MyFieldConstraintExpression")
    layer.setFieldConstraint(0, QgsFieldConstraints.ConstraintUnique, QgsFieldConstraints.ConstraintStrengthHard)
    layer.setDefaultValueDefinition(0, QgsDefaultValue("MyDefaultValueExpression"))
    # actions
    action = QgsAction(QgsAction.Unix, "MyActionDescription", "MyActionCmd")
    layer.actions().addAction(action)
    # layer metadata
    metadata = QgsLayerMetadata()
    metadata.setFees('a handful of roos')
    layer.setMetadata(metadata)
    # clone layer
    clone = layer.clone()
    self.assertEqual(layer.metadata().fees(), 'a handful of roos')
    # generate xml from layer
    layer_doc = QDomDocument("doc")
    layer_elem = layer_doc.createElement("maplayer")
    layer.writeLayerXml(layer_elem, layer_doc, QgsReadWriteContext())
    # generate xml from clone
    clone_doc = QDomDocument("doc")
    clone_elem = clone_doc.createElement("maplayer")
    clone.writeLayerXml(clone_elem, clone_doc, QgsReadWriteContext())
    # replace id within xml of clone (ids always differ, so patch the
    # clone's id to the original's before comparing the documents)
    clone_id_elem = clone_elem.firstChildElement("id")
    clone_id_elem_patch = clone_doc.createElement("id")
    clone_id_elem_patch_value = clone_doc.createTextNode(layer.id())
    clone_id_elem_patch.appendChild(clone_id_elem_patch_value)
    clone_elem.replaceChild(clone_id_elem_patch, clone_id_elem)
    # update doc
    clone_doc.appendChild(clone_elem)
    layer_doc.appendChild(layer_elem)
    # compare xml documents
    self.assertEqual(layer_doc.toString(), clone_doc.toString())
def testQgsVectorLayerSelectedFeatureSource(self):
    """
    test QgsVectorLayerSelectedFeatureSource
    """
    layer = QgsVectorLayer("Point?crs=epsg:3111&field=fldtxt:string&field=fldint:integer",
                           "addfeat", "memory")
    pr = layer.dataProvider()
    # populate the provider with five point features
    f1 = QgsFeature(1)
    f1.setAttributes(["test", 123])
    f1.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
    f2 = QgsFeature(2)
    f2.setAttributes(["test2", 457])
    f2.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(200, 200)))
    f3 = QgsFeature(3)
    f3.setAttributes(["test2", 888])
    f3.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(300, 200)))
    f4 = QgsFeature(4)
    f4.setAttributes(["test3", -1])
    f4.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(400, 300)))
    f5 = QgsFeature(5)
    f5.setAttributes(["test4", 0])
    f5.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(0, 0)))
    self.assertTrue(pr.addFeatures([f1, f2, f3, f4, f5]))
    self.assertEqual(layer.featureCount(), 5)
    # the source mirrors the layer's crs, wkb type and fields
    source = QgsVectorLayerSelectedFeatureSource(layer)
    self.assertEqual(source.sourceCrs().authid(), 'EPSG:3111')
    self.assertEqual(source.wkbType(), QgsWkbTypes.Point)
    self.assertEqual(source.fields(), layer.fields())
    # no selection
    self.assertEqual(source.featureCount(), 0)
    it = source.getFeatures()
    f = QgsFeature()
    self.assertFalse(it.nextFeature(f))
    # with selection
    layer.selectByIds([f1.id(), f3.id(), f5.id()])
    source = QgsVectorLayerSelectedFeatureSource(layer)
    self.assertEqual(source.featureCount(), 3)
    ids = set([f.id() for f in source.getFeatures()])
    self.assertEqual(ids, {f1.id(), f3.id(), f5.id()})
    # test that requesting subset of ids intersects this request with the selected ids
    ids = set([f.id() for f in source.getFeatures(QgsFeatureRequest().setFilterFids([f1.id(), f2.id(), f5.id()]))])
    self.assertEqual(ids, {f1.id(), f5.id()})
    # test that requesting id works
    ids = set([f.id() for f in source.getFeatures(QgsFeatureRequest().setFilterFid(f1.id()))])
    self.assertEqual(ids, {f1.id()})
    ids = set([f.id() for f in source.getFeatures(QgsFeatureRequest().setFilterFid(f5.id()))])
    self.assertEqual(ids, {f5.id()})
    # test that source has stored snapshot of selected features
    # (changing the selection afterwards must not affect the source)
    layer.selectByIds([f2.id(), f4.id()])
    self.assertEqual(source.featureCount(), 3)
    ids = set([f.id() for f in source.getFeatures()])
    self.assertEqual(ids, {f1.id(), f3.id(), f5.id()})
    # test that source is not dependent on layer
    del layer
    ids = set([f.id() for f in source.getFeatures()])
    self.assertEqual(ids, {f1.id(), f3.id(), f5.id()})
def testFeatureRequestWithReprojectionAndVirtualFields(self):
    """Virtual (expression) field values must be computed on the layer's own
    CRS coordinates, even when a request reprojects the geometries."""
    layer = self.getSource()
    # virtual field returning the feature's x coordinate in layer CRS
    field = QgsField('virtual', QVariant.Double)
    layer.addExpressionField('$x', field)
    virtual_values = [f['virtual'] for f in layer.getFeatures()]
    self.assertAlmostEqual(virtual_values[0], -71.123, 2)
    self.assertEqual(virtual_values[1], NULL)  # feature without geometry
    self.assertAlmostEqual(virtual_values[2], -70.332, 2)
    self.assertAlmostEqual(virtual_values[3], -68.2, 2)
    self.assertAlmostEqual(virtual_values[4], -65.32, 2)
    # repeat, with reprojection on request
    request = QgsFeatureRequest().setDestinationCrs(QgsCoordinateReferenceSystem.fromEpsgId(3785),
                                                    QgsProject.instance().transformContext())
    features = [f for f in layer.getFeatures(request)]
    # virtual field value should not change, even though geometry has
    self.assertAlmostEqual(features[0]['virtual'], -71.123, 2)
    self.assertAlmostEqual(features[0].geometry().constGet().x(), -7917376, -5)
    self.assertEqual(features[1]['virtual'], NULL)
    self.assertFalse(features[1].hasGeometry())
    self.assertAlmostEqual(features[2]['virtual'], -70.332, 2)
    self.assertAlmostEqual(features[2].geometry().constGet().x(), -7829322, -5)
    self.assertAlmostEqual(features[3]['virtual'], -68.2, 2)
    self.assertAlmostEqual(features[3].geometry().constGet().x(), -7591989, -5)
    self.assertAlmostEqual(features[4]['virtual'], -65.32, 2)
    self.assertAlmostEqual(features[4].geometry().constGet().x(), -7271389, -5)
def testPrecision(self):
    """A layer-level geometry precision snaps newly added geometries to the
    grid; a precision of 0.0 leaves them untouched."""
    raw_wkt = 'Polygon ((2596411 1224654, 2596400 1224652, 2596405 1224640, 2596410 1224641, 2596411 1224654))'
    snapped_wkt = 'Polygon ((2596410 1224650, 2596400 1224650, 2596410 1224640, 2596410 1224650))'

    layer = QgsVectorLayer("Polygon?crs=epsg:2056&field=pk:int", "vl", "memory")
    layer.geometryOptions().setGeometryPrecision(10)

    feature = QgsFeature(layer.fields())
    feature.setGeometry(QgsGeometry.fromWkt(raw_wkt))
    layer.startEditing()
    layer.addFeature(feature)
    self.assertGeometriesEqual(QgsGeometry.fromWkt(snapped_wkt), feature.geometry(), 'geometry with unsnapped nodes', 'fixed geometry')

    # with precision disabled, the geometry must pass through unchanged
    layer.geometryOptions().setGeometryPrecision(0.0)
    feature.setGeometry(QgsGeometry.fromWkt(raw_wkt))
    layer.addFeature(feature)
    self.assertGeometriesEqual(QgsGeometry.fromWkt(raw_wkt), feature.geometry(), 'geometry with duplicates', 'unchanged geometry')
def testRemoveDuplicateNodes(self):
    """With removeDuplicateNodes enabled, consecutive duplicate vertices are
    dropped on addFeature; with it disabled the geometry is kept verbatim."""
    wkt_with_dupes = 'Polygon ((70 80, 80 90, 80 90, 60 50, 70 80))'
    wkt_cleaned = 'Polygon ((70 80, 80 90, 60 50, 70 80))'

    layer = QgsVectorLayer("Polygon?crs=epsg:2056&field=pk:int", "vl", "memory")
    layer.geometryOptions().setRemoveDuplicateNodes(True)

    feature = QgsFeature(layer.fields())
    feature.setGeometry(QgsGeometry.fromWkt(wkt_with_dupes))
    layer.startEditing()
    layer.addFeature(feature)
    self.assertGeometriesEqual(feature.geometry(), QgsGeometry.fromWkt(wkt_cleaned), 'fixed geometry', 'geometry with duplicates')

    # disabled: the duplicate vertex survives
    layer.geometryOptions().setRemoveDuplicateNodes(False)
    feature.setGeometry(QgsGeometry.fromWkt(wkt_with_dupes))
    layer.addFeature(feature)
    self.assertGeometriesEqual(feature.geometry(), QgsGeometry.fromWkt(wkt_with_dupes), 'unchanged geometry', 'geometry with duplicates')
def testPrecisionAndDuplicateNodes(self):
    """Precision snapping and duplicate-node removal combine: vertices that
    become coincident after snapping are collapsed as well."""
    layer = QgsVectorLayer("Polygon?crs=epsg:2056&field=pk:int", "vl", "memory")
    geom_options = layer.geometryOptions()
    geom_options.setGeometryPrecision(10)
    geom_options.setRemoveDuplicateNodes(True)

    feature = QgsFeature(layer.fields())
    feature.setGeometry(QgsGeometry.fromWkt('Polygon ((2596411 1224654, 2596400 1224652, 2596402 1224653, 2596405 1224640, 2596410 1224641, 2596411 1224654))'))
    layer.startEditing()
    layer.addFeature(feature)
    self.assertGeometriesEqual(QgsGeometry.fromWkt('Polygon ((2596410 1224650, 2596400 1224650, 2596410 1224640, 2596410 1224650))'), feature.geometry(), 'geometry with unsnapped nodes', 'fixed geometry')
def testDefaultDisplayExpression(self):
    """
    Test that default display expression gravitates to most interesting column names
    """
    # (layer uri, expected display field) — each uri adds a more
    # "interesting" candidate column than the previous one
    cases = [
        ("Polygon?crs=epsg:2056&field=pk:int", 'pk'),
        ("Polygon?crs=epsg:2056&field=pk:int&field=DESCRIPTION:string&field=fid:int", 'DESCRIPTION'),
        ("Polygon?crs=epsg:2056&field=pk:int&field=DESCRIPTION:string&field=fid:int&field=NAME:string", 'NAME'),
        ("Polygon?crs=epsg:2056&field=pk:int&field=DESCRIPTION:string&field=fid:int&field=BETTER_NAME:string&field=NAME:string", 'BETTER_NAME'),
    ]
    for uri, expected in cases:
        layer = QgsVectorLayer(uri, "vl", "memory")
        self.assertEqual(layer.displayExpression(), '"{}"'.format(expected))
        self.assertEqual(layer.displayField(), expected)
class TestQgsVectorLayerSourceAddedFeaturesInBuffer(unittest.TestCase, FeatureSourceTestCase):
    """Runs the standard feature-source tests against a layer whose features
    exist only in the edit buffer (the provider itself stays empty)."""

    @classmethod
    def getSource(cls):
        """Build the test layer: all features added while editing, not committed."""
        vl = QgsVectorLayer(
            'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&field=dt:datetime&field=date:date&field=time:time&key=pk',
            'test', 'memory')
        assert (vl.isValid())

        # (attributes, wkt or None) for each of the five standard features
        rows = [
            ([5, -200, NULL, 'NuLl', '5', QDateTime(QDate(2020, 5, 4), QTime(12, 13, 14)), QDate(2020, 5, 2), QTime(12, 13, 1)],
             'Point (-71.123 78.23)'),
            ([3, 300, 'Pear', 'PEaR', '3', NULL, NULL, NULL],
             None),
            ([1, 100, 'Orange', 'oranGe', '1', QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)), QDate(2020, 5, 3), QTime(12, 13, 14)],
             'Point (-70.332 66.33)'),
            ([2, 200, 'Apple', 'Apple', '2', QDateTime(QDate(2020, 5, 4), QTime(12, 14, 14)), QDate(2020, 5, 4), QTime(12, 14, 14)],
             'Point (-68.2 70.8)'),
            ([4, 400, 'Honey', 'Honey', '4', QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)), QDate(2021, 5, 4), QTime(13, 13, 14)],
             'Point (-65.32 78.3)'),
        ]
        features = []
        for attrs, wkt in rows:
            feat = QgsFeature()
            feat.setAttributes(attrs)
            if wkt is not None:
                feat.setGeometry(QgsGeometry.fromWkt(wkt))
            features.append(feat)

        # create a layer with features only in the added features buffer - not the provider
        vl.startEditing()
        vl.addFeatures(features)
        return vl

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Create test layer for FeatureSourceTestCase
        cls.source = cls.getSource()

    def testGetFeaturesSubsetAttributes2(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testGetFeaturesNoGeometry(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testOrderBy(self):
        """ Skip order by tests - edited features are not sorted in iterators.
        (Maybe they should be??)
        """
        pass

    def testMinimumValue(self):
        """ Skip min values test - due to inconsistencies in how null values are treated by providers.
        They are included here, but providers don't include them.... which is right?
        """
        pass
class TestQgsVectorLayerSourceChangedGeometriesInBuffer(unittest.TestCase, FeatureSourceTestCase):
    """Runs the standard feature-source tests against a layer whose geometries
    were all replaced in the edit buffer (uncommitted)."""

    @classmethod
    def getSource(cls):
        """Build the test layer: commit features with wrong/absent geometries,
        then fix every geometry inside the edit buffer."""
        vl = QgsVectorLayer(
            'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&field=dt:datetime&field=date:date&field=time:time&key=pk',
            'test', 'memory')
        assert (vl.isValid())

        # provider features: only pk=3 starts with a (wrong) geometry
        rows = [
            ([5, -200, NULL, 'NuLl', '5', QDateTime(QDate(2020, 5, 4), QTime(12, 13, 14)), QDate(2020, 5, 2), QTime(12, 13, 1)],
             None),
            ([3, 300, 'Pear', 'PEaR', '3', NULL, NULL, NULL],
             'Point (-70.5 65.2)'),
            ([1, 100, 'Orange', 'oranGe', '1', QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)), QDate(2020, 5, 3), QTime(12, 13, 14)],
             None),
            ([2, 200, 'Apple', 'Apple', '2', QDateTime(QDate(2020, 5, 4), QTime(12, 14, 14)), QDate(2020, 5, 4), QTime(12, 14, 14)],
             None),
            ([4, 400, 'Honey', 'Honey', '4', QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)), QDate(2021, 5, 4), QTime(13, 13, 14)],
             None),
        ]
        features = []
        for attrs, wkt in rows:
            feat = QgsFeature()
            feat.setAttributes(attrs)
            if wkt is not None:
                feat.setGeometry(QgsGeometry.fromWkt(wkt))
            features.append(feat)
        vl.dataProvider().addFeatures(features)

        ids = {f['pk']: f.id() for f in vl.getFeatures()}

        # modify geometries in buffer (None means "clear the geometry")
        new_geometries = {
            5: 'Point (-71.123 78.23)',
            3: None,
            1: 'Point (-70.332 66.33)',
            2: 'Point (-68.2 70.8)',
            4: 'Point (-65.32 78.3)',
        }
        vl.startEditing()
        for pk, wkt in new_geometries.items():
            geom = QgsGeometry.fromWkt(wkt) if wkt is not None else QgsGeometry()
            vl.changeGeometry(ids[pk], geom)
        return vl

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Create test layer for FeatureSourceTestCase
        cls.source = cls.getSource()

    def testGetFeaturesSubsetAttributes2(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testGetFeaturesNoGeometry(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testOrderBy(self):
        """ Skip order by tests - edited features are not sorted in iterators.
        (Maybe they should be??)
        """
        pass
class TestQgsVectorLayerSourceChangedAttributesInBuffer(unittest.TestCase, FeatureSourceTestCase):
    """Runs the standard feature-source tests against a layer whose attribute
    values were all rewritten in the edit buffer (uncommitted)."""

    @classmethod
    def getSource(cls):
        """Build the test layer: commit features with placeholder attributes,
        then overwrite fields 1..7 of every feature inside the edit buffer."""
        vl = QgsVectorLayer(
            'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&field=dt:datetime&field=date:date&field=time:time&key=pk',
            'test', 'memory')
        assert (vl.isValid())

        # provider features carry throw-away attribute values; only pk and
        # geometry are already final
        rows = [
            ([5, 200, 'a', 'b', 'c', QDateTime(2020, 4, 5, 1, 2, 3), QDate(2020, 4, 5), QTime(1, 2, 3)],
             'Point (-71.123 78.23)'),
            ([3, -200, 'd', 'e', 'f', QDateTime(2020, 4, 5, 1, 2, 3), QDate(2020, 4, 5), QTime(1, 2, 3)],
             None),
            ([1, -100, 'g', 'h', 'i', QDateTime(2020, 4, 5, 1, 2, 3), QDate(2020, 4, 5), QTime(1, 2, 3)],
             'Point (-70.332 66.33)'),
            ([2, -200, 'j', 'k', 'l', QDateTime(2020, 4, 5, 1, 2, 3), QDate(2020, 4, 5), QTime(1, 2, 3)],
             'Point (-68.2 70.8)'),
            ([4, 400, 'm', 'n', 'o', QDateTime(2020, 4, 5, 1, 2, 3), QDate(2020, 4, 5), QTime(1, 2, 3)],
             'Point (-65.32 78.3)'),
        ]
        features = []
        for attrs, wkt in rows:
            feat = QgsFeature()
            feat.setAttributes(attrs)
            if wkt is not None:
                feat.setGeometry(QgsGeometry.fromWkt(wkt))
            features.append(feat)
        vl.dataProvider().addFeatures(features)

        ids = {f['pk']: f.id() for f in vl.getFeatures()}

        # final values for fields 1..7, keyed by pk; applied in the buffer
        new_attributes = {
            5: [-200, NULL, 'NuLl', '5', QDateTime(QDate(2020, 5, 4), QTime(12, 13, 14)), QDate(2020, 5, 2), QTime(12, 13, 1)],
            3: [300, 'Pear', 'PEaR', '3', NULL, NULL, NULL],
            1: [100, 'Orange', 'oranGe', '1', QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)), QDate(2020, 5, 3), QTime(12, 13, 14)],
            2: [200, 'Apple', 'Apple', '2', QDateTime(QDate(2020, 5, 4), QTime(12, 14, 14)), QDate(2020, 5, 4), QTime(12, 14, 14)],
            4: [400, 'Honey', 'Honey', '4', QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)), QDate(2021, 5, 4), QTime(13, 13, 14)],
        }
        vl.startEditing()
        for pk, values in new_attributes.items():
            for field_index, value in enumerate(values, start=1):
                vl.changeAttributeValue(ids[pk], field_index, value)
        return vl

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Create test layer for FeatureSourceTestCase
        cls.source = cls.getSource()

    def testGetFeaturesSubsetAttributes2(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testGetFeaturesNoGeometry(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testOrderBy(self):
        """ Skip order by tests - edited features are not sorted in iterators.
        (Maybe they should be??)
        """
        pass

    def testUniqueValues(self):
        """ Skip unique values test - as noted in the docs this is unreliable when features are in the buffer
        """
        pass

    def testMinimumValue(self):
        """ Skip min values test - as noted in the docs this is unreliable when features are in the buffer
        """
        pass

    def testMaximumValue(self):
        """ Skip max values test - as noted in the docs this is unreliable when features are in the buffer
        """
        pass
class TestQgsVectorLayerSourceDeletedFeaturesInBuffer(unittest.TestCase, FeatureSourceTestCase):
    """Runs the standard feature-source tests against a layer where a set of
    decoy features was committed and then deleted in the edit buffer, leaving
    only the standard features visible."""

    @classmethod
    def getSource(cls):
        """Build the test layer: decoy + good features in the provider, decoys
        deleted (uncommitted) in the edit buffer."""
        # Fix: the URI previously contained a doubled '&' ("...&&key=pk"),
        # inconsistent with the sibling test classes.
        vl = QgsVectorLayer(
            'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&field=dt:datetime&field=date:date&field=time:time&key=pk',
            'test', 'memory')
        assert (vl.isValid())

        # add a bunch of similar features to the provider
        b1 = QgsFeature()
        b1.setAttributes([5, -300, 'Apple', 'PEaR', '1', QDateTime(QDate(2020, 5, 5), QTime(12, 11, 14)), QDate(2020, 5, 1), QTime(10, 13, 1)])
        b1.setGeometry(QgsGeometry.fromWkt('Point (-70.332 66.33)'))
        b2 = QgsFeature()
        b2.setAttributes([3, 100, 'Orange', 'NuLl', '2', QDateTime(QDate(2020, 5, 1), QTime(12, 13, 14)), QDate(2020, 5, 9), QTime(9, 13, 1)])
        b2.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
        b3 = QgsFeature()
        b3.setAttributes([1, -200, 'Honey', 'oranGe', '5', QDateTime(QDate(2020, 5, 1), QTime(12, 13, 14)), QDate(2020, 5, 19), QTime(2, 13, 1)])
        b4 = QgsFeature()
        b4.setAttributes([2, 400, 'Pear', 'Honey', '3', QDateTime(QDate(2020, 4, 4), QTime(12, 13, 14)), QDate(2020, 4, 2), QTime(4, 13, 1)])
        b4.setGeometry(QgsGeometry.fromWkt('Point (-65.32 78.3)'))
        b5 = QgsFeature()
        b5.setAttributes([4, 200, NULL, 'oranGe', '3', QDateTime(QDate(2019, 5, 4), QTime(12, 13, 14)), QDate(2019, 5, 2), QTime(1, 13, 1)])
        b5.setGeometry(QgsGeometry.fromWkt('Point (-68.2 70.8)'))
        vl.dataProvider().addFeatures([b1, b2, b3, b4, b5])
        # remember the feature ids of the decoys so they can be deleted later
        bad_ids = [f['pk'] for f in vl.getFeatures()]

        # here's our good features
        f1 = QgsFeature()
        f1.setAttributes([5, -200, NULL, 'NuLl', '5', QDateTime(QDate(2020, 5, 4), QTime(12, 13, 14)), QDate(2020, 5, 2), QTime(12, 13, 1)])
        f1.setGeometry(QgsGeometry.fromWkt('Point (-71.123 78.23)'))
        f2 = QgsFeature()
        f2.setAttributes([3, 300, 'Pear', 'PEaR', '3', NULL, NULL, NULL])
        f3 = QgsFeature()
        f3.setAttributes([1, 100, 'Orange', 'oranGe', '1', QDateTime(QDate(2020, 5, 3), QTime(12, 13, 14)), QDate(2020, 5, 3), QTime(12, 13, 14)])
        f3.setGeometry(QgsGeometry.fromWkt('Point (-70.332 66.33)'))
        f4 = QgsFeature()
        f4.setAttributes([2, 200, 'Apple', 'Apple', '2', QDateTime(QDate(2020, 5, 4), QTime(12, 14, 14)), QDate(2020, 5, 4), QTime(12, 14, 14)])
        f4.setGeometry(QgsGeometry.fromWkt('Point (-68.2 70.8)'))
        f5 = QgsFeature()
        f5.setAttributes([4, 400, 'Honey', 'Honey', '4', QDateTime(QDate(2021, 5, 4), QTime(13, 13, 14)), QDate(2021, 5, 4), QTime(13, 13, 14)])
        f5.setGeometry(QgsGeometry.fromWkt('Point (-65.32 78.3)'))
        vl.dataProvider().addFeatures([f1, f2, f3, f4, f5])

        # delete the bad features, but don't commit
        vl.startEditing()
        vl.deleteFeatures(bad_ids)
        return vl

    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Create test layer for FeatureSourceTestCase
        cls.source = cls.getSource()

    def testGetFeaturesSubsetAttributes2(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testGetFeaturesNoGeometry(self):
        """ Override and skip this QgsFeatureSource test. We are using a memory provider, and it's actually more efficient for the memory provider to return
        its features as direct copies (due to implicit sharing of QgsFeature)
        """
        pass

    def testOrderBy(self):
        """ Skip order by tests - edited features are not sorted in iterators.
        (Maybe they should be??)
        """
        pass

    def testUniqueValues(self):
        """ Skip unique values test - as noted in the docs this is unreliable when features are in the buffer
        """
        pass

    def testMinimumValue(self):
        """ Skip min values test - as noted in the docs this is unreliable when features are in the buffer
        """
        pass

    def testMaximumValue(self):
        """ Skip max values test - as noted in the docs this is unreliable when features are in the buffer
        """
        pass
class TestQgsVectorLayerTransformContext(unittest.TestCase):
    """Tests that a QgsVectorLayer picks up and tracks a
    QgsCoordinateTransformContext (from ctor options, from the project it is
    added to, and when the project's context is later replaced)."""

    def setUp(self):
        """Prepare tc"""
        super(TestQgsVectorLayerTransformContext, self).setUp()
        # Context carrying one 4326 -> 3857 transform, used by all tests below.
        self.ctx = QgsCoordinateTransformContext()
        self.ctx.addSourceDestinationDatumTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857), 1234, 1235)
        self.ctx.addCoordinateOperation(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857), 'test')

    def testTransformContextIsSetInCtor(self):
        """Test transform context can be set from ctor"""
        vl = QgsVectorLayer(
            'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
            'test', 'memory')
        # A plain layer has no transform registered.
        self.assertFalse(vl.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))

        # Passing LayerOptions carrying the context wires it through.
        options = QgsVectorLayer.LayerOptions(self.ctx)
        vl = QgsVectorLayer(
            'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
            'test', 'memory', options)
        self.assertTrue(vl.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))

    def testTransformContextInheritsFromProject(self):
        """Test that when a layer is added to a project it inherits its context"""
        vl = QgsVectorLayer(
            'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
            'test', 'memory')
        self.assertFalse(vl.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))

        p = QgsProject()
        self.assertFalse(p.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))
        p.setTransformContext(self.ctx)
        self.assertTrue(p.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))

        # Adding the layer to the project propagates the project's context.
        p.addMapLayers([vl])
        self.assertTrue(vl.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))

    def testTransformContextIsSyncedFromProject(self):
        """Test that when a layer is synced when project context changes"""
        vl = QgsVectorLayer(
            'Point?crs=epsg:4326&field=pk:integer&field=cnt:integer&field=name:string(0)&field=name2:string(0)&field=num_char:string&key=pk',
            'test', 'memory')
        self.assertFalse(vl.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))

        p = QgsProject()
        self.assertFalse(p.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))
        p.setTransformContext(self.ctx)
        self.assertTrue(p.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))
        p.addMapLayers([vl])
        self.assertTrue(vl.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))

        # Now change the project context: the layer must follow both ways.
        tc2 = QgsCoordinateTransformContext()
        p.setTransformContext(tc2)
        self.assertFalse(p.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))
        self.assertFalse(vl.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))
        p.setTransformContext(self.ctx)
        self.assertTrue(p.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))
        self.assertTrue(vl.transformContext().hasTransform(
            QgsCoordinateReferenceSystem.fromEpsgId(4326),
            QgsCoordinateReferenceSystem.fromEpsgId(3857)))
def testSubsetStringInvalidLayer(self):
    """
    Test that subset strings can be set on invalid layers, and retrieved later...
    """
    # 'no' is a deliberately non-existent provider key -> invalid layer.
    vl = QgsVectorLayer('nope', 'test', 'no')
    self.assertFalse(vl.isValid())
    self.assertIsNone(vl.dataProvider())
    vl.setSubsetString('xxxxxxxxx')
    self.assertEqual(vl.subsetString(), 'xxxxxxxxx')

    # invalid layer subset strings must be persisted via xml
    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    self.assertTrue(vl.writeXml(elem, doc, QgsReadWriteContext()))

    vl2 = QgsVectorLayer('nope', 'test', 'no')
    vl2.readXml(elem, QgsReadWriteContext())
    self.assertEqual(vl2.subsetString(), 'xxxxxxxxx')
def testLayerWithoutProvider(self):
    """Test that we don't crash when invoking methods on a layer with a broken provider"""
    # Pure smoke test: the provider key does not exist, so every call below is
    # only checked for "does not crash"; return values are intentionally ignored.
    layer = QgsVectorLayer("test", "test", "broken_provider")
    layer.clone()
    layer.storageType()
    layer.capabilitiesString()
    layer.dataComment()
    layer.displayField()
    layer.setDisplayExpression('')
    layer.displayExpression()
    layer.dataProvider()
    layer.temporalProperties()
    layer.setProviderEncoding('utf-8')
    layer.setCoordinateSystem()
    layer.addJoin(QgsVectorLayerJoinInfo())
    layer.removeJoin('id')
    layer.joinBuffer()
    layer.vectorJoins()
    layer.setDependencies([])
    layer.dependencies()
    idx = layer.addExpressionField('1+1', QgsField('foo'))
    # layer.expressionField(idx)
    # layer.updateExpressionField(idx, '')
    # layer.removeExpressionField(idx)
    layer.actions()
    layer.serverProperties()
    layer.selectedFeatureCount()
    layer.selectByRect(QgsRectangle())
    layer.selectByExpression('1')
    layer.selectByIds([0])
    layer.modifySelection([], [])
    layer.invertSelection()
    layer.selectAll()
    layer.invertSelectionInRectangle(QgsRectangle())
    layer.selectedFeatures()
    layer.getSelectedFeatures()
    layer.selectedFeatureIds()
    layer.boundingBoxOfSelected()
    layer.labelsEnabled()
    layer.setLabelsEnabled(False)
    layer.diagramsEnabled()
    layer.setDiagramRenderer(None)
    layer.diagramRenderer()
    layer.diagramLayerSettings()
    layer.setDiagramLayerSettings(QgsDiagramLayerSettings())
    layer.renderer()
    layer.setRenderer(None)
    layer.addFeatureRendererGenerator(None)
    layer.removeFeatureRendererGenerator(None)
    layer.featureRendererGenerators()
    layer.geometryType()
    layer.wkbType()
    layer.sourceCrs()
    layer.sourceName()
    layer.readXml
    doc = QDomDocument("testdoc")
    elem = doc.createElement("maplayer")
    layer.writeXml(elem, doc, QgsReadWriteContext())
    layer.readXml(elem, QgsReadWriteContext())
    layer.encodedSource('', QgsReadWriteContext())
    layer.decodedSource('', 'invalid_provider', QgsReadWriteContext())
    layer.resolveReferences(QgsProject())
    layer.saveStyleToDatabase('name', 'description', False, 'uiFileContent')
    layer.listStylesInDatabase()
    layer.getStyleFromDatabase('id')
    layer.deleteStyleFromDatabase('id')
    layer.loadNamedStyle('uri', False)
    layer.loadAuxiliaryLayer(QgsAuxiliaryStorage())
    layer.setAuxiliaryLayer(None)
    layer.auxiliaryLayer()
    # layer.readSymbology()
    # layer.readStyle()
    # layer.writeSymbology()
    # layer.writeStyle()
    # layer.writeSld()
    # layer.readSld()
    layer.featureCount(None)
    layer.symbolFeatureIds(None)
    layer.hasFeatures()
    layer.loadDefaultStyle()
    layer.countSymbolFeatures()
    layer.setSubsetString(None)
    layer.subsetString()
    layer.getFeatures()
    layer.getFeature(0)
    layer.getGeometry(0)
    layer.getFeatures([0])
    layer.getFeatures(QgsRectangle())
    layer.addFeature(QgsFeature())
    layer.updateFeature(QgsFeature())
    layer.insertVertex(0, 0, 0, False)
    layer.moveVertex(0, 0, 0, False)
    layer.moveVertexV2(QgsPoint(), 0, False)
    layer.deleteVertex(0, 0)
    layer.deleteSelectedFeatures()
    layer.addRing([QgsPointXY()])
    # layer.addRing(QgsPointSequence())
    # layer.addRing(QgsCurve())
    # layer.addPart()
    layer.translateFeature(0, 0, 0)
    layer.splitParts([])
    layer.splitFeatures([])
    layer.addTopologicalPoints(QgsPoint())
    layer.labeling()
    layer.setLabeling(None)
    layer.isEditable()
    layer.isSpatial()
    layer.isModified()
    layer.isAuxiliaryField(0)
    layer.reload()
    layer.createMapRenderer(QgsRenderContext())
    layer.extent()
    layer.sourceExtent()
    layer.fields()
    layer.attributeList()
    layer.primaryKeyAttributes()
    layer.featureCount()
    layer.setReadOnly(False)
    layer.supportsEditing()
    layer.changeGeometry(0, QgsGeometry())
    layer.changeAttributeValue(0, 0, '')
    layer.changeAttributeValues(0, {})
    layer.addAttribute(QgsField('foo'))
    layer.setFieldAlias(0, 'bar')
    layer.removeFieldAlias(0)
    layer.renameAttribute(0, 'bar')
    layer.attributeAlias(0)
    layer.attributeDisplayName(0)
    layer.attributeAliases()
    layer.deleteAttribute(0)
    layer.deleteAttributes([])
    layer.deleteFeature(0)
    layer.deleteFeatures([])
    layer.commitChanges()
    layer.commitErrors()
    layer.rollBack()
    layer.referencingRelations(0)
    layer.editBuffer()
    layer.beginEditCommand('foo')
    layer.endEditCommand()
    layer.destroyEditCommand()
    layer.updateFields()
    layer.defaultValue(0)
    layer.setDefaultValueDefinition(0, layer.defaultValueDefinition(0))
    layer.fieldConstraints(0)
    layer.fieldConstraintsAndStrength(0)
    layer.setFieldConstraint(0, QgsFieldConstraints.ConstraintUnique)
    layer.removeFieldConstraint(0, QgsFieldConstraints.ConstraintUnique)
    layer.constraintExpression(0)
    layer.constraintDescription(0)
    layer.setConstraintExpression(0, '1')
    layer.setEditorWidgetSetup(0, QgsEditorWidgetSetup('Hidden', {}))
    layer.editorWidgetSetup(0)
    layer.uniqueValues(0)
    layer.uniqueStringsMatching(0, None)
    layer.minimumValue(0)
    layer.maximumValue(0)
    layer.minimumAndMaximumValue(0)
    layer.aggregate(QgsAggregateCalculator.Count, 'foo')
    layer.setFeatureBlendMode(QPainter.CompositionMode_Screen)
    layer.featureBlendMode()
    layer.htmlMetadata()
    layer.setSimplifyMethod(layer.simplifyMethod())
    # layer.simplifyDrawingCanbeApplied()
    layer.conditionalStyles()
    layer.attributeTableConfig()
    layer.setAttributeTableConfig(layer.attributeTableConfig())
    layer.mapTipTemplate()
    layer.setMapTipTemplate('')
    layer.createExpressionContext()
    layer.editFormConfig()
    layer.setEditFormConfig(layer.editFormConfig())
    layer.setReadExtentFromXml(False)
    layer.readExtentFromXml()
    layer.isEditCommandActive()
    layer.storedExpressionManager()
    layer.select(0)
    layer.select([])
    layer.deselect(0)
    layer.deselect([])
    layer.removeSelection()
    layer.reselect()
    layer.updateExtents()
    layer.startEditing()
    layer.setTransformContext(QgsCoordinateTransformContext())
    layer.hasSpatialIndex()
    # layer.accept(QgsStyleEntityVisitorInterface())
# TODO:
# - fetch rect: feat with changed geometry: 1. in rect, 2. out of rect
# - more join tests
# - import
# Script entry point: run all unittest cases defined in this module.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
fajoy/nova | nova/tests/virt/xenapi/test_volumeops.py | 2 | 4068 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import test
from nova.tests.xenapi import stubs
from nova.virt.xenapi import volumeops
class VolumeAttachTestCase(test.TestCase):
    """Mox-based tests for VolumeOps.attach_volume/connect_volume."""

    def test_attach_volume_call(self):
        # attach_volume() must resolve the VM ref and device number, then
        # delegate to connect_volume() with hotplug=True by default.
        ops = volumeops.VolumeOps('session')
        self.mox.StubOutWithMock(ops, 'connect_volume')
        self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
        self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')

        volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
            'vmref')
        volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
            'devnumber')
        ops.connect_volume(
            'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=True)
        self.mox.ReplayAll()

        ops.attach_volume(
            dict(driver_volume_type='iscsi', data='conn_data'),
            'instance_1', 'mountpoint')

    def test_attach_volume_no_hotplug(self):
        # Same path as above, but the hotplug=False flag must be forwarded.
        ops = volumeops.VolumeOps('session')
        self.mox.StubOutWithMock(ops, 'connect_volume')
        self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
        self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')

        volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
            'vmref')
        volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
            'devnumber')
        ops.connect_volume(
            'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=False)
        self.mox.ReplayAll()

        ops.attach_volume(
            dict(driver_volume_type='iscsi', data='conn_data'),
            'instance_1', 'mountpoint', hotplug=False)

    def test_connect_volume_no_hotplug(self):
        # With hotplug=False, connect_volume() must never reach VBD.plug,
        # i.e. call_xenapi must not be invoked.
        session = stubs.FakeSessionForVolumeTests('fake_uri')
        ops = volumeops.VolumeOps(session)
        instance_name = 'instance_1'
        sr_uuid = '1'
        sr_label = 'Disk-for:%s' % instance_name
        sr_params = ''
        sr_ref = 'sr_ref'
        vdi_uuid = '2'
        vdi_ref = 'vdi_ref'
        vbd_ref = 'vbd_ref'
        connection_data = {'vdi_uuid': vdi_uuid}
        vm_ref = 'vm_ref'
        dev_number = 1

        called = {'xenapi': False}

        def fake_call_xenapi(self, *args, **kwargs):
            # Only used for VBD.plug in this code path.
            called['xenapi'] = True
            raise Exception()

        self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)

        self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
        self.mox.StubOutWithMock(ops, 'introduce_sr')
        self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
        self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')

        volumeops.volume_utils.parse_sr_info(
            connection_data, sr_label).AndReturn(
                tuple([sr_uuid, sr_label, sr_params]))
        ops.introduce_sr(sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
        volumeops.volume_utils.introduce_vdi(
            session, sr_ref, vdi_uuid, None).AndReturn(vdi_ref)
        volumeops.vm_utils.create_vbd(
            session, vm_ref, vdi_ref, dev_number,
            bootable=False, osvol=True).AndReturn(vbd_ref)
        self.mox.ReplayAll()

        ops.connect_volume(connection_data, dev_number, instance_name,
                           vm_ref, hotplug=False)

        self.assertEquals(False, called['xenapi'])
| apache-2.0 |
ron1818/Singaboat_RobotX2016 | robotx_nav/nodes/task1_toplevel.py | 3 | 9161 | #!/usr/bin/env python
""" task 1:
-----------------
Created by Ren Ye @ 2016-11-06
Authors: Ren Ye, Reinaldo
-----------------
<put the descriptions from robotx.org pdf file>
<put the algorithms in natural language, can use bullet points, best is to use markdown format>
<if you have plan b, can put it here>
## example ##
+ Go to start point
+ Rotate in position to detect red_1 and green_1 buoys
+ Plot perpendicular waypoints wrt to position of red and green buoys
+ Move towards waypoints move_base_forward
+ meanwhile requesting positions of red_2 and green_2
+ shutdown move_base_forward, create new move_base_forward towards mid of red_2 and green_2
<change log put here>
### @ 2016-11-06 ###
+ create template
renye's approach:
1. drive to gps waypoint
2. slowly rotate in place  # no longer needed
3. detect red and green totems by any camera
4. rotate to bow to red and green totems
5. roi of red in bow/left and roi of green in bow/right, calculate center
6. drive until roi vanishes from both bow cameras, detect totem from port and starboard
7. see new roi from bow
8. drive with 5 and 6
reinaldo's approach:
1. fill bucket of markers array until full
2. do k-means clustering to differentiate monocolor totems
3. get closest pairs
4. plan based on pairs, replan if new plan is far from old plan
5. loop to 2.
6. terminate if displacement from start to end > termination_distance
"""
# stdlib
import math
import multiprocessing as mp
import os
import time

# third-party
import numpy as np
from sklearn.cluster import KMeans

# ROS
import rospy
import tf
from tf.transformations import euler_from_quaternion
from nav_msgs.msg import Odometry
# Quaternion added: get_tf() constructs one but it was never imported.
from geometry_msgs.msg import Point, Pose, Quaternion
from visualization_msgs.msg import MarkerArray, Marker

# local
from move_base_forward import Forward
from move_base_force_cancel import ForceCancel
def constant_heading(goal):
    """Spawn a constant-heading move_base_forward node driving toward *goal*.

    The Forward node does its work from its constructor (ROS node pattern),
    so the instance itself is not kept.
    """
    Forward(nodename="constant_heading", target=goal,
            waypoint_separation=5, is_relative=False)
def cancel_forward():
    """Kill the constant_heading ROS node (cancels the current approach)."""
    os.system('rosnode kill constant_heading')
class PassGates(object):
    """RobotX task 1: drive between the closest red/green totem pair.

    Buffers detected totem positions, k-means-clusters them into two centers
    per colour, and repeatedly replans a constant-heading goal through the
    midpoint of the nearest red/green pair until the boat has travelled
    ``termination_displacement`` metres from its start position.
    """

    pool = mp.Pool()
    x0, y0, yaw0 = 0, 0, 0
    MAX_DATA = 30
    markers_array = MarkerArray()
    red_totem = np.zeros((MAX_DATA, 2))    # unordered list
    green_totem = np.zeros((MAX_DATA, 2))
    red_centers = np.zeros((2, 2))         # ordered list of centers x, y
    green_centers = np.zeros((2, 2))
    red_position = np.zeros((2, 2))        # ordered list of centers x, y
    green_position = np.zeros((2, 2))
    red_counter = 0
    green_counter = 0
    replan_min = 5
    termination_displacement = 60

    def __init__(self):
        print("starting task 1")
        rospy.init_node('task_1', anonymous=True)
        rospy.Subscriber("/filtered_marker_array", MarkerArray,
                         self.marker_callback, queue_size=50)
        self.marker_pub = rospy.Publisher('waypoint_markers', Marker,
                                          queue_size=5)
        self.odom_received = False
        self.base_frame = rospy.get_param("~base_frame", "base_link")
        self.fixed_frame = rospy.get_param("~fixed_frame", "map")
        # tf_listener
        self.tf_listener = tf.TransformListener()
        rospy.wait_for_message("/odometry/filtered/global", Odometry)
        rospy.Subscriber("/odometry/filtered/global", Odometry,
                         self.odom_callback, queue_size=50)
        while not self.odom_received:
            rospy.sleep(1)
        print("odom received")

        init_position = np.array([self.x0, self.y0, 0])
        prev_target = np.array([self.x0, self.y0, 0])

        while (self.red_counter < self.MAX_DATA and
               self.green_counter < self.MAX_DATA):
            # wait for data bucket to fill up
            time.sleep(1)
        print("bucket full")

        while not rospy.is_shutdown():
            self.matrix_reorder()
            print("reorder complete")
            target = self.plan_waypoint()
            print(target)
            if self.euclid_distance(target, prev_target) > self.replan_min:
                # replan: force cancel, then plan a new constant heading
                self.pool.apply(cancel_forward)
                print("replan")
                self.pool.apply_async(constant_heading, args=(target,))
                prev_target = target

            # termination condition
            if self.euclid_distance(np.array([self.x0, self.y0, 0]),
                                    init_position) > self.termination_displacement:
                self.pool.apply(cancel_forward)
                print("Task 1 Completed")
                break
            time.sleep(1)

        self.pool.close()
        self.pool.join()

    def plan_waypoint(self):
        """Return [x, y, theta]: a goal `distance` m past the nearest gate."""
        distance = 20
        dis_red = 1000
        dis_green = 1000
        # find closest available totem pairs
        for m in self.red_position:
            if self.distance_from_boat(m) < dis_red:
                nearest_red = m
                dis_red = self.distance_from_boat(m)
        for n in self.green_position:
            if self.distance_from_boat(n) < dis_green:
                nearest_green = n
                dis_green = self.distance_from_boat(n)
        # plan
        dis = nearest_red - nearest_green
        [x_center, y_center] = [(nearest_red[0] + nearest_green[0]) / 2,
                                (nearest_red[1] + nearest_green[1]) / 2]
        if math.sqrt(dis.dot(dis.T)) < 20:
            # gate is narrow: head perpendicular to the red->green line
            # (atan2(sin, cos) normalises the angle to (-pi, pi])
            theta = math.atan2(
                math.sin(math.atan2(nearest_green[1] - nearest_red[1],
                                    nearest_green[0] - nearest_red[0]) + math.pi / 2),
                math.cos(math.atan2(nearest_green[1] - nearest_red[1],
                                    nearest_green[0] - nearest_red[0]) + math.pi / 2))
        else:
            theta = math.atan2(nearest_green[1] - nearest_red[1],
                               nearest_green[0] - nearest_red[0]) + math.atan2(10, 30)
        return np.array([x_center + distance * math.cos(theta),
                         y_center + distance * math.sin(theta),
                         theta])

    def distance_from_boat(self, target):
        """Euclidean distance from the boat's current pose to *target*."""
        return math.sqrt((target[0] - self.x0) ** 2 + (target[1] - self.y0) ** 2)

    def euclid_distance(self, target1, target2):
        """Planar Euclidean distance between two [x, y, ...] points."""
        return math.sqrt((target1[0] - target2[0]) ** 2 +
                         (target1[1] - target2[1]) ** 2)

    def is_complete(self):
        pass

    def marker_callback(self, msg):
        """Buffer totem detections (type 3; id 0=red, 1=green) and recluster."""
        if len(msg.markers) > 0:
            for i in range(len(msg.markers)):
                if msg.markers[i].type == 3:
                    # may append more than 1 markers
                    if msg.markers[i].id == 0:
                        self.red_totem[self.red_counter % self.MAX_DATA] = \
                            [msg.markers[i].pose.position.x,
                             msg.markers[i].pose.position.y]
                        self.red_counter += 1
                    elif msg.markers[i].id == 1:
                        self.green_totem[self.green_counter % self.MAX_DATA] = \
                            [msg.markers[i].pose.position.x,
                             msg.markers[i].pose.position.y]
                        self.green_counter += 1

            # list is full: cluster each colour buffer into two centers
            if self.red_counter > self.MAX_DATA:
                red_kmeans = KMeans(n_clusters=2).fit(self.red_totem)
                self.red_centers = red_kmeans.cluster_centers_
            if self.green_counter > self.MAX_DATA:
                green_kmeans = KMeans(n_clusters=2).fit(self.green_totem)
                self.green_centers = green_kmeans.cluster_centers_

            # visualize markers in rviz
            for i in range(len(msg.markers)):
                self.marker_pub.publish(msg.markers[i])

    def matrix_reorder(self):
        """Order each colour's two centers by squared distance from origin."""
        if self.red_centers[0].dot(self.red_centers[0].T) < \
                self.red_centers[1].dot(self.red_centers[1].T):
            self.red_position = self.red_centers
        else:
            self.red_position[0] = self.red_centers[1]
            self.red_position[1] = self.red_centers[0]
        if self.green_centers[0].dot(self.green_centers[0].T) < \
                self.green_centers[1].dot(self.green_centers[1].T):
            self.green_position = self.green_centers
        else:
            self.green_position[0] = self.green_centers[1]
            self.green_position[1] = self.green_centers[0]

    def get_tf(self, fixed_frame, base_frame):
        """ transform from base_link to map """
        # NOTE(review): Quaternion must be imported from geometry_msgs.msg
        # (added to the module imports) -- the original file never imported it.
        trans_received = False
        while not trans_received:
            try:
                (trans, rot) = self.tf_listener.lookupTransform(fixed_frame,
                                                                base_frame,
                                                                rospy.Time(0))
                trans_received = True
                return (Point(*trans), Quaternion(*rot))
            except (tf.LookupException,
                    tf.ConnectivityException,
                    tf.ExtrapolationException):
                pass

    def odom_callback(self, msg):
        # Pose is taken from tf rather than the Odometry message itself.
        trans, rot = self.get_tf("map", "base_link")
        self.x0 = trans.x
        self.y0 = trans.y
        _, _, self.yaw0 = euler_from_quaternion((rot.x, rot.y, rot.z, rot.w))
        self.odom_received = True
# Script entry point: PassGates runs the whole task from its constructor.
if __name__ == '__main__':
    try:
        PassGates()
        # stage 1: gps
    except rospy.ROSInterruptException:
        rospy.loginfo("Task 1 Finished")
| gpl-3.0 |
mandeepdhami/horizon | openstack_dashboard/dashboards/project/images/tests.py | 9 | 19522 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from socket import timeout as socket_timeout # noqa
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.images import utils
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:images:index')
CREATE_URL = reverse('horizon:project:images:images:create')
class ImagesAndSnapshotsTests(test.TestCase):
    """Tests for the project images index view."""

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_index(self):
        images = self.images.list()
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None).AndReturn([images,
                                                               False, False])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/images/index.html')
        self.assertContains(res, 'help_text="Deleted images'
                                 ' are not recoverable."')
        self.assertIn('images_table', res.context)
        images_table = res.context['images_table']
        images = images_table.data
        # NOTE(review): assertTrue(len(x), 3) treats 3 as the failure message,
        # so these only assert non-emptiness; assertEqual was probably
        # intended -- confirm against fixture counts before tightening.
        self.assertTrue(len(images), 3)

        row_actions = images_table.get_row_actions(images[0])
        self.assertTrue(len(row_actions), 3)
        row_actions = images_table.get_row_actions(images[1])
        self.assertTrue(len(row_actions), 2)
        self.assertTrue('delete_image' not in
                        [a.name for a in row_actions])
        row_actions = images_table.get_row_actions(images[2])
        self.assertTrue(len(row_actions), 3)

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_index_no_images(self):
        # An empty image list must still render the index template.
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None).AndReturn([(),
                                                               False, False])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/images/index.html')

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_index_error(self):
        # A glance failure must be handled gracefully (template still used).
        api.glance.image_list_detailed(IsA(http.HttpRequest),
                                       marker=None) \
            .AndRaise(self.exceptions.glance)
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/images/index.html')

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_snapshot_actions(self):
        snapshots = self.snapshots.list()
        api.glance.image_list_detailed(IsA(http.HttpRequest), marker=None) \
            .AndReturn([snapshots, False, False])
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/images/index.html')
        self.assertIn('images_table', res.context)
        snaps = res.context['images_table']
        self.assertEqual(len(snaps.get_rows()), 3)

        row_actions = snaps.get_row_actions(snaps.data[0])
        # first instance - status active, owned
        self.assertEqual(len(row_actions), 4)
        self.assertEqual(row_actions[0].verbose_name, u"Launch Instance")
        self.assertEqual(row_actions[1].verbose_name, u"Create Volume")
        self.assertEqual(row_actions[2].verbose_name, u"Edit Image")
        self.assertEqual(row_actions[3].verbose_name, u"Delete Image")

        row_actions = snaps.get_row_actions(snaps.data[1])
        # second instance - status active, not owned
        self.assertEqual(len(row_actions), 2)
        self.assertEqual(row_actions[0].verbose_name, u"Launch Instance")
        self.assertEqual(row_actions[1].verbose_name, u"Create Volume")

        row_actions = snaps.get_row_actions(snaps.data[2])
        # third instance - status queued, only delete is available
        self.assertEqual(len(row_actions), 1)
        self.assertEqual(unicode(row_actions[0].verbose_name),
                         u"Delete Image")
        self.assertEqual(str(row_actions[0]), "<DeleteImage: delete>")
class ImagesAndSnapshotsUtilsTests(test.TestCase):
    """Tests for utils.get_available_images and its per-request cache."""

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_list_image(self):
        public_images = [image for image in self.images.list()
                         if image.status == 'active' and image.is_public]
        private_images = [image for image in self.images.list()
                          if (image.status == 'active' and
                              not image.is_public)]

        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([public_images, False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([private_images, False, False])
        self.mox.ReplayAll()

        ret = utils.get_available_images(self.request, self.tenant.id)

        # ami/aki kernel+ramdisk images are filtered out of the result.
        expected_images = [image for image in self.images.list()
                           if (image.status == 'active' and
                               image.container_format not in ('ami', 'aki'))]
        self.assertEqual(len(expected_images), len(ret))

    @test.create_stubs({api.glance: ('image_list_detailed',)})
    def test_list_image_using_cache(self):
        public_images = [image for image in self.images.list()
                         if image.status == 'active' and image.is_public]
        private_images = [image for image in self.images.list()
                          if (image.status == 'active' and
                              not image.is_public)]

        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([public_images, False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([private_images, False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': 'other-tenant',
                     'status': 'active'}) \
            .AndReturn([private_images, False, False])
        self.mox.ReplayAll()

        # NOTE(review): this test excludes ('ari', 'aki') while the others use
        # ('ami', 'aki') -- looks like a typo in the original; behaviour kept.
        expected_images = [image for image in self.images.list()
                           if (image.status == 'active' and
                               image.container_format not in ('ari', 'aki'))]

        images_cache = {}
        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertEqual(1, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project'][self.tenant.id]))

        # Second call for the same tenant must be served from the cache
        # (no further glance calls are replayed for it).
        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)
        self.assertEqual(len(expected_images), len(ret))

        # image list for other-tenant
        ret = utils.get_available_images(self.request, 'other-tenant',
                                         images_cache)
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertEqual(2, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project']['other-tenant']))

    @test.create_stubs({api.glance: ('image_list_detailed',),
                        exceptions: ('handle',)})
    def test_list_image_error_public_image_list(self):
        public_images = [image for image in self.images.list()
                         if image.status == 'active' and image.is_public]
        private_images = [image for image in self.images.list()
                          if (image.status == 'active' and
                              not image.is_public)]

        # First call: public listing fails and is reported, private succeeds.
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndRaise(self.exceptions.glance)
        exceptions.handle(IsA(http.HttpRequest),
                          "Unable to retrieve public images.")
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([private_images, False, False])
        # Second call: public listing retried (failure was not cached).
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([public_images, False, False])
        self.mox.ReplayAll()

        images_cache = {}
        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)

        expected_images = [image for image in private_images
                           if image.container_format not in ('ami', 'aki')]
        self.assertEqual(len(expected_images), len(ret))
        self.assertNotIn('public_images', images_cache)
        self.assertEqual(1, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project'][self.tenant.id]))

        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)

        expected_images = [image for image in self.images.list()
                           if image.container_format not in ('ami', 'aki')]
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertEqual(1, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project'][self.tenant.id]))

    @test.create_stubs({api.glance: ('image_list_detailed',),
                        exceptions: ('handle',)})
    def test_list_image_error_private_image_list(self):
        public_images = [image for image in self.images.list()
                         if image.status == 'active' and image.is_public]
        private_images = [image for image in self.images.list()
                          if (image.status == 'active' and
                              not image.is_public)]

        # First call: private listing fails and is reported, public succeeds.
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'is_public': True, 'status': 'active'}) \
            .AndReturn([public_images, False, False])
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndRaise(self.exceptions.glance)
        exceptions.handle(IsA(http.HttpRequest),
                          "Unable to retrieve images for the current project.")
        # Second call: private listing retried (failure was not cached).
        api.glance.image_list_detailed(
            IsA(http.HttpRequest),
            filters={'property-owner_id': self.tenant.id,
                     'status': 'active'}) \
            .AndReturn([private_images, False, False])
        self.mox.ReplayAll()

        images_cache = {}
        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)

        expected_images = [image for image in public_images
                           if image.container_format not in ('ami', 'aki')]
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertFalse(len(images_cache['images_by_project']))

        ret = utils.get_available_images(self.request, self.tenant.id,
                                         images_cache)

        expected_images = [image for image in self.images.list()
                           if image.container_format not in ('ami', 'aki')]
        self.assertEqual(len(expected_images), len(ret))
        self.assertEqual(
            len(public_images),
            len(images_cache['public_images']))
        self.assertEqual(1, len(images_cache['images_by_project']))
        self.assertEqual(
            len(private_images),
            len(images_cache['images_by_project'][self.tenant.id]))
class SeleniumTests(test.SeleniumTestCase):
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_modal_create_image_from_url(self):
    """Selenium: choosing a *.iso URL preselects ISO as the disk format."""
    driver = self.selenium
    images = self.images.list()
    api.glance.image_list_detailed(IsA(http.HttpRequest),
                                   marker=None).AndReturn([images,
                                                           False, False])
    filters = {'disk_format': 'aki'}
    api.glance.image_list_detailed(
        IsA(http.HttpRequest), filters=filters).AndReturn(
        [self.images.list(), False, False])
    filters = {'disk_format': 'ari'}
    api.glance.image_list_detailed(
        IsA(http.HttpRequest), filters=filters).AndReturn(
        [self.images.list(), False, False])
    self.mox.ReplayAll()

    driver.get("%s%s" % (self.live_server_url, INDEX_URL))
    # Open the modal menu
    driver.find_element_by_id("images__action_create").send_keys("\n")
    wait = self.ui.WebDriverWait(self.selenium, 10,
                                 ignored_exceptions=[socket_timeout])
    wait.until(lambda x: driver.find_element_by_id("id_disk_format"))

    srctypes = self.ui.Select(driver.find_element_by_id("id_source_type"))
    srctypes.select_by_value("url")
    copyfrom = driver.find_element_by_id("id_image_url")
    copyfrom.send_keys("http://www.test.com/test.iso")
    formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
    body = formats.first_selected_option
    self.assertTrue("ISO" in body.text,
                    "ISO should be selected when the extension is *.iso")
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_modal_create_image_from_file(self):
driver = self.selenium
images = self.images.list()
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None).AndReturn([images,
False, False])
filters = {'disk_format': 'aki'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
filters = {'disk_format': 'ari'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
self.mox.ReplayAll()
driver.get("%s%s" % (self.live_server_url, INDEX_URL))
# Open the modal menu
driver.find_element_by_id("images__action_create").send_keys("\n")
wait = self.ui.WebDriverWait(driver, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: driver.find_element_by_id("id_disk_format"))
srctypes = self.ui.Select(driver.find_element_by_id("id_source_type"))
srctypes.select_by_value("file")
driver.find_element_by_id("id_image_file").send_keys("/tmp/test.iso")
formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
body = formats.first_selected_option
self.assertTrue("ISO" in body.text,
"ISO should be selected when the extension is *.iso")
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_create_image_from_url(self):
driver = self.selenium
filters = {'disk_format': 'aki'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
filters = {'disk_format': 'ari'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
self.mox.ReplayAll()
driver.get("%s%s" % (self.live_server_url, CREATE_URL))
wait = self.ui.WebDriverWait(driver, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: driver.find_element_by_id("id_disk_format"))
srctypes = self.ui.Select(driver.find_element_by_id("id_source_type"))
srctypes.select_by_value("url")
copyfrom = driver.find_element_by_id("id_image_url")
copyfrom.send_keys("http://www.test.com/test.iso")
formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
body = formats.first_selected_option
self.assertTrue("ISO" in body.text,
"ISO should be selected when the extension is *.iso")
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_create_image_from_file(self):
driver = self.selenium
filters = {'disk_format': 'aki'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
filters = {'disk_format': 'ari'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
self.mox.ReplayAll()
driver.get("%s%s" % (self.live_server_url, CREATE_URL))
wait = self.ui.WebDriverWait(driver, 10,
ignored_exceptions=[socket_timeout])
wait.until(lambda x: driver.find_element_by_id("id_disk_format"))
srctypes = self.ui.Select(driver.find_element_by_id("id_source_type"))
srctypes.select_by_value("file")
driver.find_element_by_id("id_image_file").send_keys("/tmp/test.iso")
formats = self.ui.Select(driver.find_element_by_id("id_disk_format"))
body = formats.first_selected_option
self.assertTrue("ISO" in body.text,
"ISO should be selected when the extension is *.iso")
| apache-2.0 |
davidcox/glumpy | setup.py | 1 | 1234 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (C) 2009,2010,2011 Nicolas P. Rougier
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import os
from distutils.core import setup
from common import *
if __name__ == "__main__":
    # Remove any stale MANIFEST so distutils regenerates it from
    # MANIFEST.in on every build.
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')
    # Regenerate the package metadata modules before building.
    # write_info / write_version come from the local `common` module
    # (imported above via `from common import *`).
    write_info(os.path.join("glumpy", "info.py"))
    write_version(os.path.join("glumpy", "version.py"))
    # Only write the docs version stub when the doc sources are present
    # (e.g. sdist consumers may not ship them).
    if os.path.exists(os.path.join("doc", "src")):
        write_version(os.path.join("doc", "src", "glumpy_version.py"))
    # All distribution metadata (DISTNAME, DESCRIPTION, ...) also comes
    # from the `common` module.
    setup(name=DISTNAME,
          version=build_fverstring(),
          description=DESCRIPTION,
          long_description = LONG_DESCRIPTION,
          maintainer= MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          url=URL,
          download_url=DOWNLOAD_URL,
          license = LICENSE,
          packages=['glumpy', 'glumpy.atb', 'glumpy.shader'],
          package_data={'glumpy': ['shader/*.txt']},
          classifiers=CLASSIFIERS)
| bsd-3-clause |
Gui13/CouchPotatoServer | CouchPotato.py | 5 | 4531 | #!/usr/bin/env python
from __future__ import print_function
from logging import handlers
from os.path import dirname
import logging
import os
import select
import signal
import socket
import subprocess
import sys
import traceback
import time
# Root path
base_path = dirname(os.path.abspath(__file__))
# Insert local directories into path
sys.path.insert(0, os.path.join(base_path, 'libs'))
from couchpotato.environment import Env
from couchpotato.core.helpers.variable import getDataDir
class Loader(object):
do_restart = False
def __init__(self):
# Get options via arg
from couchpotato.runner import getOptions
self.options = getOptions(sys.argv[1:])
# Load settings
settings = Env.get('settings')
settings.setFile(self.options.config_file)
# Create data dir if needed
if self.options.data_dir:
self.data_dir = self.options.data_dir
else:
self.data_dir = os.path.expanduser(Env.setting('data_dir'))
if self.data_dir == '':
self.data_dir = getDataDir()
if not os.path.isdir(self.data_dir):
os.makedirs(self.data_dir)
# Create logging dir
self.log_dir = os.path.join(self.data_dir, 'logs');
if not os.path.isdir(self.log_dir):
os.makedirs(self.log_dir)
# Logging
from couchpotato.core.logger import CPLog
self.log = CPLog(__name__)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
hdlr = handlers.RotatingFileHandler(os.path.join(self.log_dir, 'error.log'), 'a', 500000, 10)
hdlr.setLevel(logging.CRITICAL)
hdlr.setFormatter(formatter)
self.log.logger.addHandler(hdlr)
def addSignals(self):
signal.signal(signal.SIGINT, self.onExit)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
from couchpotato.core.event import addEvent
addEvent('app.after_shutdown', self.afterShutdown)
def afterShutdown(self, restart):
self.do_restart = restart
def onExit(self, signal, frame):
from couchpotato.core.event import fireEvent
fireEvent('app.shutdown', single = True)
def run(self):
self.addSignals()
from couchpotato.runner import runCouchPotato
runCouchPotato(self.options, base_path, sys.argv[1:], data_dir = self.data_dir, log_dir = self.log_dir, Env = Env)
if self.do_restart:
self.restart()
def restart(self):
try:
# remove old pidfile first
try:
if self.runAsDaemon():
try: self.daemon.stop()
except: pass
except:
self.log.critical(traceback.format_exc())
# Release log files and shutdown logger
logging.shutdown()
time.sleep(3)
args = [sys.executable] + [os.path.join(base_path, os.path.basename(__file__))] + sys.argv[1:]
subprocess.Popen(args)
except:
self.log.critical(traceback.format_exc())
def daemonize(self):
if self.runAsDaemon():
try:
from daemon import Daemon
self.daemon = Daemon(self.options.pid_file)
self.daemon.daemonize()
except SystemExit:
raise
except:
self.log.critical(traceback.format_exc())
def runAsDaemon(self):
return self.options.daemon and self.options.pid_file
if __name__ == '__main__':
l = None
try:
l = Loader()
l.daemonize()
l.run()
except KeyboardInterrupt:
pass
except select.error:
pass
except SystemExit:
raise
except socket.error as e:
# log when socket receives SIGINT, but continue.
# previous code would have skipped over other types of IO errors too.
nr, msg = e
if nr != 4:
try:
l.log.critical(traceback.format_exc())
except:
print(traceback.format_exc())
raise
except:
try:
# if this fails we will have two tracebacks
# one for failing to log, and one for the exception that got us here.
if l:
l.log.critical(traceback.format_exc())
else:
print(traceback.format_exc())
except:
print(traceback.format_exc())
raise
| gpl-3.0 |
resmo/ansible | lib/ansible/modules/storage/netapp/_na_cdot_aggregate.py | 44 | 7263 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_aggregate
short_description: Manage NetApp cDOT aggregates.
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar (@timuster) <sumit4@netapp.com>
deprecated:
removed_in: '2.11'
why: Updated modules released with increased functionality
alternative: Use M(na_ontap_aggregate) instead.
description:
- Create or destroy aggregates on NetApp cDOT.
options:
state:
required: true
description:
- Whether the specified aggregate should exist or not.
choices: ['present', 'absent']
name:
required: true
description:
- The name of the aggregate to manage.
disk_count:
description:
- Number of disks to place into the aggregate, including parity disks.
- The disks in this newly-created aggregate come from the spare disk pool.
- The smallest disks in this pool join the aggregate first, unless the C(disk-size) argument is provided.
- Either C(disk-count) or C(disks) must be supplied. Range [0..2^31-1].
- Required when C(state=present).
'''
EXAMPLES = """
- name: Manage Aggregates
na_cdot_aggregate:
state: present
name: ansibleAggr
disk_count: 1
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Manage Aggregates
na_cdot_aggregate:
state: present
name: ansibleAggr
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTAggregate(object):
    """Ansible module object that ensures a NetApp cDOT aggregate is
    present or absent, talking to the filer through the NetApp ZAPI."""

    def __init__(self):
        # Base argument spec (hostname/username/password) from netapp_utils,
        # extended with this module's own options.
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            disk_count=dict(required=False, type='int'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            # disk_count is only mandatory when creating (state=present).
            required_if=[
                ('state', 'present', ['disk_count'])
            ],
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.disk_count = p['disk_count']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_ontap_zapi(module=self.module)

    def get_aggr(self):
        """
        Checks if aggregate exists.

        :return:
            True if aggregate found
            False if aggregate is not found
        :rtype: bool
        """
        aggr_get_iter = netapp_utils.zapi.NaElement('aggr-get-iter')
        # Query filter: match aggregates whose name equals self.name.
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-attributes', **{'aggregate-name': self.name})

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        aggr_get_iter.add_child_elem(query)

        try:
            result = self.server.invoke_successfully(aggr_get_iter,
                                                     enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            # Error 13040 denotes an aggregate not being found.
            if to_native(e.code) == "13040":
                return False
            else:
                self.module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        if (result.get_child_by_name('num-records') and
                int(result.get_child_content('num-records')) >= 1):
            return True
        else:
            return False

    def create_aggr(self):
        """Create the aggregate with self.disk_count spare disks;
        fails the module on any ZAPI error."""
        aggr_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-create', **{'aggregate': self.name,
                              'disk-count': str(self.disk_count)})

        try:
            self.server.invoke_successfully(aggr_create,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error provisioning aggregate %s: %s" % (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def delete_aggr(self):
        """Destroy the aggregate; fails the module on any ZAPI error."""
        aggr_destroy = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-destroy', **{'aggregate': self.name})

        try:
            self.server.invoke_successfully(aggr_destroy,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error removing aggregate %s: %s" % (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def rename_aggregate(self):
        # NOTE(review): both 'aggregate' and 'new-aggregate-name' are set to
        # self.name, so this call would rename the aggregate to its current
        # name. There is no separate "new name" module parameter; see the
        # dead branch in apply() below.
        aggr_rename = netapp_utils.zapi.NaElement.create_node_with_children(
            'aggr-rename', **{'aggregate': self.name,
                              'new-aggregate-name':
                                  self.name})

        try:
            self.server.invoke_successfully(aggr_rename,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg="Error renaming aggregate %s: %s" % (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def apply(self):
        """Reconcile the requested state with the filer and exit the
        Ansible module with the resulting `changed` flag."""
        changed = False
        aggregate_exists = self.get_aggr()
        rename_aggregate = False

        # check if anything needs to be changed (add/delete/update)

        if aggregate_exists:
            if self.state == 'absent':
                changed = True

            elif self.state == 'present':
                # NOTE(review): `not self.name == self.name` is always False,
                # so rename_aggregate can never become True -- the whole
                # rename path is dead code (this module is deprecated; the
                # behavior is preserved as-is).
                if self.name is not None and not self.name == self.name:
                    rename_aggregate = True
                    changed = True

        else:
            if self.state == 'present':
                # Aggregate does not exist, but requested state is present.
                changed = True

        if changed:
            if self.module.check_mode:
                # Check mode: report `changed` without touching the filer.
                pass
            else:
                if self.state == 'present':
                    if not aggregate_exists:
                        self.create_aggr()
                    else:
                        if rename_aggregate:
                            self.rename_aggregate()

                elif self.state == 'absent':
                    self.delete_aggr()

        self.module.exit_json(changed=changed)
def main():
    # Module entry point: build the wrapper object and reconcile state.
    v = NetAppCDOTAggregate()
    v.apply()

if __name__ == '__main__':
    main()
| gpl-3.0 |
funbaker/astropy | astropy/utils/argparse.py | 2 | 1552 | """Utilities and extensions for use with `argparse`."""
import os
import argparse
def directory(arg):
    """
    An argument type (for use with the ``type=`` argument to
    `argparse.ArgumentParser.add_argument`) which requires the argument
    to be an existing directory, and returns its absolute path.

    Raises
    ------
    argparse.ArgumentTypeError
        If *arg* does not name an existing directory.
    """
    # Bug fix: the previous condition was
    #     if not isinstance(arg, str) and os.path.isdir(arg):
    # which can never be true for the string arguments argparse supplies,
    # so nonexistent paths were silently accepted. The intent -- per the
    # docstring and the error message -- is simply to reject anything
    # that is not an existing directory.
    if not os.path.isdir(arg):
        raise argparse.ArgumentTypeError(
            "{0} is not a directory or does not exist (the directory must "
            "be created first)".format(arg))
    return os.path.abspath(arg)
def readable_directory(arg):
    """
    An argument type (for use with the ``type=`` argument to
    `argparse.ArgumentParser.add_argument`) which checks that the argument
    is a readable, existing directory and returns its absolute path.
    """
    path = directory(arg)
    if os.access(path, os.R_OK):
        return path
    raise argparse.ArgumentTypeError(
        "{0} exists but is not readable with its current "
        "permissions".format(path))
def writeable_directory(arg):
    """
    An argument type (for use with the ``type=`` argument to
    `argparse.ArgumentParser.add_argument`) which checks that the argument
    is a writeable, existing directory and returns its absolute path.
    """
    path = directory(arg)
    if os.access(path, os.W_OK):
        return path
    raise argparse.ArgumentTypeError(
        "{0} exists but is not writeable with its current "
        "permissions".format(path))
| bsd-3-clause |
Eagles2F/sync-engine | inbox/models/backends/oauth.py | 3 | 2666 | """
Generic OAuth class that provides abstraction for access and
refresh tokens.
"""
from datetime import datetime, timedelta
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declared_attr
from inbox.models.secret import Secret
from nylas.logging import get_logger
log = get_logger()
class TokenManager(object):
    """In-memory cache of OAuth access tokens, keyed by account id."""

    def __init__(self):
        # Maps account.id -> (token, expiration datetime, naive UTC).
        self._tokens = {}

    def get_token(self, account, force_refresh=False):
        """Return a token for *account*, reusing the cached one unless it
        is missing, already expired, or *force_refresh* is set."""
        cached = self._tokens.get(account.id)
        if cached is not None:
            token, expiration = cached
            if not force_refresh and expiration > datetime.utcnow():
                return token
        fresh_token, lifetime = account.new_token()
        self.cache_token(account, fresh_token, lifetime)
        return fresh_token

    def cache_token(self, account, token, expires_in):
        """Store *token*, shaving ten seconds off its lifetime so a token
        is never handed out right at its expiration boundary."""
        expiration = datetime.utcnow() + timedelta(seconds=expires_in - 10)
        self._tokens[account.id] = token, expiration
token_manager = TokenManager()
class OAuthAccount(object):
    """Declarative mixin for account models whose credential is an OAuth
    refresh token stored in a row of the ``Secret`` table."""

    # Secret
    @declared_attr
    def refresh_token_id(cls):
        # Foreign key to the Secret row that holds the refresh token.
        return Column(Integer, ForeignKey(Secret.id), nullable=False)

    @declared_attr
    def secret(cls):
        # One-to-one relationship to the backing Secret row.
        return relationship('Secret', cascade='all', uselist=False)

    @property
    def refresh_token(self):
        """The raw refresh token bytes, or None if no secret exists yet."""
        if not self.secret:
            return None
        return self.secret.secret

    @refresh_token.setter
    def refresh_token(self, value):
        # Must be a valid UTF-8 byte sequence without NULL bytes.
        # NOTE: the use of `unicode` means this module targets Python 2.
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        try:
            # Round-trip decode purely to validate the byte sequence.
            unicode(value, 'utf-8')
        except UnicodeDecodeError:
            raise ValueError('Invalid refresh_token')
        if b'\x00' in value:
            raise ValueError('Invalid refresh_token')
        if not self.secret:
            self.secret = Secret()
        self.secret.secret = value
        self.secret.type = 'token'

    def new_token(self):
        """Exchange the refresh token for a fresh access token via the
        provider's auth handler.

        Returns a (token, expires_in) pair -- inferred from how
        TokenManager.get_token unpacks the result; confirm against the
        auth handler implementations. Logs and re-raises on failure.
        """
        try:
            return self.auth_handler.new_token(self.refresh_token,
                                               self.client_id,
                                               self.client_secret)
        except Exception as e:
            log.error('Error while getting access token: {}'.format(e),
                      account_id=self.id,
                      exc_info=True)
            raise

    def verify(self):
        # TODO(emfree): update callers and remove this silliness.
        # Fetches (or refreshes) a token and asks the auth handler to
        # validate it.
        token = token_manager.get_token(self)
        return self.auth_handler.validate_token(token)
| agpl-3.0 |
kallimachos/archive | pygame/inkspill/inkspill.py | 1 | 18847 | # Ink Spill (a Flood It clone)
# http://inventwithpython.com/pygame
# By Al Sweigart al@inventwithpython.com
# Released under a "Simplified BSD" license
import random, sys, webbrowser, copy, pygame
from pygame.locals import *
# There are different box sizes, number of boxes, and
# life depending on the "board size" setting selected.
SMALLBOXSIZE = 60 # size is in pixels
MEDIUMBOXSIZE = 20
LARGEBOXSIZE = 11
SMALLBOARDSIZE = 6 # size is in boxes
MEDIUMBOARDSIZE = 17
LARGEBOARDSIZE = 30
SMALLMAXLIFE = 10 # number of turns
MEDIUMMAXLIFE = 30
LARGEMAXLIFE = 64
FPS = 30
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
boxSize = MEDIUMBOXSIZE
PALETTEGAPSIZE = 10
PALETTESIZE = 45
EASY = 0 # arbitrary but unique value
MEDIUM = 1 # arbitrary but unique value
HARD = 2 # arbitrary but unique value
difficulty = MEDIUM # game starts in "medium" mode
maxLife = MEDIUMMAXLIFE
boardWidth = MEDIUMBOARDSIZE
boardHeight = MEDIUMBOARDSIZE
# R G B
WHITE = (255, 255, 255)
DARKGRAY = ( 70, 70, 70)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 128, 0)
PURPLE = (255, 0, 255)
# The first color in each scheme is the background color, the next six are the palette colors.
COLORSCHEMES = (((150, 200, 255), RED, GREEN, BLUE, YELLOW, ORANGE, PURPLE),
((0, 155, 104), (97, 215, 164), (228, 0, 69), (0, 125, 50), (204, 246, 0), (148, 0, 45), (241, 109, 149)),
((195, 179, 0), (255, 239, 115), (255, 226, 0), (147, 3, 167), (24, 38, 176), (166, 147, 0), (197, 97, 211)),
((85, 0, 0), (155, 39, 102), (0, 201, 13), (255, 118, 0), (206, 0, 113), (0, 130, 9), (255, 180, 115)),
((191, 159, 64), (183, 182, 208), (4, 31, 183), (167, 184, 45), (122, 128, 212), (37, 204, 7), (88, 155, 213)),
((200, 33, 205), (116, 252, 185), (68, 56, 56), (52, 238, 83), (23, 149, 195), (222, 157, 227), (212, 86, 185)))
for i in range(len(COLORSCHEMES)):
assert len(COLORSCHEMES[i]) == 7, 'Color scheme %s does not have exactly 7 colors.' % (i)
bgColor = COLORSCHEMES[0][0]
paletteColors = COLORSCHEMES[0][1:]
def main():
global FPSCLOCK, DISPLAYSURF, LOGOIMAGE, SPOTIMAGE, SETTINGSIMAGE, SETTINGSBUTTONIMAGE, RESETBUTTONIMAGE
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
# Load images
LOGOIMAGE = pygame.image.load('inkspilllogo.png')
SPOTIMAGE = pygame.image.load('inkspillspot.png')
SETTINGSIMAGE = pygame.image.load('inkspillsettings.png')
SETTINGSBUTTONIMAGE = pygame.image.load('inkspillsettingsbutton.png')
RESETBUTTONIMAGE = pygame.image.load('inkspillresetbutton.png')
pygame.display.set_caption('Ink Spill')
mousex = 0
mousey = 0
mainBoard = generateRandomBoard(boardWidth, boardHeight, difficulty)
life = maxLife
lastPaletteClicked = None
while True: # main game loop
paletteClicked = None
resetGame = False
# Draw the screen.
DISPLAYSURF.fill(bgColor)
drawLogoAndButtons()
drawBoard(mainBoard)
drawLifeMeter(life)
drawPalettes()
checkForQuit()
for event in pygame.event.get(): # event handling loop
if event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
if pygame.Rect(WINDOWWIDTH - SETTINGSBUTTONIMAGE.get_width(),
WINDOWHEIGHT - SETTINGSBUTTONIMAGE.get_height(),
SETTINGSBUTTONIMAGE.get_width(),
SETTINGSBUTTONIMAGE.get_height()).collidepoint(mousex, mousey):
resetGame = showSettingsScreen() # clicked on Settings button
elif pygame.Rect(WINDOWWIDTH - RESETBUTTONIMAGE.get_width(),
WINDOWHEIGHT - SETTINGSBUTTONIMAGE.get_height() - RESETBUTTONIMAGE.get_height(),
RESETBUTTONIMAGE.get_width(),
RESETBUTTONIMAGE.get_height()).collidepoint(mousex, mousey):
resetGame = True # clicked on Reset button
else:
# check if a palette button was clicked
paletteClicked = getColorOfPaletteAt(mousex, mousey)
if paletteClicked != None and paletteClicked != lastPaletteClicked:
# a palette button was clicked that is different from the
# last palette button clicked (this check prevents the player
# from accidentally clicking the same palette twice)
lastPaletteClicked = paletteClicked
floodAnimation(mainBoard, paletteClicked)
life -= 1
resetGame = False
if hasWon(mainBoard):
for i in range(4): # flash border 4 times
flashBorderAnimation(WHITE, mainBoard)
resetGame = True
pygame.time.wait(2000) # pause so the player can bask in victory
elif life == 0:
# life is zero, so player has lost
drawLifeMeter(0)
pygame.display.update()
pygame.time.wait(400)
for i in range(4):
flashBorderAnimation(BLACK, mainBoard)
resetGame = True
pygame.time.wait(2000) # pause so the player can suffer in their defeat
if resetGame:
# start a new game
mainBoard = generateRandomBoard(boardWidth, boardHeight, difficulty)
life = maxLife
lastPaletteClicked = None
pygame.display.update()
FPSCLOCK.tick(FPS)
def checkForQuit():
# Terminates the program if there are any QUIT or escape key events.
for event in pygame.event.get(QUIT): # get all the QUIT events
pygame.quit() # terminate if any QUIT events are present
sys.exit()
for event in pygame.event.get(KEYUP): # get all the KEYUP events
if event.key == K_ESCAPE:
pygame.quit() # terminate if the KEYUP event was for the Esc key
sys.exit()
pygame.event.post(event) # put the other KEYUP event objects back
def hasWon(board):
    """Return True when every box on the board has the same color,
    i.e. the player has flooded the entire board."""
    target = board[0][0]
    return all(board[col][row] == target
               for col in range(boardWidth)
               for row in range(boardHeight))
def showSettingsScreen():
global difficulty, boxSize, boardWidth, boardHeight, maxLife, paletteColors, bgColor
# The pixel coordinates in this function were obtained by loading
# the inkspillsettings.png image into a graphics editor and reading
# the pixel coordinates from there. Handy trick.
origDifficulty = difficulty
origBoxSize = boxSize
screenNeedsRedraw = True
while True:
if screenNeedsRedraw:
DISPLAYSURF.fill(bgColor)
DISPLAYSURF.blit(SETTINGSIMAGE, (0,0))
# place the ink spot marker next to the selected difficulty
if difficulty == EASY:
DISPLAYSURF.blit(SPOTIMAGE, (30, 4))
if difficulty == MEDIUM:
DISPLAYSURF.blit(SPOTIMAGE, (8, 41))
if difficulty == HARD:
DISPLAYSURF.blit(SPOTIMAGE, (30, 76))
# place the ink spot marker next to the selected size
if boxSize == SMALLBOXSIZE:
DISPLAYSURF.blit(SPOTIMAGE, (22, 150))
if boxSize == MEDIUMBOXSIZE:
DISPLAYSURF.blit(SPOTIMAGE, (11, 185))
if boxSize == LARGEBOXSIZE:
DISPLAYSURF.blit(SPOTIMAGE, (24, 220))
for i in range(len(COLORSCHEMES)):
drawColorSchemeBoxes(500, i * 60 + 30, i)
pygame.display.update()
screenNeedsRedraw = False # by default, don't redraw the screen
for event in pygame.event.get(): # event handling loop
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == KEYUP:
if event.key == K_ESCAPE:
# Esc key on settings screen goes back to game
return not (origDifficulty == difficulty and origBoxSize == boxSize)
elif event.type == MOUSEBUTTONUP:
screenNeedsRedraw = True # screen should be redrawn
mousex, mousey = event.pos # syntactic sugar
# check for clicks on the difficulty buttons
if pygame.Rect(74, 16, 111, 30).collidepoint(mousex, mousey):
difficulty = EASY
elif pygame.Rect(53, 50, 104, 29).collidepoint(mousex, mousey):
difficulty = MEDIUM
elif pygame.Rect(72, 85, 65, 31).collidepoint(mousex, mousey):
difficulty = HARD
# check for clicks on the size buttons
elif pygame.Rect(63, 156, 84, 31).collidepoint(mousex, mousey):
# small board size setting:
boxSize = SMALLBOXSIZE
boardWidth = SMALLBOARDSIZE
boardHeight = SMALLBOARDSIZE
maxLife = SMALLMAXLIFE
elif pygame.Rect(52, 192, 106,32).collidepoint(mousex, mousey):
# medium board size setting:
boxSize = MEDIUMBOXSIZE
boardWidth = MEDIUMBOARDSIZE
boardHeight = MEDIUMBOARDSIZE
maxLife = MEDIUMMAXLIFE
elif pygame.Rect(67, 228, 58, 37).collidepoint(mousex, mousey):
# large board size setting:
boxSize = LARGEBOXSIZE
boardWidth = LARGEBOARDSIZE
boardHeight = LARGEBOARDSIZE
maxLife = LARGEMAXLIFE
elif pygame.Rect(14, 299, 371, 97).collidepoint(mousex, mousey):
# clicked on the "learn programming" ad
webbrowser.open('http://inventwithpython.com') # opens a web browser
elif pygame.Rect(178, 418, 215, 34).collidepoint(mousex, mousey):
# clicked on the "back to game" button
return not (origDifficulty == difficulty and origBoxSize == boxSize)
for i in range(len(COLORSCHEMES)):
# clicked on a color scheme button
if pygame.Rect(500, 30 + i * 60, MEDIUMBOXSIZE * 3, MEDIUMBOXSIZE * 2).collidepoint(mousex, mousey):
bgColor = COLORSCHEMES[i][0]
paletteColors = COLORSCHEMES[i][1:]
def drawColorSchemeBoxes(x, y, schemeNum):
# Draws the color scheme boxes that appear on the "Settings" screen.
for boxy in range(2):
for boxx in range(3):
pygame.draw.rect(DISPLAYSURF, COLORSCHEMES[schemeNum][3 * boxy + boxx + 1], (x + MEDIUMBOXSIZE * boxx, y + MEDIUMBOXSIZE * boxy, MEDIUMBOXSIZE, MEDIUMBOXSIZE))
if paletteColors == COLORSCHEMES[schemeNum][1:]:
# put the ink spot next to the selected color scheme
DISPLAYSURF.blit(SPOTIMAGE, (x - 50, y))
def flashBorderAnimation(color, board, animationSpeed=30):
origSurf = DISPLAYSURF.copy()
flashSurf = pygame.Surface(DISPLAYSURF.get_size())
flashSurf = flashSurf.convert_alpha()
for start, end, step in ((0, 256, 1), (255, 0, -1)):
# the first iteration on the outer loop will set the inner loop
# to have transparency go from 0 to 255, the second iteration will
# have it go from 255 to 0. This is the "flash".
for transparency in range(start, end, animationSpeed * step):
DISPLAYSURF.blit(origSurf, (0, 0))
r, g, b = color
flashSurf.fill((r, g, b, transparency))
DISPLAYSURF.blit(flashSurf, (0, 0))
drawBoard(board) # draw board ON TOP OF the transparency layer
pygame.display.update()
FPSCLOCK.tick(FPS)
DISPLAYSURF.blit(origSurf, (0, 0)) # redraw the original surface
def floodAnimation(board, paletteClicked, animationSpeed=25):
origBoard = copy.deepcopy(board)
floodFill(board, board[0][0], paletteClicked, 0, 0)
for transparency in range(0, 255, animationSpeed):
# The "new" board slowly become opaque over the original board.
drawBoard(origBoard)
drawBoard(board, transparency)
pygame.display.update()
FPSCLOCK.tick(FPS)
def generateRandomBoard(width, height, difficulty=MEDIUM):
    """Create a ``width x height`` board of random palette-color indexes.

    On EASY and MEDIUM difficulty, a number of boxes are overwritten with
    a random neighbor's color afterwards, producing larger single-color
    regions that make the board easier to flood.
    """
    # Creates a board data structure with random colors for each box.
    board = []
    for x in range(width):
        column = []
        for y in range(height):
            column.append(random.randint(0, len(paletteColors) - 1))
        board.append(column)

    # Make board easier by setting some boxes to same color as a neighbor.
    # Determine how many boxes to change.
    if difficulty == EASY:
        if boxSize == SMALLBOXSIZE:
            boxesToChange = 100
        else:
            boxesToChange = 1500
    elif difficulty == MEDIUM:
        if boxSize == SMALLBOXSIZE:
            boxesToChange = 5
        else:
            boxesToChange = 200
    else:
        boxesToChange = 0

    # Change neighbor's colors:
    # Bug fix: these assignments previously used `==` (a no-op comparison)
    # instead of `=`, so the difficulty adjustment silently did nothing.
    for i in range(boxesToChange):
        # Randomly choose a box (away from the border) whose color to copy.
        x = random.randint(1, width - 2)
        y = random.randint(1, height - 2)

        # Randomly choose neighbors to change.
        direction = random.randint(0, 3)
        if direction == 0:  # change left and up neighbor
            board[x - 1][y] = board[x][y]
            board[x][y - 1] = board[x][y]
        elif direction == 1:  # change right and down neighbor
            board[x + 1][y] = board[x][y]
            board[x][y + 1] = board[x][y]
        elif direction == 2:  # change right and up neighbor
            board[x][y - 1] = board[x][y]
            board[x + 1][y] = board[x][y]
        else:  # change left and down neighbor
            board[x][y + 1] = board[x][y]
            board[x - 1][y] = board[x][y]
    return board
def drawLogoAndButtons():
# draw the Ink Spill logo and Settings and Reset buttons.
DISPLAYSURF.blit(LOGOIMAGE, (WINDOWWIDTH - LOGOIMAGE.get_width(), 0))
DISPLAYSURF.blit(SETTINGSBUTTONIMAGE, (WINDOWWIDTH - SETTINGSBUTTONIMAGE.get_width(), WINDOWHEIGHT - SETTINGSBUTTONIMAGE.get_height()))
DISPLAYSURF.blit(RESETBUTTONIMAGE, (WINDOWWIDTH - RESETBUTTONIMAGE.get_width(), WINDOWHEIGHT - SETTINGSBUTTONIMAGE.get_height() - RESETBUTTONIMAGE.get_height()))
def drawBoard(board, transparency=255):
# The colored squares are drawn to a temporary surface which is then
# drawn to the DISPLAYSURF surface. This is done so we can draw the
# squares with transparency on top of DISPLAYSURF as it currently is.
tempSurf = pygame.Surface(DISPLAYSURF.get_size())
tempSurf = tempSurf.convert_alpha()
tempSurf.fill((0, 0, 0, 0))
for x in range(boardWidth):
for y in range(boardHeight):
left, top = leftTopPixelCoordOfBox(x, y)
r, g, b = paletteColors[board[x][y]]
pygame.draw.rect(tempSurf, (r, g, b, transparency), (left, top, boxSize, boxSize))
left, top = leftTopPixelCoordOfBox(0, 0)
pygame.draw.rect(tempSurf, BLACK, (left-1, top-1, boxSize * boardWidth + 1, boxSize * boardHeight + 1), 1)
DISPLAYSURF.blit(tempSurf, (0, 0))
def drawPalettes():
# Draws the six color palettes at the bottom of the screen.
numColors = len(paletteColors)
xmargin = int((WINDOWWIDTH - ((PALETTESIZE * numColors) + (PALETTEGAPSIZE * (numColors - 1)))) / 2)
for i in range(numColors):
left = xmargin + (i * PALETTESIZE) + (i * PALETTEGAPSIZE)
top = WINDOWHEIGHT - PALETTESIZE - 10
pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top, PALETTESIZE, PALETTESIZE))
pygame.draw.rect(DISPLAYSURF, bgColor, (left + 2, top + 2, PALETTESIZE - 4, PALETTESIZE - 4), 2)
def drawLifeMeter(currentLife):
lifeBoxSize = int((WINDOWHEIGHT - 40) / maxLife)
# Draw background color of life meter.
pygame.draw.rect(DISPLAYSURF, bgColor, (20, 20, 20, 20 + (maxLife * lifeBoxSize)))
for i in range(maxLife):
if currentLife >= (maxLife - i): # draw a solid red box
pygame.draw.rect(DISPLAYSURF, RED, (20, 20 + (i * lifeBoxSize), 20, lifeBoxSize))
pygame.draw.rect(DISPLAYSURF, WHITE, (20, 20 + (i * lifeBoxSize), 20, lifeBoxSize), 1) # draw white outline
def getColorOfPaletteAt(x, y):
    """Return the index of the palette under pixel (x, y), or None if the
    point is not over any palette."""
    count = len(paletteColors)
    leftMargin = int((WINDOWWIDTH - ((PALETTESIZE * count) + (PALETTEGAPSIZE * (count - 1)))) / 2)
    paletteTop = WINDOWHEIGHT - PALETTESIZE - 10
    for index in range(count):
        # Hit-test the click point against this palette's rectangle.
        paletteLeft = leftMargin + index * (PALETTESIZE + PALETTEGAPSIZE)
        hitBox = pygame.Rect(paletteLeft, paletteTop, PALETTESIZE, PALETTESIZE)
        if hitBox.collidepoint(x, y):
            return index
    return None
def floodFill(board, oldColor, newColor, x, y):
    """Repaint the connected region of ``oldColor`` containing (x, y) with
    ``newColor``.

    Rewritten iteratively (explicit stack) so a large single-color board
    cannot exhaust Python's recursion limit, which the previous recursive
    version could. Bounds are taken from ``board`` itself (column-major:
    board[x][y]), so any rectangular board works, not only the module-level
    boardWidth x boardHeight one -- a backward-compatible generalization.
    """
    if oldColor == newColor:
        return  # nothing to do; also prevents an endless re-fill of the region
    width = len(board)
    height = len(board[0]) if width else 0
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        if board[cx][cy] != oldColor:
            continue
        board[cx][cy] = newColor  # change the color of the current box
        # Queue the four neighbors for processing:
        if cx > 0:
            stack.append((cx - 1, cy))  # box to the left
        if cx < width - 1:
            stack.append((cx + 1, cy))  # box to the right
        if cy > 0:
            stack.append((cx, cy - 1))  # box above
        if cy < height - 1:
            stack.append((cx, cy + 1))  # box below
def leftTopPixelCoordOfBox(boxx, boxy):
    """Translate board coordinates (boxx, boxy) into the pixel position of
    that box's top-left corner, with the whole board centered in the window."""
    xOffset = int((WINDOWWIDTH - (boardWidth * boxSize)) / 2)
    yOffset = int((WINDOWHEIGHT - (boardHeight * boxSize)) / 2)
    return (boxx * boxSize + xOffset, boxy * boxSize + yOffset)
if __name__ == '__main__':
    main()  # run the game only when executed as a script, not on import
| gpl-3.0 |
erkanay/django | django/db/migrations/operations/fields.py | 14 | 8948 | from __future__ import unicode_literals
from django.db.models.fields import NOT_PROVIDED
from django.utils import six
from .base import Operation
class AddField(Operation):
    """
    Adds a field to a model.
    """
    def __init__(self, model_name, name, field, preserve_default=True):
        # preserve_default=False means the default is only used while adding
        # the column (to backfill existing rows) and must not survive into
        # the migrated state.
        self.model_name = model_name
        self.name = name
        self.field = field
        self.preserve_default = preserve_default
    def state_forwards(self, app_label, state):
        """Mutate the in-memory project state to include the new field."""
        # If preserve default is off, don't use the default for future state
        if not self.preserve_default:
            field = self.field.clone()
            field.default = NOT_PROVIDED
        else:
            field = self.field
        state.models[app_label, self.model_name.lower()].fields.append((self.name, field))
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """Add the column to the database, temporarily restoring the original
        default so existing rows can be populated when preserve_default=False."""
        from_model = from_state.render().get_model(app_label, self.model_name)
        to_model = to_state.render().get_model(app_label, self.model_name)
        if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
            field = to_model._meta.get_field_by_name(self.name)[0]
            if not self.preserve_default:
                field.default = self.field.default
            schema_editor.add_field(
                from_model,
                field,
            )
            if not self.preserve_default:
                # Undo the temporary default so later operations see NOT_PROVIDED.
                field.default = NOT_PROVIDED
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """Reverse of database_forwards: drop the column again."""
        from_model = from_state.render().get_model(app_label, self.model_name)
        if self.allowed_to_migrate(schema_editor.connection.alias, from_model):
            schema_editor.remove_field(from_model, from_model._meta.get_field_by_name(self.name)[0])
    def describe(self):
        """One-line, human-readable description of this operation."""
        return "Add field %s to %s" % (self.name, self.model_name)
    def __eq__(self, other):
        # preserve_default is not part of equality; only the target and the
        # deconstructed field (minus its name, deconstruct()[0]) are compared.
        return (
            (self.__class__ == other.__class__) and
            (self.name == other.name) and
            (self.model_name.lower() == other.model_name.lower()) and
            (self.field.deconstruct()[1:] == other.field.deconstruct()[1:])
        )
    def references_model(self, name, app_label=None):
        """True if this operation touches the model called ``name``."""
        return name.lower() == self.model_name.lower()
    def references_field(self, model_name, name, app_label=None):
        """True if this operation touches ``model_name.name``."""
        return self.references_model(model_name) and name.lower() == self.name.lower()
class RemoveField(Operation):
    """
    Removes a field from a model.
    """
    def __init__(self, model_name, name):
        self.model_name = model_name
        self.name = name
    def state_forwards(self, app_label, state):
        """Drop the named field from the in-memory model state."""
        model_state = state.models[app_label, self.model_name.lower()]
        model_state.fields = [
            (field_name, field)
            for field_name, field in model_state.fields
            if field_name != self.name
        ]
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """Drop the column from the database."""
        from_model = from_state.render().get_model(app_label, self.model_name)
        if self.allowed_to_migrate(schema_editor.connection.alias, from_model):
            field = from_model._meta.get_field_by_name(self.name)[0]
            schema_editor.remove_field(from_model, field)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """Re-add the column, taking its definition from the target state."""
        from_model = from_state.render().get_model(app_label, self.model_name)
        to_model = to_state.render().get_model(app_label, self.model_name)
        if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
            field = to_model._meta.get_field_by_name(self.name)[0]
            schema_editor.add_field(from_model, field)
    def describe(self):
        """One-line, human-readable description of this operation."""
        return "Remove field %s from %s" % (self.name, self.model_name)
    def references_model(self, name, app_label=None):
        return self.model_name.lower() == name.lower()
    def references_field(self, model_name, name, app_label=None):
        return self.references_model(model_name) and self.name.lower() == name.lower()
class AlterField(Operation):
    """
    Alters a field's database column (e.g. null, max_length) to the provided new field
    """
    def __init__(self, model_name, name, field):
        self.model_name = model_name
        self.name = name
        self.field = field
    def state_forwards(self, app_label, state):
        # Swap the named field for the new definition, preserving field order.
        state.models[app_label, self.model_name.lower()].fields = [
            (n, self.field if n == self.name else f) for n, f in state.models[app_label, self.model_name.lower()].fields
        ]
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """Alter the column in the database from the old to the new definition."""
        from_model = from_state.render().get_model(app_label, self.model_name)
        to_model = to_state.render().get_model(app_label, self.model_name)
        if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
            from_field = from_model._meta.get_field_by_name(self.name)[0]
            to_field = to_model._meta.get_field_by_name(self.name)[0]
            # If the field is a relatedfield with an unresolved rel.to, just
            # set it equal to the other field side. Bandaid fix for AlterField
            # migrations that are part of a RenameModel change.
            if from_field.rel and from_field.rel.to:
                if isinstance(from_field.rel.to, six.string_types):
                    from_field.rel.to = to_field.rel.to
                elif to_field.rel and isinstance(to_field.rel.to, six.string_types):
                    to_field.rel.to = from_field.rel.to
            schema_editor.alter_field(from_model, from_field, to_field)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Altering is symmetric, so the forwards logic is reused here --
        # presumably the caller passes the states swapped; verify against the
        # migration executor.
        self.database_forwards(app_label, schema_editor, from_state, to_state)
    def describe(self):
        """One-line, human-readable description of this operation."""
        return "Alter field %s on %s" % (self.name, self.model_name)
    def __eq__(self, other):
        # Equality ignores the field's own name entry (deconstruct()[0]).
        return (
            (self.__class__ == other.__class__) and
            (self.name == other.name) and
            (self.model_name.lower() == other.model_name.lower()) and
            (self.field.deconstruct()[1:] == other.field.deconstruct()[1:])
        )
    def references_model(self, name, app_label=None):
        return name.lower() == self.model_name.lower()
    def references_field(self, model_name, name, app_label=None):
        return self.references_model(model_name) and name.lower() == self.name.lower()
class RenameField(Operation):
    """
    Renames a field on the model. Might affect db_column too.
    """
    def __init__(self, model_name, old_name, new_name):
        self.model_name = model_name
        self.old_name = old_name
        self.new_name = new_name
    def state_forwards(self, app_label, state):
        """Rename the field in the in-memory state, keeping field order and
        updating any unique_together entries that mention it."""
        model_state = state.models[app_label, self.model_name.lower()]
        renamed = []
        for field_name, field in model_state.fields:
            if field_name == self.old_name:
                renamed.append((self.new_name, field))
            else:
                renamed.append((field_name, field))
        model_state.fields = renamed
        # unique_together must track the rename as well.
        options = model_state.options
        if "unique_together" in options:
            options['unique_together'] = [
                [self.new_name if member == self.old_name else member for member in unique]
                for unique in options['unique_together']
            ]
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """Rename the column old_name -> new_name."""
        from_model = from_state.render().get_model(app_label, self.model_name)
        to_model = to_state.render().get_model(app_label, self.model_name)
        if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
            old_field = from_model._meta.get_field_by_name(self.old_name)[0]
            new_field = to_model._meta.get_field_by_name(self.new_name)[0]
            schema_editor.alter_field(from_model, old_field, new_field)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """Rename the column back: new_name -> old_name."""
        from_model = from_state.render().get_model(app_label, self.model_name)
        to_model = to_state.render().get_model(app_label, self.model_name)
        if self.allowed_to_migrate(schema_editor.connection.alias, to_model):
            current_field = from_model._meta.get_field_by_name(self.new_name)[0]
            restored_field = to_model._meta.get_field_by_name(self.old_name)[0]
            schema_editor.alter_field(from_model, current_field, restored_field)
    def describe(self):
        """One-line, human-readable description of this operation."""
        return "Rename field %s on %s to %s" % (self.old_name, self.model_name, self.new_name)
    def references_model(self, name, app_label=None):
        return self.model_name.lower() == name.lower()
    def references_field(self, model_name, name, app_label=None):
        if not self.references_model(model_name):
            return False
        return name.lower() in (self.old_name.lower(), self.new_name.lower())
| bsd-3-clause |
pratapvardhan/scikit-image | skimage/util/tests/test_apply_parallel.py | 8 | 1976 | from __future__ import absolute_import
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing.decorators import skipif
from skimage.filters import threshold_adaptive, gaussian
from skimage.util.apply_parallel import apply_parallel, dask_available
@skipif(not dask_available)
def test_apply_parallel():
    """apply_parallel should match direct filter application, both with
    extra_arguments/extra_keywords and with a plain wrapped callable."""
    # data
    a = np.arange(144).reshape(12, 12).astype(float)
    # apply the filter
    expected1 = threshold_adaptive(a, 3)
    result1 = apply_parallel(threshold_adaptive, a, chunks=(6, 6), depth=5,
                             extra_arguments=(3,),
                             extra_keywords={'mode': 'reflect'})
    assert_array_almost_equal(result1, expected1)
    def wrapped_gauss(arr):
        return gaussian(arr, 1, mode='reflect')
    expected2 = gaussian(a, 1, mode='reflect')
    result2 = apply_parallel(wrapped_gauss, a, chunks=(6, 6), depth=5)
    assert_array_almost_equal(result2, expected2)
@skipif(not dask_available)
def test_no_chunks():
    """apply_parallel without an explicit `chunks` argument should still
    equal applying the function to the whole array at once."""
    a = np.ones(1 * 4 * 8 * 9).reshape(1, 4, 8, 9)
    def add_42(arr):
        return arr + 42
    expected = add_42(a)
    result = apply_parallel(add_42, a)
    assert_array_almost_equal(result, expected)
@skipif(not dask_available)
def test_apply_parallel_wrap():
    """apply_parallel with mode='wrap' should match gaussian(..., mode='wrap')."""
    def wrapped(arr):
        return gaussian(arr, 1, mode='wrap')
    a = np.arange(144).reshape(12, 12).astype(float)
    expected = gaussian(a, 1, mode='wrap')
    result = apply_parallel(wrapped, a, chunks=(6, 6), depth=5, mode='wrap')
    assert_array_almost_equal(result, expected)
@skipif(not dask_available)
def test_apply_parallel_nearest():
    """apply_parallel with mode='nearest' and a per-axis depth dict should
    match gaussian(..., mode='nearest')."""
    def wrapped(arr):
        return gaussian(arr, 1, mode='nearest')
    a = np.arange(144).reshape(12, 12).astype(float)
    expected = gaussian(a, 1, mode='nearest')
    result = apply_parallel(wrapped, a, chunks=(6, 6), depth={0: 5, 1: 5},
                            mode='nearest')
    assert_array_almost_equal(result, expected)
| bsd-3-clause |
pocmo/focus-android | tools/search/scrape_plugins.py | 7 | 7472 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from lxml import html
from lxml import etree
import copy
import json
import os
import requests
import shutil
import sys
import subprocess
import urllib
# Paths for en-US plugins included in the core Android repo.
EN_PLUGINS_FILE_URL = "https://hg.mozilla.org/releases/mozilla-aurora/raw-file/default/mobile/locales/en-US/searchplugins/%s"
# Paths for plugins in the l10n repos.
L10N_PLUGINS_FILE_URL = "https://hg.mozilla.org/releases/l10n/mozilla-aurora/%s/raw-file/default/mobile/searchplugins/%%s"
# TODO: Download list from Android repo once the mobile list is in the tree.
LIST_PATH = "./list.json"
ns = { "search": "http://www.mozilla.org/2006/browser/search/" }
def main():
    """Rebuild SearchPlugins/ from the Mozilla hg repos and write the
    locale -> engine-name mapping to search_configuration.json."""
    # Remove and recreate the SearchPlugins directory.
    if os.path.exists("SearchPlugins"):
        shutil.rmtree("SearchPlugins")
    os.makedirs("SearchPlugins")
    with open(LIST_PATH) as list:
        plugins = json.load(list)
    engines = {}
    # Import engines from the l10n repos.
    locales = plugins["locales"]
    for locale in locales:
        regions = locales[locale]
        for region in regions:
            if region == "default":
                code = locale
            else:
                language = locale.split("-")[0]
                code = ("%s-%s" % (language, region))
            print("adding %s..." % code)
            visibleEngines = regions[region]["visibleDefaultEngines"]
            # downloadEngines mutates visibleEngines in place (removes 'bing',
            # inserts 'duckduckgo') before we record it below.
            downloadEngines(code, L10nScraper(locale), visibleEngines)
            engines[code] = visibleEngines
    # Import default engines from the core repo.
    print("adding defaults...")
    defaultEngines = EnScraper().getFileList()
    downloadEngines("default", EnScraper(), defaultEngines)
    engines['default'] = plugins['default']['visibleDefaultEngines']
    # Remove Bing.
    if "bing" in engines['default']: engines['default'].remove('bing')
    # Make sure fallback directories contain any skipped engines.
    verifyEngines(engines)
    # Write the list of engine names for each locale.
    writeList(engines)
def downloadEngines(locale, scraper, engines):
    """Download the XML plugin for each engine in *engines* into
    SearchPlugins/<locale>/, applying any per-engine overlay file.

    NOTE: mutates *engines* in place (removes 'bing', inserts 'duckduckgo');
    callers such as main() rely on keeping a reference to the mutated list.
    """
    directory = os.path.join("SearchPlugins", locale)
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Remove Bing.
    if 'bing' in engines: engines.remove('bing')
    # Always include DuckDuckGo.
    # Walk the list from the end to find an (approximately) alphabetical
    # insertion point: '~' sorts after any lowercase ASCII name, and
    # 'google*' entries are skipped so DuckDuckGo is never placed before them.
    if "duckduckgo" not in engines:
        lastEngine = '~'
        for i, engine in reversed(list(enumerate(engines))):
            if i > 0 and "duckduckgo" < engine and engine < lastEngine and not engine.startswith("google"):
                lastEngine = engine
                continue
            engines.insert(i + 1, "duckduckgo")
            break
    for engine in engines:
        file = engine + ".xml"
        path = os.path.join(directory, file)
        downloadedFile = scraper.getFile(file)
        if downloadedFile == None:
            # Engine not present in this repo; a fallback directory is
            # expected to provide it (checked later by verifyEngines).
            print("  skipping: %s..." % file)
            continue
        print("  downloading: %s..." % file)
        name, extension = os.path.splitext(file)
        # Apply iOS-specific overlays for this engine if they are defined.
        if extension == ".xml":
            engine = name.split("-")[0]
            overlay = overlayForEngine(engine)
            if overlay:
                plugin = etree.parse(downloadedFile)
                overlay.apply(plugin)
                contents = etree.tostring(plugin.getroot(), encoding="utf-8", pretty_print=True)
                with open(path, "w") as outfile:
                    outfile.write(contents)
                continue
        # Otherwise, just use the downloaded file as is.
        shutil.move(downloadedFile, path)
def verifyEngines(engines):
    """Check that every engine listed for every locale has an XML file in the
    locale's directory, its language fallback, or the default directory."""
    print("verifying engines...")
    missing_any = False
    for locale, engine_list in engines.items():
        fallbacks = [locale, locale.split('-')[0], 'default']
        search_dirs = [os.path.join('SearchPlugins', name) for name in fallbacks]
        for engine in engine_list:
            filename = engine + '.xml'
            found = any(
                os.path.exists(os.path.join(directory, filename))
                for directory in search_dirs
            )
            if not found:
                missing_any = True
                print("  ERROR: missing engine %s for locale %s" % (engine, locale))
    if not missing_any:
        print("  OK!")
def overlayForEngine(engine):
    """Return an Overlay for *engine* if SearchOverlays/<engine>.xml exists,
    otherwise None."""
    overlay_path = os.path.join("SearchOverlays", "%s.xml" % engine)
    return Overlay(overlay_path) if os.path.exists(overlay_path) else None
def writeList(engines):
    """Write the locale -> engine-name mapping to search_configuration.json.

    An earlier revision also emitted an iOS-style plist here; that dead,
    commented-out code has been removed in favor of the JSON output the
    app actually consumes (recover it from version control if needed).
    """
    with open("search_configuration.json", "w") as outfile:
        json.dump(engines, outfile)
class Scraper:
    # Base class for fetching searchplugin files from a Mozilla hgweb UI.
    # Subclasses set self.pluginsFileURL, a format string with one %s slot
    # for the file name. Python 2 only (urllib.urlopen/urlretrieve).
    def pluginsFileURL(self): pass
    def getFile(self, file):
        """Download *file*; return a local temp-file path, or None on non-200."""
        path = self.pluginsFileURL % file
        handle = urllib.urlopen(path)
        if handle.code != 200:
            return None
        result = urllib.urlretrieve(path)
        return result[0]
    def getFileList(self):
        """Return engine names (without .xml) listed in the remote directory.

        Parses the raw hgweb directory listing: the last space-separated
        token of each line is taken as the file name -- NOTE(review):
        assumes the listing format is stable; verify against hgweb.
        """
        response = requests.get(self.pluginsFileURL % '')
        if not response.ok:
            raise Exception("error: could not read plugins directory")
        lines = response.content.strip().split('\n')
        lines = map(lambda line: line.split(' ')[-1], lines)
        lines = filter(lambda f: f.endswith('.xml'), lines)
        return map(lambda f: f[:-4], lines)
class L10nScraper(Scraper):
    # Scraper for a specific locale's l10n repository.
    def __init__(self, locale):
        self.pluginsFileURL = L10N_PLUGINS_FILE_URL % locale
class EnScraper(Scraper):
    # Scraper for the en-US plugins shipped in the core mozilla repository.
    def __init__(self):
        self.pluginsFileURL = EN_PLUGINS_FILE_URL
class Overlay:
    # An XML "overlay" file: a sequence of <replace>/<append> actions that
    # are applied to a downloaded search-plugin document to customize it.
    def __init__(self, path):
        overlay = etree.parse(path)
        self.actions = overlay.getroot().getchildren()
    def apply(self, doc):
        """Apply every action in this overlay to the lxml document *doc*."""
        for action in self.actions:
            if action.tag == "replace":
                self.replace(target=action.get("target"), replacement=action[0], doc=doc)
            elif action.tag == "append":
                self.append(parent=action.get("parent"), child=action[0], doc=doc)
    def replace(self, target, replacement, doc):
        """Replace every node matching XPath *target* with a copy of *replacement*."""
        for element in doc.xpath(target, namespaces=ns):
            replacementCopy = copy.deepcopy(replacement)
            element.getparent().replace(element, replacementCopy)
            # Try to preserve indentation.
            replacementCopy.tail = element.tail
    def append(self, parent, child, doc):
        """Append a copy of *child* to every node matching XPath *parent*."""
        for element in doc.xpath(parent, namespaces=ns):
            childCopy = copy.deepcopy(child)
            element.append(childCopy)
            # Try to preserve indentation.
            childCopy.tail = "\n"
            previous = childCopy.getprevious()
            if previous is not None:
                # Copy tail whitespace from the preceding siblings so the
                # appended node lines up with the existing children.
                childCopy.tail = previous.tail
                prevPrevious = previous.getprevious()
                if prevPrevious is not None:
                    previous.tail = prevPrevious.tail
if __name__ == "__main__":
    main()  # run the scraper only when executed as a script, not on import
| mpl-2.0 |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/AuthorizeNet/CustomerInformationManager/GetCustomerProfileIds.py | 5 | 3771 | # -*- coding: utf-8 -*-
###############################################################################
#
# GetCustomerProfileIds
# Retrieves all existing customer profile IDs.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetCustomerProfileIds(Choreography):
    # Generated, thin wrapper around the Temboo Choreo at
    # /Library/AuthorizeNet/CustomerInformationManager/GetCustomerProfileIds.
    def __init__(self, temboo_session):
        """
        Create a new instance of the GetCustomerProfileIds Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetCustomerProfileIds, self).__init__(temboo_session, '/Library/AuthorizeNet/CustomerInformationManager/GetCustomerProfileIds')
    def new_input_set(self):
        # Factory for the input container used to parameterize an execution.
        return GetCustomerProfileIdsInputSet()
    def _make_result_set(self, result, path):
        # Internal: wrap a raw execution result in the typed result set.
        return GetCustomerProfileIdsResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Internal: wrap an in-flight execution handle.
        return GetCustomerProfileIdsChoreographyExecution(session, exec_id, path)
class GetCustomerProfileIdsInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetCustomerProfileIds
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter forwards its value to InputSet._set_input under the input
    # name expected by the remote Temboo service.
    def set_APILoginId(self, value):
        """
        Set the value of the APILoginId input for this Choreo. ((required, string) The API Login Id provided by Authorize.net when signing up for a developer account.)
        """
        super(GetCustomerProfileIdsInputSet, self)._set_input('APILoginId', value)
    def set_Endpoint(self, value):
        """
        Set the value of the Endpoint input for this Choreo. ((optional, string) Set to api.authorize.net when running in production. Defaults to apitest.authorize.net for sandbox testing.)
        """
        super(GetCustomerProfileIdsInputSet, self)._set_input('Endpoint', value)
    def set_TransactionKey(self, value):
        """
        Set the value of the TransactionKey input for this Choreo. ((required, string) The TransactionKey provided by Authorize.net when signing up for a developer account.)
        """
        super(GetCustomerProfileIdsInputSet, self)._set_input('TransactionKey', value)
class GetCustomerProfileIdsResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetCustomerProfileIds Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        """Parse a JSON string into Python objects. (Note: the parameter
        name shadows the builtin ``str``; kept for generated-code parity.)"""
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response from Authorize.net.)
        """
        return self._output.get('Response', None)
class GetCustomerProfileIdsChoreographyExecution(ChoreographyExecution):
    # Execution handle for this Choreo; produces the matching typed result set.
    def _make_result_set(self, response, path):
        return GetCustomerProfileIdsResultSet(response, path)
| gpl-3.0 |
Gebesa-Dev/Addons-gebesa | sale_order_state_change/models/stock_move.py | 1 | 3539 | # -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from openerp import _, api, fields, models
from openerp.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
class StockMove(models.Model):
    """Placeholder extension of the core ``stock.move`` model.

    An earlier revision carried ~50 lines of commented-out, work-in-progress
    code (marked "INCOMPLETO") that force-cancelled procurement chains via
    raw SQL UPDATEs and ipdb breakpoints. It was dead code -- never executed
    -- and has been removed; recover it from version control if that feature
    is ever resumed. The model currently adds no fields or behavior.
    """
    _inherit = 'stock.move'
| agpl-3.0 |
ovnicraft/edx-platform | lms/djangoapps/course_api/tests/test_api.py | 19 | 8401 | """
Test for course API
"""
from hashlib import md5
from django.contrib.auth.models import AnonymousUser
from django.http import Http404
from opaque_keys.edx.keys import CourseKey
from rest_framework.exceptions import PermissionDenied
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase, ModuleStoreTestCase
from xmodule.modulestore.tests.factories import check_mongo_calls
from .mixins import CourseApiFactoryMixin
from ..api import course_detail, list_courses
class CourseApiTestMixin(CourseApiFactoryMixin):
    """
    Establish basic functionality for Course API tests
    """
    @classmethod
    def setUpClass(cls):
        # Shared request factory for all tests; seeding CourseOverview up
        # front lets the API calls run without further modulestore access.
        super(CourseApiTestMixin, cls).setUpClass()
        cls.request_factory = APIRequestFactory()
        CourseOverview.get_all_courses()  # seed the CourseOverview table
    def verify_course(self, course, course_id=u'edX/toy/2012_Fall'):
        """
        Ensure that the returned course is the course we just created
        """
        self.assertEqual(course_id, str(course.id))
class CourseDetailTestMixin(CourseApiTestMixin):
    """
    Common functionality for course_detail tests
    """
    def _make_api_call(self, requesting_user, target_user, course_key):
        """
        Call the `course_detail` api endpoint to get information on the course
        identified by `course_key`.
        """
        request = Request(self.request_factory.get('/'))
        request.user = requesting_user
        # check_mongo_calls(0) fails the test if the call performs any Mongo
        # queries -- the data must come entirely from CourseOverview.
        with check_mongo_calls(0):
            return course_detail(request, target_user.username, course_key)
class TestGetCourseDetail(CourseDetailTestMixin, SharedModuleStoreTestCase):
    """
    Test course_detail api function
    """
    @classmethod
    def setUpClass(cls):
        # One visible course, one staff-only course, and two users with
        # different privilege levels, shared across all tests in the class.
        super(TestGetCourseDetail, cls).setUpClass()
        cls.course = cls.create_course()
        cls.hidden_course = cls.create_course(course=u'hidden', visible_to_staff_only=True)
        cls.honor_user = cls.create_user('honor', is_staff=False)
        cls.staff_user = cls.create_user('staff', is_staff=True)
    def test_get_existing_course(self):
        """A non-staff user can fetch a visible course."""
        course = self._make_api_call(self.honor_user, self.honor_user, self.course.id)
        self.verify_course(course)
    def test_get_nonexistent_course(self):
        """Requesting an unknown course key raises Http404."""
        course_key = CourseKey.from_string(u'edX/toy/nope')
        with self.assertRaises(Http404):
            self._make_api_call(self.honor_user, self.honor_user, course_key)
    def test_hidden_course_for_honor(self):
        """A staff-only course is a 404 for a non-staff user."""
        with self.assertRaises(Http404):
            self._make_api_call(self.honor_user, self.honor_user, self.hidden_course.id)
    def test_hidden_course_for_staff(self):
        """Staff can see a staff-only course."""
        course = self._make_api_call(self.staff_user, self.staff_user, self.hidden_course.id)
        self.verify_course(course, course_id=u'edX/hidden/2012_Fall')
    def test_hidden_course_for_staff_as_honor(self):
        """Staff asking on behalf of a non-staff user still gets a 404."""
        with self.assertRaises(Http404):
            self._make_api_call(self.staff_user, self.honor_user, self.hidden_course.id)
class CourseListTestMixin(CourseApiTestMixin):
    """
    Common behavior for list_courses tests
    """
    def _make_api_call(self, requesting_user, specified_user, org=None, filter_=None):
        """
        Call the list_courses api endpoint to get information about
        `specified_user` on behalf of `requesting_user`.
        """
        request = Request(self.request_factory.get('/'))
        request.user = requesting_user
        # The listing must be served with zero Mongo queries (CourseOverview only).
        with check_mongo_calls(0):
            return list_courses(request, specified_user.username, org=org, filter_=filter_)
    def verify_courses(self, courses):
        """
        Verify that there is one course, and that it has the expected format.
        """
        self.assertEqual(len(courses), 1)
        self.verify_course(courses[0])
class TestGetCourseList(CourseListTestMixin, SharedModuleStoreTestCase):
    """
    Test the behavior of the `list_courses` api function.
    """
    @classmethod
    def setUpClass(cls):
        # One shared course plus a staff and a non-staff user for all tests.
        super(TestGetCourseList, cls).setUpClass()
        cls.course = cls.create_course()
        cls.staff_user = cls.create_user("staff", is_staff=True)
        cls.honor_user = cls.create_user("honor", is_staff=False)
    def test_as_staff(self):
        """Staff listing their own courses sees the created course."""
        courses = self._make_api_call(self.staff_user, self.staff_user)
        self.assertEqual(len(courses), 1)
        self.verify_courses(courses)
    def test_for_honor_user_as_staff(self):
        """Staff may list courses on behalf of a non-staff user."""
        courses = self._make_api_call(self.staff_user, self.honor_user)
        self.verify_courses(courses)
    def test_as_honor(self):
        """A non-staff user can list their own courses."""
        courses = self._make_api_call(self.honor_user, self.honor_user)
        self.verify_courses(courses)
    def test_for_staff_user_as_honor(self):
        """A non-staff user may not list courses on behalf of staff."""
        with self.assertRaises(PermissionDenied):
            self._make_api_call(self.honor_user, self.staff_user)
    def test_as_anonymous(self):
        """Anonymous users can list courses for themselves."""
        anonuser = AnonymousUser()
        courses = self._make_api_call(anonuser, anonuser)
        self.verify_courses(courses)
    def test_for_honor_user_as_anonymous(self):
        """Anonymous users may not list courses on behalf of others."""
        anonuser = AnonymousUser()
        with self.assertRaises(PermissionDenied):
            self._make_api_call(anonuser, self.staff_user)
    @SharedModuleStoreTestCase.modifies_courseware
    def test_multiple_courses(self):
        """Both courses appear once a second one exists."""
        self.create_course(course='second')
        courses = self._make_api_call(self.honor_user, self.honor_user)
        self.assertEqual(len(courses), 2)
    @SharedModuleStoreTestCase.modifies_courseware
    def test_filter_by_org(self):
        """Verify that courses are filtered by the provided org key."""
        # Create a second course to be filtered out of queries.
        alternate_course = self.create_course(
            org=md5(self.course.org).hexdigest()
        )
        self.assertNotEqual(alternate_course.org, self.course.org)
        # No filtering.
        unfiltered_courses = self._make_api_call(self.staff_user, self.staff_user)
        for org in [self.course.org, alternate_course.org]:
            self.assertTrue(
                any(course.org == org for course in unfiltered_courses)
            )
        # With filtering.
        filtered_courses = self._make_api_call(self.staff_user, self.staff_user, org=self.course.org)
        self.assertTrue(
            all(course.org == self.course.org for course in filtered_courses)
        )
    @SharedModuleStoreTestCase.modifies_courseware
    def test_filter(self):
        """`filter_` restricts results by course attributes (mobile_available)."""
        # Create a second course to be filtered out of queries.
        alternate_course = self.create_course(course='mobile', mobile_available=True)
        test_cases = [
            (None, [alternate_course, self.course]),
            (dict(mobile_available=True), [alternate_course]),
            (dict(mobile_available=False), [self.course]),
        ]
        for filter_, expected_courses in test_cases:
            filtered_courses = self._make_api_call(self.staff_user, self.staff_user, filter_=filter_)
            self.assertEquals(
                {course.id for course in filtered_courses},
                {course.id for course in expected_courses},
                "testing course_api.api.list_courses with filter_={}".format(filter_),
            )
class TestGetCourseListExtras(CourseListTestMixin, ModuleStoreTestCase):
    """
    Tests of course_list api function that require alternative configurations
    of created courses.
    """
    @classmethod
    def setUpClass(cls):
        # No shared course here: each test creates exactly the courses it needs.
        super(TestGetCourseListExtras, cls).setUpClass()
        cls.staff_user = cls.create_user("staff", is_staff=True)
        cls.honor_user = cls.create_user("honor", is_staff=False)
    def test_no_courses(self):
        """With no courses created, the listing is empty."""
        courses = self._make_api_call(self.honor_user, self.honor_user)
        self.assertEqual(len(courses), 0)
    def test_hidden_course_for_honor(self):
        """A staff-only course is omitted for a non-staff user."""
        self.create_course(visible_to_staff_only=True)
        courses = self._make_api_call(self.honor_user, self.honor_user)
        self.assertEqual(len(courses), 0)
    def test_hidden_course_for_staff(self):
        """A staff-only course is listed for staff."""
        self.create_course(visible_to_staff_only=True)
        courses = self._make_api_call(self.staff_user, self.staff_user)
        self.verify_courses(courses)
| agpl-3.0 |
abhishek-ch/hue | desktop/core/ext-py/Django-1.6.10/tests/model_regress/tests.py | 54 | 8927 | from __future__ import absolute_import, unicode_literals
import datetime
from operator import attrgetter
import sys
from django.core.exceptions import ValidationError
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from django.utils import tzinfo
from django.utils import unittest
from django.db import connection, router
from django.db.models.sql import InsertQuery
from .models import (Worker, Article, Party, Event, Department,
BrokenUnicodeMethod, NonAutoPK, Model1, Model2, Model3)
class ModelTests(TestCase):
    """Assorted model-layer regression tests; each test cites its Trac ticket."""

    # The bug is that the following queries would raise:
    # "TypeError: Related Field has invalid lookup: gte"
    def test_related_gte_lookup(self):
        """
        Regression test for #10153: foreign key __gte lookups.
        """
        Worker.objects.filter(department__gte=0)

    def test_related_lte_lookup(self):
        """
        Regression test for #10153: foreign key __lte lookups.
        """
        Worker.objects.filter(department__lte=0)

    def test_sql_insert_compiler_return_id_attribute(self):
        """
        Regression test for #14019: SQLInsertCompiler.as_sql() failure
        """
        db = router.db_for_write(Party)
        query = InsertQuery(Party)
        # Insert a row with no values at all (only the PK field, raw mode off).
        query.insert_values([Party._meta.fields[0]], [], raw=False)
        # this line will raise an AttributeError without the accompanying fix
        query.get_compiler(using=db).as_sql()

    def test_empty_choice(self):
        """An empty value in a choices field: display name and storage round-trip."""
        # NOTE: Part of the regression test here is merely parsing the model
        # declaration. The verbose_name, in particular, did not always work.
        a = Article.objects.create(
            headline="Look at me!", pub_date=datetime.datetime.now()
        )
        # An empty choice field should return None for the display name.
        self.assertIs(a.get_status_display(), None)

        # Empty strings should be returned as Unicode
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(a.misc_data, '')
        self.assertIs(type(a.misc_data), six.text_type)

    def test_long_textfield(self):
        # TextFields can hold more than 4000 characters (this was broken in
        # Oracle).
        a = Article.objects.create(
            headline="Really, really big",
            pub_date=datetime.datetime.now(),
            article_text="ABCDE" * 1000
        )
        # Re-fetch so we read the value back from the database, not the cache.
        a = Article.objects.get(pk=a.pk)
        self.assertEqual(len(a.article_text), 5000)

    def test_date_lookup(self):
        # Regression test for #659
        Party.objects.create(when=datetime.datetime(1999, 12, 31))
        Party.objects.create(when=datetime.datetime(1998, 12, 31))
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        # Year 1 exercises the minimum supported date (see #18969 below).
        Party.objects.create(when=datetime.datetime(1, 3, 3))
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=2), []
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=1), [
                datetime.date(1999, 1, 1)
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month=12), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year=1998), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        # Regression test for #8510: date lookups also accept string values.
        self.assertQuerysetEqual(
            Party.objects.filter(when__day="31"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__month="12"), [
                datetime.date(1999, 12, 31),
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when"),
            ordered=False
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year="1998"), [
                datetime.date(1998, 12, 31),
            ],
            attrgetter("when")
        )
        # Regression test for #18969: year lookups work down to year 1.
        self.assertQuerysetEqual(
            Party.objects.filter(when__year=1), [
                datetime.date(1, 3, 3),
            ],
            attrgetter("when")
        )
        self.assertQuerysetEqual(
            Party.objects.filter(when__year='1'), [
                datetime.date(1, 3, 3),
            ],
            attrgetter("when")
        )

    if (3,) <= sys.version_info < (3, 3) and connection.vendor == 'mysql':
        # In Python < 3.3, datetime.strftime raises an exception for years
        # below 1000, and existing MySQL DB-API drivers hit this problem.
        test_date_lookup = unittest.expectedFailure(test_date_lookup)

    def test_date_filter_null(self):
        # Date filtering was failing with NULL date values in SQLite
        # (regression test for #3501, amongst other things).
        Party.objects.create(when=datetime.datetime(1999, 1, 1))
        # A Party with when=NULL must not break the month filter below.
        Party.objects.create()
        p = Party.objects.filter(when__month=1)[0]
        self.assertEqual(p.when, datetime.date(1999, 1, 1))

        self.assertQuerysetEqual(
            Party.objects.filter(pk=p.pk).dates("when", "month"), [
                1
            ],
            attrgetter("month")
        )

    def test_get_next_prev_by_field(self):
        # Check that get_next_by_FIELD and get_previous_by_FIELD don't crash
        # when we have usecs values stored on the database
        #
        # It crashed after the Field.get_db_prep_* refactor, because on most
        # backends DateTimeFields supports usecs, but DateTimeField.to_python
        # didn't recognize them. (Note that
        # Model._get_next_or_previous_by_FIELD coerces values to strings)
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 16, 0, 0))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 6, 1, 1))
        Event.objects.create(when=datetime.datetime(2000, 1, 1, 13, 1, 1))
        # This event carries microseconds (24) — the crash trigger.
        e = Event.objects.create(when=datetime.datetime(2000, 1, 1, 12, 0, 20, 24))

        self.assertEqual(
            e.get_next_by_when().when, datetime.datetime(2000, 1, 1, 13, 1, 1)
        )
        self.assertEqual(
            e.get_previous_by_when().when, datetime.datetime(2000, 1, 1, 6, 1, 1)
        )

    def test_primary_key_foreign_key_types(self):
        # Check Department and Worker (non-default PK type)
        d = Department.objects.create(id=10, name="IT")
        w = Worker.objects.create(department=d, name="Full-time")
        self.assertEqual(six.text_type(w), "Full-time")

    def test_broken_unicode(self):
        # Models with broken unicode methods should still have a printable repr
        b = BrokenUnicodeMethod.objects.create(name="Jerry")
        self.assertEqual(repr(b), "<BrokenUnicodeMethod: [Bad Unicode data]>")

    @skipUnlessDBFeature("supports_timezones")
    def test_timezones(self):
        # Saving an updating with timezone-aware datetime Python objects.
        # Regression test for #10443.
        # The idea is that all these creations and saving should work without
        # crashing. It's not rocket science.
        dt1 = datetime.datetime(2008, 8, 31, 16, 20, tzinfo=tzinfo.FixedOffset(600))
        dt2 = datetime.datetime(2008, 8, 31, 17, 20, tzinfo=tzinfo.FixedOffset(600))
        obj = Article.objects.create(
            headline="A headline", pub_date=dt1, article_text="foo"
        )
        obj.pub_date = dt2
        obj.save()
        # update() must also accept an aware datetime; exactly one row changes.
        self.assertEqual(
            Article.objects.filter(headline="A headline").update(pub_date=dt1),
            1
        )

    def test_chained_fks(self):
        """
        Regression for #18432: Chained foreign keys with to_field produce incorrect query
        """
        m1 = Model1.objects.create(pkey=1000)
        m2 = Model2.objects.create(model1=m1)
        m3 = Model3.objects.create(model2=m2)

        # this is the actual test for #18432
        m3 = Model3.objects.get(model2=1000)
        # Accessing the chained FK must not raise / query incorrectly.
        m3.model2
class ModelValidationTest(TestCase):
    """Checks model-level uniqueness validation on a non-auto primary key."""

    def test_pk_validation(self):
        """An unsaved duplicate of an existing row must fail validate_unique()."""
        # Persist one row, then build (but do not save) a duplicate of it.
        existing = NonAutoPK.objects.create(name="one")
        duplicate = NonAutoPK(name="one")
        with self.assertRaises(ValidationError):
            duplicate.validate_unique()
class EvaluateMethodTest(TestCase):
    """
    Regression test for #13640: cannot filter by objects with 'evaluate' attr
    """

    def test_model_with_evaluate_method(self):
        """
        Ensures that you can filter by objects that have an 'evaluate' attr
        """
        # An instance carrying an 'evaluate' attribute used to confuse the ORM
        # when passed as a filter value; merely building the queryset is the test.
        department = Department.objects.create(pk=1, name='abc')
        department.evaluate = 'abc'
        Worker.objects.filter(department=department)
| apache-2.0 |
havard024/prego | venv/lib/python2.7/site-packages/unidecode/x05d.py | 252 | 4670 | data = (
# 256 ASCII transliteration entries, indexed by the code point's low byte
# (0x00-0xff, per the trailing comments). '[?]' marks an unmapped character.
# Generated table — do not edit entries by hand.
'Lang ', # 0x00
'Kan ', # 0x01
'Lao ', # 0x02
'Lai ', # 0x03
'Xian ', # 0x04
'Que ', # 0x05
'Kong ', # 0x06
'Chong ', # 0x07
'Chong ', # 0x08
'Ta ', # 0x09
'Lin ', # 0x0a
'Hua ', # 0x0b
'Ju ', # 0x0c
'Lai ', # 0x0d
'Qi ', # 0x0e
'Min ', # 0x0f
'Kun ', # 0x10
'Kun ', # 0x11
'Zu ', # 0x12
'Gu ', # 0x13
'Cui ', # 0x14
'Ya ', # 0x15
'Ya ', # 0x16
'Gang ', # 0x17
'Lun ', # 0x18
'Lun ', # 0x19
'Leng ', # 0x1a
'Jue ', # 0x1b
'Duo ', # 0x1c
'Zheng ', # 0x1d
'Guo ', # 0x1e
'Yin ', # 0x1f
'Dong ', # 0x20
'Han ', # 0x21
'Zheng ', # 0x22
'Wei ', # 0x23
'Yao ', # 0x24
'Pi ', # 0x25
'Yan ', # 0x26
'Song ', # 0x27
'Jie ', # 0x28
'Beng ', # 0x29
'Zu ', # 0x2a
'Jue ', # 0x2b
'Dong ', # 0x2c
'Zhan ', # 0x2d
'Gu ', # 0x2e
'Yin ', # 0x2f
'[?] ', # 0x30
'Ze ', # 0x31
'Huang ', # 0x32
'Yu ', # 0x33
'Wei ', # 0x34
'Yang ', # 0x35
'Feng ', # 0x36
'Qiu ', # 0x37
'Dun ', # 0x38
'Ti ', # 0x39
'Yi ', # 0x3a
'Zhi ', # 0x3b
'Shi ', # 0x3c
'Zai ', # 0x3d
'Yao ', # 0x3e
'E ', # 0x3f
'Zhu ', # 0x40
'Kan ', # 0x41
'Lu ', # 0x42
'Yan ', # 0x43
'Mei ', # 0x44
'Gan ', # 0x45
'Ji ', # 0x46
'Ji ', # 0x47
'Huan ', # 0x48
'Ting ', # 0x49
'Sheng ', # 0x4a
'Mei ', # 0x4b
'Qian ', # 0x4c
'Wu ', # 0x4d
'Yu ', # 0x4e
'Zong ', # 0x4f
'Lan ', # 0x50
'Jue ', # 0x51
'Yan ', # 0x52
'Yan ', # 0x53
'Wei ', # 0x54
'Zong ', # 0x55
'Cha ', # 0x56
'Sui ', # 0x57
'Rong ', # 0x58
'Yamashina ', # 0x59
'Qin ', # 0x5a
'Yu ', # 0x5b
'Kewashii ', # 0x5c
'Lou ', # 0x5d
'Tu ', # 0x5e
'Dui ', # 0x5f
'Xi ', # 0x60
'Weng ', # 0x61
'Cang ', # 0x62
'Dang ', # 0x63
'Hong ', # 0x64
'Jie ', # 0x65
'Ai ', # 0x66
'Liu ', # 0x67
'Wu ', # 0x68
'Song ', # 0x69
'Qiao ', # 0x6a
'Zi ', # 0x6b
'Wei ', # 0x6c
'Beng ', # 0x6d
'Dian ', # 0x6e
'Cuo ', # 0x6f
'Qian ', # 0x70
'Yong ', # 0x71
'Nie ', # 0x72
'Cuo ', # 0x73
'Ji ', # 0x74
'[?] ', # 0x75
'Tao ', # 0x76
'Song ', # 0x77
'Zong ', # 0x78
'Jiang ', # 0x79
'Liao ', # 0x7a
'Kang ', # 0x7b
'Chan ', # 0x7c
'Die ', # 0x7d
'Cen ', # 0x7e
'Ding ', # 0x7f
'Tu ', # 0x80
'Lou ', # 0x81
'Zhang ', # 0x82
'Zhan ', # 0x83
'Zhan ', # 0x84
'Ao ', # 0x85
'Cao ', # 0x86
'Qu ', # 0x87
'Qiang ', # 0x88
'Zui ', # 0x89
'Zui ', # 0x8a
'Dao ', # 0x8b
'Dao ', # 0x8c
'Xi ', # 0x8d
'Yu ', # 0x8e
'Bo ', # 0x8f
'Long ', # 0x90
'Xiang ', # 0x91
'Ceng ', # 0x92
'Bo ', # 0x93
'Qin ', # 0x94
'Jiao ', # 0x95
'Yan ', # 0x96
'Lao ', # 0x97
'Zhan ', # 0x98
'Lin ', # 0x99
'Liao ', # 0x9a
'Liao ', # 0x9b
'Jin ', # 0x9c
'Deng ', # 0x9d
'Duo ', # 0x9e
'Zun ', # 0x9f
'Jiao ', # 0xa0
'Gui ', # 0xa1
'Yao ', # 0xa2
'Qiao ', # 0xa3
'Yao ', # 0xa4
'Jue ', # 0xa5
'Zhan ', # 0xa6
'Yi ', # 0xa7
'Xue ', # 0xa8
'Nao ', # 0xa9
'Ye ', # 0xaa
'Ye ', # 0xab
'Yi ', # 0xac
'E ', # 0xad
'Xian ', # 0xae
'Ji ', # 0xaf
'Xie ', # 0xb0
'Ke ', # 0xb1
'Xi ', # 0xb2
'Di ', # 0xb3
'Ao ', # 0xb4
'Zui ', # 0xb5
'[?] ', # 0xb6
'Ni ', # 0xb7
'Rong ', # 0xb8
'Dao ', # 0xb9
'Ling ', # 0xba
'Za ', # 0xbb
'Yu ', # 0xbc
'Yue ', # 0xbd
'Yin ', # 0xbe
'[?] ', # 0xbf
'Jie ', # 0xc0
'Li ', # 0xc1
'Sui ', # 0xc2
'Long ', # 0xc3
'Long ', # 0xc4
'Dian ', # 0xc5
'Ying ', # 0xc6
'Xi ', # 0xc7
'Ju ', # 0xc8
'Chan ', # 0xc9
'Ying ', # 0xca
'Kui ', # 0xcb
'Yan ', # 0xcc
'Wei ', # 0xcd
'Nao ', # 0xce
'Quan ', # 0xcf
'Chao ', # 0xd0
'Cuan ', # 0xd1
'Luan ', # 0xd2
'Dian ', # 0xd3
'Dian ', # 0xd4
'[?] ', # 0xd5
'Yan ', # 0xd6
'Yan ', # 0xd7
'Yan ', # 0xd8
'Nao ', # 0xd9
'Yan ', # 0xda
'Chuan ', # 0xdb
'Gui ', # 0xdc
'Chuan ', # 0xdd
'Zhou ', # 0xde
'Huang ', # 0xdf
'Jing ', # 0xe0
'Xun ', # 0xe1
'Chao ', # 0xe2
'Chao ', # 0xe3
'Lie ', # 0xe4
'Gong ', # 0xe5
'Zuo ', # 0xe6
'Qiao ', # 0xe7
'Ju ', # 0xe8
'Gong ', # 0xe9
'Kek ', # 0xea
'Wu ', # 0xeb
'Pwu ', # 0xec
'Pwu ', # 0xed
'Chai ', # 0xee
'Qiu ', # 0xef
'Qiu ', # 0xf0
'Ji ', # 0xf1
'Yi ', # 0xf2
'Si ', # 0xf3
'Ba ', # 0xf4
'Zhi ', # 0xf5
'Zhao ', # 0xf6
'Xiang ', # 0xf7
'Yi ', # 0xf8
'Jin ', # 0xf9
'Xun ', # 0xfa
'Juan ', # 0xfb
'Phas ', # 0xfc
'Xun ', # 0xfd
'Jin ', # 0xfe
'Fu ', # 0xff
)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.