| code (string, length 2-1.05M) | repo_name (string, length 5-104) | path (string, length 4-251) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
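Every row below follows this six-column schema. As a minimal, hypothetical sketch of consuming such rows (the file name `code_samples.jsonl` and the JSON Lines export are assumptions for illustration, not part of this dump):

```python
import pandas as pd

# Hypothetical local export of the rows shown below; file name and format are
# assumptions for illustration only.
df = pd.read_json("code_samples.jsonl", lines=True)

# The six columns described in the header above.
expected = {"code", "repo_name", "path", "language", "license", "size"}
assert expected <= set(df.columns)

# Example: keep Apache-2.0 licensed Python files smaller than 10 KB.
small_py = df[(df["license"] == "apache-2.0") & (df["size"] < 10_000)]
print(len(small_py), "rows selected")
```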
# -*- coding: utf-8 -*-
import logging
logger = logging.getLogger(__name__)
import requests
import os
from django.core.management.base import NoArgsCommand
from apps.subscribers.models import Ticket
class Command(NoArgsCommand):
help = 'Loops through all subscribers and marks each ticket appropriately.'
def handle_noargs(self, **options):
# Prepare our request.
headers = {
'Authorization': 'OAuth %s' % os.environ.get('TWITCH_OAUTH_TOKEN'),
'Accept': 'application/vnd.twitchtv.v3+json' }
url = 'https://api.twitch.tv/kraken/channels/avalonstar/subscriptions'
# Let's not invalidate anything unnecessarily. If we hit an exception
# with the first request, then bail.
try:
r = requests.get(url, headers=headers)
except requests.exceptions.RequestException as e:
logger.exception(e)
return
# Rather than mark active tickets as inactive, mark all tickets as
# inactive. As we loop through the Twitch API, we'll mark each ticket
# we see as active again.
Ticket.objects.invalidate_tickets()
count = r.json().get('_total') # Total number of tickets.
limit = 100 # Maximum number of tickets we can fetch at once.
while url:
# To keep our dyno-hour usage down, we have to make sure that
# requests aren't hung up. So try the request and if a `Timeout`
# is thrown, bail.
try:
response = requests.get(url, headers=headers, params={'limit': limit}, timeout=1)
except requests.exceptions.RequestException as e:
logger.exception(e)
break
data = response.json()
tickets = data['subscriptions']
# The Twitch API doesn't stop offering `next` URLs when no results
# are available. So if we don't have tickets, shut it down.
if not tickets:
break
# We have tickets. Check each one and mark that person as active if
# their ticket still exists in Twitch's API.
for ticket in tickets:
name = ticket['user']['name']
updates = {
'display_name': ticket['user']['display_name'],
'is_active': True,
'updated': ticket['created_at'],
'twid': ticket['_id'] }
t, created = Ticket.objects.update_or_create(name=name, defaults=updates)
# Done. Grab `next` and keep looping.
url = data['_links']['next']
| bryanveloso/avalonstar-tv | apps/subscribers/management/commands/updatetickets.py | Python | apache-2.0 | 2,623 |
import collections
class Solution:
def numSimilarGroups(self, A):
UF = {}
for i in range(len(A)): UF[i] = i
def find(x):
if x != UF[x]:
UF[x] = find(UF[x])
return UF[x]
def union(x, y):
UF.setdefault(x, x)
UF.setdefault(y, y)
UF[find(x)] = find(y)
def match(s1, s2):
i = 0
j = -1
while i<len(s1):
if s1[i] != s2[i]:
if j == -1: j = i
else: break
i += 1
return s1[i+1:] == s2[i+1:]
N, W = len(A), len(A[0])
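# Two strategies: with few words relative to their length (N < W*W), compare
# every pair of words directly; otherwise index every single-swap variant of
# each word so similar words can be unioned without the full pairwise scan.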
if N < W*W:
for i in range(len(A)): UF[i] = i
for i in range(len(A)):
for j in range(i+1, len(A)):
if match(A[i], A[j]):
union(i, j)
else:
d = collections.defaultdict(set)
for idx, w in enumerate(A):
lw = list(w)
for i in range(W):
for j in range(i+1, W):
lw[i], lw[j] = lw[j], lw[i]
d["".join(lw)].add(idx)
lw[i], lw[j] = lw[j], lw[i]
for i, w in enumerate(A):
for j in d[w]:
union(i, j)
return len({find(x) for x in UF})
print(Solution().numSimilarGroups(["tars","rats","arts","star"]))
| zuun77/givemegoogletshirts | leetcode/python/839_similar-string-groups.py | Python | apache-2.0 | 1,451 |
import abc
from OHA.helpers.converters.BaseConverter import BaseConverter
__author__ = 'indrajit'
__email__ = 'eendroroy@gmail.com'
class LengthConverter(BaseConverter):
def __init__(self, _value, _from=None, _to=None):
super(LengthConverter, self).__init__(_value, _from, _to)
def _supported_units(self):
return ['ft', 'in', 'm', 'cm']
@abc.abstractmethod
def _default_from_unit(self):
raise NotImplementedError('method not implemented')
@abc.abstractmethod
def _default_to_unit(self):
raise NotImplementedError('method not implemented')
def _convert(self):
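# Convert self._value from self._from units to self._to units,
# e.g. 6 ft -> 6 * 0.3048 = 1.8288 m.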
if self._from == self._to:
return self._value
elif self._to == 'm' and self._from == 'ft':
return self._value * 0.3048
elif self._to == 'm' and self._from == 'in':
return self._value * 0.0254
elif self._to == 'cm' and self._from == 'ft':
return self._value * 30.48
elif self._to == 'cm' and self._from == 'in':
return self._value * 2.54
elif self._to == 'ft' and self._from == 'm':
return self._value * 3.28084
elif self._to == 'ft' and self._from == 'cm':
return self._value * 0.0328084
elif self._to == 'in' and self._from == 'm':
return self._value * 39.3701
elif self._to == 'in' and self._from == 'cm':
return self._value * 0.393701
else:
return None
| openhealthalgorithms/openhealthalgorithms | OHA/helpers/converters/LengthConverter.py | Python | apache-2.0 | 1,483 |
#!/usr/bin/python3
import rem_backend.query_data as qd
import rem_backend.propagation_model_estimation as pm
import threading
import _thread
__author__ = "Daniel Denkovski", "Valentin Rakovic"
__copyright__ = "Copyright (c) 2017, Faculty of Electrical Engineering and Information Technologies, UKIM, Skopje, Macedonia"
__version__ = "0.1.0"
__email__ = "{danield, valentin}@feit.ukim.edu.mk"
'''
REM console module
Showcases the REM backend capabilities of the extension
Used as console interface for users to interact with the platform
'''
def main():
run = 1;
while (run):
print("Please choose from the selection:")
print("1. WiFi device localization")
print("2. Duty cycle calculation")
print("3. Path loss model estimation")
print("0. Quit")
choice = input(" >> ")
if (choice == '0'):
run = 0
elif (choice == '1'):
print("Loc:Enter the channel of interest")
chann = input(" >> ")
dev_list = qd.get_all_active_devices_on_channel(chann,1)
try:
print("Select the index of the device of interest")
ind = 1
for row in dev_list:
print("{}. {}".format(ind,row[0]))
ind += 1
devind = input(" >> ")
print(dev_list[int(devind)-1][0])
try:
location = qd.estimate_tx_location(str(dev_list[int(devind)-1][0]),10)
print("The location of devices {} is:".format(str(dev_list[int(devind)-1][0])))
print("x:{} y:{} z:{} Pt:{} dBm".format(location[0],location[1],location[2],location[3]))
except:
print("not sufficient data for modeling")
print("")
except:
print("no devices")
print("")
elif (choice == '2'):
print("DC:Enter the channel of interest")
chann = input(" >> ")
ux, ul, dx, dy = input("provide ux ul dx dl coordinates of interest: ").split(' ')
try:
val = qd.get_duty_cycle_by_area(chann,10,ux,ul,dx,dy)
dc = val[0][0]
print("Duty cycle value for channel={} is {}".format(chann,dc))
except:
print("not sufficient data for modeling")
print("")
elif (choice == '3'):
print("PL:Enter the channel of interest")
chann = input(" >> ")
try:
val = pm.get_chann_model(10,chann)
print(val)
except:
print("not sufficient data for modeling")
print("")
if __name__=="__main__":
main()
| danieldUKIM/controllers_dockers | rem_console/REM_console.py | Python | apache-2.0 | 2,275 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example code for TensorFlow Wide & Deep Tutorial using TF.Learn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
import sys
import tensorflow as tf
_CSV_COLUMNS = [
'age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
'income_bracket'
]
_CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
[0], [0], [0], [''], ['']]
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir', type=str, default='/tmp/census_model',
help='Base directory for the model.')
parser.add_argument(
'--model_type', type=str, default='wide_deep',
help="Valid model types: {'wide', 'deep', 'wide_deep'}.")
parser.add_argument(
'--train_epochs', type=int, default=20, help='Number of training epochs.')
parser.add_argument(
'--epochs_per_eval', type=int, default=2,
help='The number of training epochs to run between evaluations.')
parser.add_argument(
'--batch_size', type=int, default=40, help='Number of examples per batch.')
parser.add_argument(
'--train_data', type=str, default='/tmp/census_data/adult.data',
help='Path to the training data.')
parser.add_argument(
'--test_data', type=str, default='/tmp/census_data/adult.test',
help='Path to the test data.')
def build_model_columns():
"""Builds a set of wide and deep feature columns."""
# Continuous columns
age = tf.feature_column.numeric_column('age')
education_num = tf.feature_column.numeric_column('education_num')
capital_gain = tf.feature_column.numeric_column('capital_gain')
capital_loss = tf.feature_column.numeric_column('capital_loss')
hours_per_week = tf.feature_column.numeric_column('hours_per_week')
education = tf.feature_column.categorical_column_with_vocabulary_list(
'education', [
'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
'5th-6th', '10th', '1st-4th', 'Preschool', '12th'])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
'marital_status', [
'Married-civ-spouse', 'Divorced', 'Married-spouse-absent',
'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
'relationship', [
'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
'Other-relative'])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
'workclass', [
'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov',
'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked'])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=1000)
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
education, marital_status, relationship, workclass, occupation,
age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
['education', 'occupation'], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, 'education', 'occupation'], hash_bucket_size=1000),
]
wide_columns = base_columns + crossed_columns
deep_columns = [
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(marital_status),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(occupation, dimension=8),
]
return wide_columns, deep_columns
def build_estimator(model_dir, model_type):
"""Build an estimator appropriate for the given model type."""
wide_columns, deep_columns = build_model_columns()
hidden_units = [100, 75, 50, 25]
# Create a tf.estimator.RunConfig to ensure the model is run on CPU, which
# trains faster than GPU for this model.
run_config = tf.estimator.RunConfig().replace(
session_config=tf.ConfigProto(device_count={'GPU': 0}))
if model_type == 'wide':
return tf.estimator.LinearClassifier(
model_dir=model_dir,
feature_columns=wide_columns,
config=run_config)
elif model_type == 'deep':
return tf.estimator.DNNClassifier(
model_dir=model_dir,
feature_columns=deep_columns,
hidden_units=hidden_units,
config=run_config)
else:
return tf.estimator.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=hidden_units,
config=run_config)
def input_fn(data_file, num_epochs, shuffle, batch_size):
"""Generate an input function for the Estimator."""
assert tf.gfile.Exists(data_file), (
'%s not found. Please make sure you have either run data_download.py or '
'set both arguments --train_data and --test_data.' % data_file)
def parse_csv(value):
print('Parsing', data_file)
columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)
features = dict(zip(_CSV_COLUMNS, columns))
labels = features.pop('income_bracket')
return features, tf.equal(labels, '>50K')
# Extract lines from input files using the Dataset API.
dataset = tf.contrib.data.TextLineDataset(data_file)
dataset = dataset.map(parse_csv, num_threads=5)
# Apply transformations to the Dataset
dataset = dataset.batch(batch_size)
dataset = dataset.repeat(num_epochs)
# Input function that is called by the Estimator
def _input_fn():
if shuffle:
# Apply shuffle transformation to re-shuffle the dataset in each call.
shuffled_dataset = dataset.shuffle(buffer_size=100000)
iterator = shuffled_dataset.make_one_shot_iterator()
else:
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
return _input_fn
def main(unused_argv):
# Clean up the model directory if present
shutil.rmtree(FLAGS.model_dir, ignore_errors=True)
model = build_estimator(FLAGS.model_dir, FLAGS.model_type)
# Set up input function generators for the train and test data files.
train_input_fn = input_fn(
data_file=FLAGS.train_data,
num_epochs=FLAGS.epochs_per_eval,
shuffle=True,
batch_size=FLAGS.batch_size)
eval_input_fn = input_fn(
data_file=FLAGS.test_data,
num_epochs=1,
shuffle=False,
batch_size=FLAGS.batch_size)
# Train and evaluate the model every `FLAGS.epochs_per_eval` epochs.
for n in range(FLAGS.train_epochs // FLAGS.epochs_per_eval):
model.train(input_fn=train_input_fn)
results = model.evaluate(input_fn=eval_input_fn)
# Display evaluation metrics
print('Results at epoch', (n + 1) * FLAGS.epochs_per_eval)
print('-' * 30)
for key in sorted(results):
print('%s: %s' % (key, results[key]))
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| zlpmichelle/crackingtensorflow | wide_deep/wide_deep.py | Python | apache-2.0 | 8,315 |
from dronekit import connect
import time
import argparse
import pprint
import rospy
import copy
from gf_beacon.srv import *
import hostapd
import beaconencoder
import findap
class sample_beacon_data:
def __init__(self, connect_string):
print "init"
self.missing_AP = False
try:
AP_interface = findap.findap()
self.myap = hostapd.Hostapd(AP_interface)
except:
print "AP missing will just print enceded beacon"
self.missing_AP = True
self.encode_beacon = rospy.ServiceProxy('gf_beacon_encoding', gf_encoding)
self.beacon_dict = {}
self.previous_beacon_dict = {}
vehicle = connect(connect_string, wait_ready=True)
vehicle.add_attribute_listener('location.global_relative_frame',
self.location_callback)
# vehicle.add_attribute_listener('location.local_frame',
# self.location_callback)
vehicle.add_attribute_listener('groundspeed', self.location_callback)
vehicle.add_attribute_listener('battery', self.location_callback)
vehicle.add_attribute_listener('heading', self.location_callback)
vehicle.add_attribute_listener('vehicle.mode.name',
self.location_callback)
def location_callback(self, vehicle, attr_name, value):
if vehicle.mode == "GUIDED":
self.beacon_dict["flying_state_on"] = True
self.beacon_dict["return_to_home_state_on"] = False
self.beacon_dict["forced_landing_state_on"] = False
elif vehicle.mode == "RTL":
self.beacon_dict["flying_state_on"] = False
self.beacon_dict["return_to_home_state_on"] = True
self.beacon_dict["forced_landing_state_on"] = False
elif vehicle.mode == "LAND":
self.beacon_dict["flying_state_on"] = False
self.beacon_dict["return_to_home_state_on"] = False
self.beacon_dict["forced_landing_state_on"] = True
else:
self.beacon_dict["flying_state_on"] = False
self.beacon_dict["return_to_home_state_on"] = False
self.beacon_dict["forced_landing_state_on"] = False
if attr_name == "groundspeed":
self.beacon_dict["speed_mph"] = float(value * 1.94384)
if attr_name == "heading":
self.beacon_dict["heading_deg"] = float(value)
elif attr_name == "battery":
vardict = vars(value)
mylevel = float(vardict["level"])
if mylevel < 0:
mylevel = 0
if mylevel > 100:
mylevel = 100
self.beacon_dict["battery_level"] = mylevel
elif attr_name == "location.global_relative_frame":
vardict = vars(value)
self.beacon_dict["lng_deg"] = float(vardict["lon"])
self.beacon_dict["lat_deg"] = float(vardict["lat"])
self.beacon_dict["alt_agl_m"] = float(vardict["alt"])
else:
print attr_name, " : ", value
#self.beacon_dict["heading_deg"] = 90.0
# pprint.pprint(self.beacon_dict)
# rosservice call gf_beacon_encoding 34.1 35.1 200.5 10.1 241.1 10.1 true false true
#lng_deg lat_deg alt_agl_m speed_mph heading_deg battery_level flying_state_on return_to_home_state_on forced_landing_state_on
value_list = ["lng_deg", "lat_deg", "alt_agl_m", "speed_mph",
"heading_deg", "battery_level", "flying_state_on",
"return_to_home_state_on", "forced_landing_state_on"]
if all(field in self.beacon_dict for field in value_list ):
if self.beacon_dict != self.previous_beacon_dict:
encoded = self.encode_beacon(
self.beacon_dict["lng_deg"],
self.beacon_dict["lat_deg"],
self.beacon_dict["alt_agl_m"],
self.beacon_dict["speed_mph"],
self.beacon_dict["heading_deg"],
self.beacon_dict["battery_level"],
self.beacon_dict["flying_state_on"],
self.beacon_dict["return_to_home_state_on"],
self.beacon_dict["forced_landing_state_on"]
)
self.previous_beacon_dict = copy.deepcopy(self.beacon_dict)
if self.missing_AP:
print "encoded_beacon=", encoded.encoded
else:
beaconstring = beaconencoder.create_beacon_hex(0, encoded.encoded)
pprint.pprint(encoded.encoded)
self.myap.set('vendor_elements', beaconstring)
pprint.pprint(self.myap.request("UPDATE_BEACON"))
# print "beaconstring=", beaconstring
# print "econded_beacon=", encoded.encoded
def go(self):
while 1 == 1:
time.sleep(2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--connect_string",
default="udp:127.0.0.1:14550")
args = parser.parse_args()
sampler=sample_beacon_data(args.connect_string)
sampler.go()
if __name__ == "__main__":
main()
| geofrenzy/utm-mbsb | beacon/dkbeacon.py | Python | apache-2.0 | 5,335 |
#Thanks for the approach https://github.com/ML-Person/My-solution-to-Avito-Challenge-2018 (@nikita)
import pandas as pd
import numpy as np
import gc
import os
import re
import pickle
import string
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from scipy.sparse import hstack, csr_matrix
import lightgbm as lgb
# for text data
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline
pd.set_option('max_columns', 84)
import warnings
warnings.filterwarnings('ignore')
PATH_TO_DATA = '/Avito'
train = pd.read_csv(os.path.join(PATH_TO_DATA, 'train.csv'))
test = pd.read_csv(os.path.join(PATH_TO_DATA, 'test.csv'))
'''
item_id - Ad id.
user_id - User id.
region - Ad region.
city - Ad city.
parent_category_name - Top level ad category as classified by Avito's ad model.
category_name - Fine grain ad category as classified by Avito's ad model.
param_1 - Optional parameter from Avito's ad model.
param_2 - Optional parameter from Avito's ad model.
param_3 - Optional parameter from Avito's ad model.
title - Ad title.
description - Ad description.
price - Ad price.
item_seq_number - Ad sequential number for user.
activation_date - Date ad was placed.
user_type - User type.
image - Id code of image. Ties to a jpg file in train_jpg. Not every ad has an image.
image_top_1 - Avito's classification code for the image.
deal_probability - The target variable. This is the likelihood that an ad actually sold something. It's not possible to verify every transaction with certainty, so this column's value can be any float from zero to one.
'''
categorical = [
'image_top_1', 'param_1', 'param_2', 'param_3',
'city', 'region', 'category_name', 'parent_category_name', 'user_type'
]
# easy preprocessing
text_cols = [
'title', 'description', 'param_1', 'param_2', 'param_3',
'city', 'region', 'category_name', 'parent_category_name'
]
for col in text_cols:
for df in [train, test]:
df[col] = df[col].str.replace(r"[^А-Яа-яA-Za-z0-9,!?@\'\`\"\_\n]", ' ')
df[col].fillna("NA", inplace=True)
df[col] = df[col].str.lower()
for df in [train, test]:
df['len_description'] = df['description'].apply(lambda x: len(str(x)))
df['num_desc_punct'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation])) / df['len_description']
for col in ['description', 'title']:
df['num_words_' + col] = df[col].apply(lambda comment: len(comment.split()))
df['num_unique_words_' + col] = df[col].apply(lambda comment: len(set(w for w in comment.split())))
# percentage of unique words
df['words_vs_unique_title'] = df['num_unique_words_title'] / df['num_words_title'] * 100
df['words_vs_unique_description'] = df['num_unique_words_description'] / df['num_words_description'] * 100
# [DUMP] TRAIN + TEST
train.to_csv(os.path.join(PATH_TO_DATA, 'train_all_features.csv'), index=False, encoding='utf-8')
test.to_csv(os.path.join(PATH_TO_DATA, 'test_all_features.csv'), index=False, encoding='utf-8')
del train, test
gc.collect()
train = pd.read_csv(os.path.join(PATH_TO_DATA, 'train.csv'))
test = pd.read_csv(os.path.join(PATH_TO_DATA, 'test.csv'))
stemmer = SnowballStemmer("russian", ignore_stopwords=False)
train['title_stemm'] = train['title'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
test['title_stemm'] = test['title'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
train['description_stemm'] = train['description'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
test['description_stemm'] = test['description'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
train['text'] = train['param_1'] + " " + train['param_2'] + " " + train['param_3'] + " " + \
train['city'] + " " + train['category_name'] + " " + train['parent_category_name']
test['text'] = test['param_1'] + " " + test['param_2'] + " " + test['param_3'] + " " + \
test['city'] + " " + test['category_name'] + " " + test['parent_category_name']
train['text_stemm'] = train['text'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
test['text_stemm'] = test['text'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
for df in [train, test]:
df.drop(['title', 'description', 'text'], axis=1, inplace=True)
#TF-IDF + SVD
# CountVectorizer for 'title'
title_tfidf = CountVectorizer(stop_words=stopwords.words('russian'), lowercase=True,
token_pattern=r'\w{1,}', ngram_range=(1, 1))
full_tfidf = title_tfidf.fit_transform(train['title_stemm'].values.tolist() + test['title_stemm'].values.tolist())
train_title_tfidf = title_tfidf.transform(train['title_stemm'].values.tolist())
test_title_tfidf = title_tfidf.transform(test['title_stemm'].values.tolist())
### SVD Components ###
n_comp = 10
svd_obj = TruncatedSVD(n_components=n_comp, algorithm='arpack')
svd_obj.fit(full_tfidf)
train_svd = pd.DataFrame(svd_obj.transform(train_title_tfidf))
test_svd = pd.DataFrame(svd_obj.transform(test_title_tfidf))
train_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
test_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
train_svd['item_id'] = train['item_id']
test_svd['item_id'] = test['item_id']
# Merge and delete
train = train.merge(train_svd, on='item_id', how='left')
test = test.merge(test_svd, on='item_id', how='left')
del full_tfidf, train_svd, test_svd
gc.collect()
# TF-IDF for 'description'
desc_tfidf = TfidfVectorizer(stop_words=stopwords.words('russian'), token_pattern=r'\w{1,}',
lowercase=True, ngram_range=(1, 2), norm='l2', smooth_idf=False,
max_features=17000)
full_tfidf = desc_tfidf.fit_transform(train['description_stemm'].values.tolist() + test['description_stemm'].values.tolist())
train_desc_tfidf = desc_tfidf.transform(train['description_stemm'].values.tolist())
test_desc_tfidf = desc_tfidf.transform(test['description_stemm'].values.tolist())
### SVD Components ###
n_comp = 10
svd_obj = TruncatedSVD(n_components=n_comp, algorithm='arpack')
svd_obj.fit(full_tfidf)
train_svd = pd.DataFrame(svd_obj.transform(train_desc_tfidf))
test_svd = pd.DataFrame(svd_obj.transform(test_desc_tfidf))
train_svd.columns = ['svd_description_'+str(i+1) for i in range(n_comp)]
test_svd.columns = ['svd_description_'+str(i+1) for i in range(n_comp)]
train_svd['item_id'] = train['item_id']
test_svd['item_id'] = test['item_id']
# Merge and delete
train = train.merge(train_svd, on='item_id', how='left')
test = test.merge(test_svd, on='item_id', how='left')
del full_tfidf, train_svd, test_svd
gc.collect()
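# NOTE: the construction of `text_tfidf` / `train_text_tfidf` / `test_text_tfidf`
# used below is missing from this excerpt. A minimal reconstruction, assuming the
# 'text_stemm' column is vectorized the same way as 'description_stemm' above:
text_tfidf = TfidfVectorizer(stop_words=stopwords.words('russian'), token_pattern=r'\w{1,}',
lowercase=True, ngram_range=(1, 2), norm='l2', smooth_idf=False, max_features=17000)
full_tfidf = text_tfidf.fit_transform(train['text_stemm'].values.tolist() + test['text_stemm'].values.tolist())
train_text_tfidf = text_tfidf.transform(train['text_stemm'].values.tolist())
test_text_tfidf = text_tfidf.transform(test['text_stemm'].values.tolist())
del full_tfidf
gc.collect()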
# [STACKING]
train_tfidf = csr_matrix(hstack([train_title_tfidf, train_desc_tfidf, train_text_tfidf]))
test_tfidf = csr_matrix(hstack([test_title_tfidf, test_desc_tfidf, test_text_tfidf]))
del train_title_tfidf, train_desc_tfidf, train_text_tfidf
del test_title_tfidf, test_desc_tfidf, test_text_tfidf
gc.collect()
vocab = np.hstack([
title_tfidf.get_feature_names(),
desc_tfidf.get_feature_names(),
text_tfidf.get_feature_names()
])
# [DUMP] TF-IDF pickle files + vocabulary
with open(os.path.join(PATH_TO_DATA, 'train_tfidf.pkl'), 'wb') as train_tfidf_pkl:
pickle.dump(train_tfidf, train_tfidf_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'test_tfidf.pkl'), 'wb') as test_tfidf_pkl:
pickle.dump(test_tfidf, test_tfidf_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'vocab.pkl'), 'wb') as vocab_pkl:
pickle.dump(vocab, vocab_pkl, protocol=2)
del train, train_tfidf, test, test_tfidf, vocab
gc.collect()
| Diyago/Machine-Learning-scripts | DEEP LEARNING/Kaggle Avito Demand Prediction Challenge/stem to SVD.py | Python | apache-2.0 | 8,068 |
# Jinja2 filters for user creation help
# Needed for chage
def daysSinceEpoc( _unused=0 ):
import datetime
return (datetime.datetime.utcnow() - datetime.datetime(1970,1,1)).days
# Boilerplate code to add filter to Jinja2
class FilterModule(object):
def filters(self):
return { 'daysSinceEpoc': daysSinceEpoc,
}
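# Illustrative usage (assumed, not from the original file): in a template,
# "{{ 0 | daysSinceEpoc }}" renders today's day count since 1970-01-01,
# which is the format `chage` expects for password-aging fields.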
| jmalacho/ansible-examples | filter_plugins/users.py | Python | apache-2.0 | 344 |
import logging
import numpy as np
from ray.tune.automl.search_policy import AutoMLSearcher
logger = logging.getLogger(__name__)
LOGGING_PREFIX = "[GENETIC SEARCH] "
class GeneticSearch(AutoMLSearcher):
"""Implement the genetic search.
Keep a collection of top-K parameter permutations as base genes,
then apply selection, crossover, and mutation to them to generate
new genes (a.k.a new generation). Hopefully, the performance of
the top population would increase generation by generation.
"""
def __init__(self,
search_space,
reward_attr,
max_generation=2,
population_size=10,
population_decay=0.95,
keep_top_ratio=0.2,
selection_bound=0.4,
crossover_bound=0.4):
"""
Initialize GeneticSearcher.
Args:
search_space (SearchSpace): The space to search.
reward_attr: The attribute name of the reward in the result.
max_generation: Max iteration number of genetic search.
population_size: Number of trials of the initial generation.
population_decay: Decay ratio of population size for the
next generation.
keep_top_ratio: Ratio of the top performance population.
selection_bound: Threshold for performing selection.
crossover_bound: Threshold for performing crossover.
"""
super(GeneticSearch, self).__init__(search_space, reward_attr)
self._cur_generation = 1
self._max_generation = max_generation
self._population_size = population_size
self._population_decay = population_decay
self._keep_top_ratio = keep_top_ratio
self._selection_bound = selection_bound
self._crossover_bound = crossover_bound
self._cur_config_list = []
self._cur_encoding_list = []
for _ in range(population_size):
one_hot = self.search_space.generate_random_one_hot_encoding()
self._cur_encoding_list.append(one_hot)
self._cur_config_list.append(
self.search_space.apply_one_hot_encoding(one_hot))
def _select(self):
population_size = len(self._cur_config_list)
logger.info(
LOGGING_PREFIX + "Generate the %sth generation, population=%s",
self._cur_generation, population_size)
return self._cur_config_list, self._cur_encoding_list
def _feedback(self, trials):
self._cur_generation += 1
if self._cur_generation > self._max_generation:
return AutoMLSearcher.TERMINATE
sorted_trials = sorted(
trials,
key=lambda t: t.best_result[self.reward_attr],
reverse=True)
self._cur_encoding_list = self._next_generation(sorted_trials)
self._cur_config_list = []
for one_hot in self._cur_encoding_list:
self._cur_config_list.append(
self.search_space.apply_one_hot_encoding(one_hot))
return AutoMLSearcher.CONTINUE
def _next_generation(self, sorted_trials):
"""Generate genes (encodings) for the next generation.
Use the top K (_keep_top_ratio) trials of the last generation
as candidates to generate the next generation. The action could
be selection, crossover, or mutation, chosen according to the
corresponding ratios (_selection_bound, _crossover_bound).
Args:
sorted_trials: List of finished trials with top
performance ones first.
Returns:
A list of new genes (encodings)
"""
candidate = []
next_generation = []
num_population = self._next_population_size(len(sorted_trials))
top_num = int(max(num_population * self._keep_top_ratio, 2))
for i in range(top_num):
candidate.append(sorted_trials[i].extra_arg)
next_generation.append(sorted_trials[i].extra_arg)
for i in range(top_num, num_population):
flip_coin = np.random.uniform()
if flip_coin < self._selection_bound:
next_generation.append(GeneticSearch._selection(candidate))
else:
if flip_coin < self._selection_bound + self._crossover_bound:
next_generation.append(GeneticSearch._crossover(candidate))
else:
next_generation.append(GeneticSearch._mutation(candidate))
return next_generation
def _next_population_size(self, last_population_size):
"""Calculate the population size of the next generation.
Intuitively, the population should decay after each iteration since
it should converge. It can also decrease the total resource required.
Args:
last_population_size: The last population size.
Returns:
The new population size.
"""
# TODO: implement a generic resource allocation algorithm.
return int(max(last_population_size * self._population_decay, 3))
@staticmethod
def _selection(candidate):
"""Perform selection action to candidates.
For example, new gene = sample_1 + the 5th bit of sample2.
Args:
candidate: List of candidate genes (encodings).
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
>>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])
>>> new_gene = _selection([gene1, gene2])
>>> # new_gene could be gene1 overwritten with the
>>> # 2nd parameter of gene2
>>> # in which case:
>>> # new_gene[0] = gene1[0]
>>> # new_gene[1] = gene2[1]
>>> # new_gene[2] = gene1[0]
Returns:
New gene (encoding)
"""
sample_index1 = np.random.choice(len(candidate))
sample_index2 = np.random.choice(len(candidate))
sample_1 = candidate[sample_index1]
sample_2 = candidate[sample_index2]
select_index = np.random.choice(len(sample_1))
logger.info(
LOGGING_PREFIX + "Perform selection from %sth to %sth at index=%s",
sample_index2, sample_index1, select_index)
next_gen = []
for i in range(len(sample_1)):
if i == select_index:
next_gen.append(sample_2[i])
else:
next_gen.append(sample_1[i])
return next_gen
@staticmethod
def _crossover(candidate):
"""Perform crossover action to candidates.
For example, new gene = 60% sample_1 + 40% sample_2.
Args:
candidate: List of candidate genes (encodings).
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
>>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])
>>> new_gene = _crossover([gene1, gene2])
>>> # new_gene could be the first [n=1] parameters of
>>> # gene1 + the rest of gene2
>>> # in which case:
>>> # new_gene[0] = gene1[0]
>>> # new_gene[1] = gene2[1]
>>> # new_gene[2] = gene1[1]
Returns:
New gene (encoding)
"""
sample_index1 = np.random.choice(len(candidate))
sample_index2 = np.random.choice(len(candidate))
sample_1 = candidate[sample_index1]
sample_2 = candidate[sample_index2]
cross_index = int(len(sample_1) * np.random.uniform(low=0.3, high=0.7))
logger.info(
LOGGING_PREFIX +
"Perform crossover between %sth and %sth at index=%s",
sample_index1, sample_index2, cross_index)
next_gen = []
for i in range(len(sample_1)):
if i > cross_index:
next_gen.append(sample_2[i])
else:
next_gen.append(sample_1[i])
return next_gen
@staticmethod
def _mutation(candidate, rate=0.1):
"""Perform mutation action to candidates.
For example, randomly change 10% of original sample
Args:
candidate: List of candidate genes (encodings).
rate: Percentage of mutation bits
Examples:
>>> # Genes that represent 3 parameters
>>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
>>> new_gene = _mutation([gene1])
>>> # new_gene could be the gene1 with the 3rd parameter changed
>>> # new_gene[0] = gene1[0]
>>> # new_gene[1] = gene1[1]
>>> # new_gene[2] = [0, 1] != gene1[2]
Returns:
New gene (encoding)
"""
sample_index = np.random.choice(len(candidate))
sample = candidate[sample_index]
idx_list = []
for i in range(int(max(len(sample) * rate, 1))):
idx = np.random.choice(len(sample))
idx_list.append(idx)
field = sample[idx] # one-hot encoding
field[np.argmax(field)] = 0
bit = np.random.choice(field.shape[0])
field[bit] = 1
logger.info(LOGGING_PREFIX + "Perform mutation on %sth at index=%s",
sample_index, str(idx_list))
return sample
| stephanie-wang/ray | python/ray/tune/automl/genetic_searcher.py | Python | apache-2.0 | 9,413 |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from abc import ABCMeta, abstractmethod
from pathlib import PurePath
from textwrap import dedent
from typing import List, Tuple, Type
from unittest.mock import Mock
import pytest
from pants.base.exceptions import ResolveError
from pants.build_graph.address import Address
from pants.engine.fs import (
EMPTY_DIRECTORY_DIGEST,
Digest,
FileContent,
InputFilesContent,
Workspace,
)
from pants.engine.interactive_runner import InteractiveProcessRequest, InteractiveRunner
from pants.engine.legacy.graph import HydratedTargetsWithOrigins, HydratedTargetWithOrigin
from pants.engine.legacy.structs import TargetAdaptorWithOrigin
from pants.engine.rules import UnionMembership
from pants.rules.core.fmt_test import FmtTest
from pants.rules.core.test import (
AddressAndTestResult,
CoverageDataBatch,
CoverageReport,
FilesystemCoverageReport,
Status,
Test,
TestDebugRequest,
TestResult,
TestRunner,
WrappedTestRunner,
run_tests,
)
from pants.testutil.engine.util import MockConsole, MockGet, run_rule
from pants.testutil.test_base import TestBase
from pants.util.ordered_set import OrderedSet
# TODO(#9141): replace this with a proper util to create `GoalSubsystem`s
class MockOptions:
def __init__(self, **values):
self.values = Mock(**values)
class MockTestRunner(TestRunner, metaclass=ABCMeta):
@staticmethod
def is_valid_target(_: TargetAdaptorWithOrigin) -> bool:
return True
@staticmethod
@abstractmethod
def status(_: Address) -> Status:
pass
@staticmethod
def stdout(_: Address) -> str:
return ""
@staticmethod
def stderr(_: Address) -> str:
return ""
@property
def test_result(self) -> TestResult:
address = self.adaptor_with_origin.adaptor.address
return TestResult(self.status(address), self.stdout(address), self.stderr(address))
class SuccessfulTestRunner(MockTestRunner):
@staticmethod
def status(_: Address) -> Status:
return Status.SUCCESS
@staticmethod
def stdout(address: Address) -> str:
return f"Successful test runner: Passed for {address}!"
class ConditionallySucceedsTestRunner(MockTestRunner):
@staticmethod
def status(address: Address) -> Status:
return Status.FAILURE if address.target_name == "bad" else Status.SUCCESS
@staticmethod
def stdout(address: Address) -> str:
return (
f"Conditionally succeeds test runner: Passed for {address}!"
if address.target_name != "bad"
else ""
)
@staticmethod
def stderr(address: Address) -> str:
return (
f"Conditionally succeeds test runner: Had an issue for {address}! Oh no!"
if address.target_name == "bad"
else ""
)
class InvalidTargetTestRunner(MockTestRunner):
@staticmethod
def is_valid_target(_: TargetAdaptorWithOrigin) -> bool:
return False
@staticmethod
def status(_: Address) -> Status:
return Status.FAILURE
class TestTest(TestBase):
def make_ipr(self) -> InteractiveProcessRequest:
input_files_content = InputFilesContent(
(FileContent(path="program.py", content=b"def test(): pass"),)
)
digest = self.request_single_product(Digest, input_files_content)
return InteractiveProcessRequest(
argv=("/usr/bin/python", "program.py",), run_in_workspace=False, input_files=digest,
)
def run_test_rule(
self,
*,
test_runner: Type[TestRunner],
targets: List[HydratedTargetWithOrigin],
debug: bool = False,
) -> Tuple[int, str]:
console = MockConsole(use_colors=False)
options = MockOptions(debug=debug, run_coverage=False)
interactive_runner = InteractiveRunner(self.scheduler)
workspace = Workspace(self.scheduler)
union_membership = UnionMembership({TestRunner: OrderedSet([test_runner])})
def mock_coordinator_of_tests(
wrapped_test_runner: WrappedTestRunner,
) -> AddressAndTestResult:
runner = wrapped_test_runner.runner
return AddressAndTestResult(
address=runner.adaptor_with_origin.adaptor.address,
test_result=runner.test_result, # type: ignore[attr-defined]
)
result: Test = run_rule(
run_tests,
rule_args=[
console,
options,
interactive_runner,
HydratedTargetsWithOrigins(targets),
workspace,
union_membership,
],
mock_gets=[
MockGet(
product_type=AddressAndTestResult,
subject_type=WrappedTestRunner,
mock=lambda wrapped_test_runner: mock_coordinator_of_tests(wrapped_test_runner),
),
MockGet(
product_type=TestDebugRequest,
subject_type=TestRunner,
mock=lambda _: TestDebugRequest(self.make_ipr()),
),
MockGet(
product_type=CoverageReport,
subject_type=CoverageDataBatch,
mock=lambda _: FilesystemCoverageReport(
result_digest=EMPTY_DIRECTORY_DIGEST,
directory_to_materialize_to=PurePath("mockety/mock"),
),
),
],
union_membership=union_membership,
)
return result.exit_code, console.stdout.getvalue()
def test_empty_target_noops(self) -> None:
exit_code, stdout = self.run_test_rule(
test_runner=SuccessfulTestRunner,
targets=[FmtTest.make_hydrated_target_with_origin(include_sources=False)],
)
assert exit_code == 0
assert stdout.strip() == ""
def test_invalid_target_noops(self) -> None:
exit_code, stdout = self.run_test_rule(
test_runner=InvalidTargetTestRunner,
targets=[FmtTest.make_hydrated_target_with_origin()],
)
assert exit_code == 0
assert stdout.strip() == ""
def test_single_target(self) -> None:
target_with_origin = FmtTest.make_hydrated_target_with_origin()
address = target_with_origin.target.adaptor.address
exit_code, stdout = self.run_test_rule(
test_runner=SuccessfulTestRunner, targets=[target_with_origin],
)
assert exit_code == 0
assert stdout == dedent(
f"""\
{address} stdout:
{SuccessfulTestRunner.stdout(address)}
{address} ..... SUCCESS
"""
)
def test_multiple_targets(self) -> None:
good_target = FmtTest.make_hydrated_target_with_origin(name="good")
good_address = good_target.target.adaptor.address
bad_target = FmtTest.make_hydrated_target_with_origin(name="bad")
bad_address = bad_target.target.adaptor.address
exit_code, stdout = self.run_test_rule(
test_runner=ConditionallySucceedsTestRunner, targets=[good_target, bad_target],
)
assert exit_code == 1
assert stdout == dedent(
f"""\
{good_address} stdout:
{ConditionallySucceedsTestRunner.stdout(good_address)}
{bad_address} stderr:
{ConditionallySucceedsTestRunner.stderr(bad_address)}
{good_address} ..... SUCCESS
{bad_address} ..... FAILURE
"""
)
def test_single_debug_target(self) -> None:
exit_code, stdout = self.run_test_rule(
test_runner=SuccessfulTestRunner,
targets=[FmtTest.make_hydrated_target_with_origin()],
debug=True,
)
assert exit_code == 0
def test_multiple_debug_targets_fail(self) -> None:
with pytest.raises(ResolveError):
self.run_test_rule(
test_runner=SuccessfulTestRunner,
targets=[
FmtTest.make_hydrated_target_with_origin(name="t1"),
FmtTest.make_hydrated_target_with_origin(name="t2"),
],
debug=True,
)
| wisechengyi/pants | src/python/pants/rules/core/test_test.py | Python | apache-2.0 | 8,678 |
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from matplotlib.transforms import Bbox
from scipy import stats
x = np.linspace(0, 1, 200)
pdfx = stats.beta(2, 5).pdf(x)
path = Path(np.array([x, pdfx]).transpose())
patch = PathPatch(path, facecolor="none", alpha=0)
plt.gca().add_patch(patch)
cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", ["#00bfbf", "#00bfbf", "#126a8a"])
im = plt.imshow(
np.array([[1, 0, 0], [1, 1, 0]]),
cmap=cmap,
interpolation="bicubic",
origin="lower",
extent=[0, 1, 0.0, 5],
aspect="auto",
clip_path=patch,
clip_on=True,
)
plt.axis("off")
plt.ylim(0, 5.5)
plt.xlim(0, 0.9)
bbox = Bbox([[0.75, 0.5], [5.4, 2.2]])
# plt.savefig('logo_00.png', dpi=300, bbox_inches=bbox, transparent=True)
plt.text(
x=0.04,
y=-0.01,
s="ArviZ",
clip_on=True,
fontdict={"name": "ubuntu mono", "fontsize": 62},
color="w",
)
plt.savefig("ArviZ.png", dpi=300, bbox_inches=bbox, transparent=True)
plt.savefig("ArviZ.pdf", dpi=300, bbox_inches=bbox, transparent=True)
plt.savefig("ArviZ.svg", dpi=300, bbox_inches=bbox, transparent=True)
plt.savefig("ArviZ.jpg", dpi=300, bbox_inches=bbox, transparent=True)
| arviz-devs/arviz | doc/logo/generate_logo.py | Python | apache-2.0 | 1,282 |
import temperanotes
import pytest, bisect
@pytest.fixture
def idiot_temp():
temp = [1, 1.05, 1.1, 1.15, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9] # not a temperament, just a set of numbers for testing
assert len(temp) == 12 # need 12 notes for the chromatic scale
return temp
def test_note_names():
exclude = ['B#', 'Cb', 'E#', 'Fb']
assert len(temperanotes.note_names_sharp) == 12
assert len(temperanotes.note_names_flat) == 12
for note in "ABCDEFG":
assert note in temperanotes.note_names_sharp
assert note in temperanotes.note_names_flat
note_accidental = note + "#"
if not note_accidental in exclude:
assert note_accidental in temperanotes.note_names_sharp
note_accidental = note + "b"
if not note_accidental in exclude:
assert note_accidental in temperanotes.note_names_flat
def test_get_key_index():
assert temperanotes.get_key_index('A') == 0
assert temperanotes.get_key_index('C') == 3
assert temperanotes.get_key_index('F') == 8
assert temperanotes.get_key_index('F#') == 9
assert temperanotes.get_key_index('G#') == 11
assert temperanotes.get_key_index('Ab') == 11
def test_normal_octave_in_C(idiot_temp):
# when starting from C,
# A is the 10th semitone of the chromatic scale, i.e. idiot_temp[9]
expected_freq = [440.0 / idiot_temp[9] * i for i in idiot_temp]
actual_freq = temperanotes.frequencies(temperament = idiot_temp, notes_low = 0, notes_high = 12, key = 'C', base_freq = 440.0, key_freq = 'A')
assert actual_freq == expected_freq
def test_normal_octave(idiot_temp):
expected_freq = [440.0 * i for i in idiot_temp]
actual_freq = temperanotes.frequencies(temperament = idiot_temp, notes_low = 0, notes_high = 12, key = 'A', base_freq = 440.0, key_freq = 'A')
assert actual_freq == expected_freq
def test_lower_octave(idiot_temp):
expected_freq = [440.0 / 2 * i for i in idiot_temp]
actual_freq = temperanotes.frequencies(temperament = idiot_temp, notes_low = 12, notes_high = 0, key = 'A', base_freq = 440.0, key_freq = 'A')
assert actual_freq == expected_freq
def test_one_octave_and_one_note(idiot_temp):
expected_freq = [440.0 * i for i in idiot_temp] + [440.0 * 2]
assert len(expected_freq) == 13 # obvious, but making sure there are no simple bugs in the test itself
actual_freq = temperanotes.frequencies(temperament = idiot_temp, notes_low = 0, notes_high = 13, key = 'A', base_freq = 440.0, key_freq = 'A')
assert actual_freq == expected_freq
def test_one_octave_and_one_note_per_direction(idiot_temp):
expected_freq_lo = [440.0 / 2 * i for i in idiot_temp]
expected_freq_hi = [440.0 * i for i in idiot_temp]
expected_freq = [440.0 / 4 * idiot_temp[-1]] + expected_freq_lo + expected_freq_hi + [440.0 * 2]
assert len(expected_freq) == 24 + 2 # obvious, but making sure there are no simple bugs in the test itself
actual_freq = temperanotes.frequencies(temperament = idiot_temp, notes_low = 13, notes_high = 13, key = 'A', base_freq = 440.0, key_freq = 'A')
assert actual_freq == expected_freq
def test_one_octave_and_half_per_direction(idiot_temp):
expected_freq_lolo = [440.0 / 4 * i for i in idiot_temp]
expected_freq_lo = [440.0 / 2 * i for i in idiot_temp]
expected_freq_hi = [440.0 * i for i in idiot_temp]
expected_freq_hihi = [440.0 * 2 * i for i in idiot_temp]
expected_freq = expected_freq_lolo[6:] + expected_freq_lo + expected_freq_hi + expected_freq_hihi[:6]
assert len(expected_freq) == 48 - 12 # obvious, but making sure there are no simple bugs in the test itself
actual_freq = temperanotes.frequencies(temperament = idiot_temp, notes_low = 18, notes_high = 18, key = 'A', base_freq = 440.0, key_freq = 'A')
assert actual_freq == expected_freq
def test_two_octaves(idiot_temp):
expected_freq_lo = [440.0 / 2 * i for i in idiot_temp]
expected_freq_hi = [440.0 * i for i in idiot_temp]
expected_freq = expected_freq_lo + expected_freq_hi
assert len(expected_freq) == 24 # obvious, but making sure there are no simple bugs in the test itself
actual_freq = temperanotes.frequencies(temperament = idiot_temp, notes_low = 12, notes_high = 12, key = 'A', base_freq = 440.0, key_freq = 'A')
assert actual_freq == expected_freq
def test_four_octaves(idiot_temp):
expected_freq_lolo = [440.0 / 4 * i for i in idiot_temp]
expected_freq_lo = [440.0 / 2 * i for i in idiot_temp]
expected_freq_hi = [440.0 * i for i in idiot_temp]
expected_freq_hihi = [440.0 * 2 * i for i in idiot_temp]
expected_freq = expected_freq_lolo + expected_freq_lo + expected_freq_hi + expected_freq_hihi
assert len(expected_freq) == 48 # obvious, but making sure there are no simple bugs in the test itself
actual_freq = temperanotes.frequencies(temperament = idiot_temp, notes_low = 24, notes_high = 24, key = 'A', base_freq = 440.0, key_freq = 'A')
assert actual_freq == expected_freq
def test_equal_temp():
expected = [1., 2. ** (1./12), 2. ** (1./6), 2. ** (1./4), 2. ** (1./3), 2. ** (5./12), 2. ** (1./2), 2. ** (7./12), 2. ** (2./3), 2. ** (3./4), 2. ** (5./6), 2. ** (11./12)]
actual = temperanotes.equal_temperament()
assert actual == expected
def test_cents():
expected = [100 * i for i in range(12)]
actual = temperanotes.to_cents(temperanotes.equal_temperament())
assert actual == expected
def test_read_temperament_nocents():
data = """#This is a comment
1
1.01 # this is another comment
1.3
1.4
# more comments
1.5
1.6
1.7
1.8
1.9
1.10
1.11
1.12"""
expected = [1, 1.01, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 1.10, 1.11, 1.12]
actual, cents = temperanotes.read_temperament(data)
assert actual == expected
assert len(cents) == 0
def test_read_temperament_withcents_and_math():
data = """#This is a comment
1, 100
sqrt(2), 200 # this is another comment
1.3, 4 ** (1/3) # 1.58 must round to 2
2 ** 1/12, 500
# more comments
1.5, 600
1.6, 700
1.7, 900
1.8, 1000
1.9, 2000 # comments can appear anywhere
1.10, 3000
1.11, 1
1.12, 7
# comments at the end"""
expected = [1, 1.4142135623730951, 1.3, 0.1666666666666666666666666, 1.5, 1.6, 1.7, 1.8, 1.9, 1.10, 1.11, 1.12]
actual, cents = temperanotes.read_temperament(data)
assert actual == expected
assert cents == [100, 200, 2, 500, 600, 700, 900, 1000, 2000, 3000, 1, 7]
def test_read_incorrect_temperaments():
data = 11 * "1, 100\n"
with pytest.raises(SystemExit):
temperanotes.read_temperament(data)
data = 13 * "1, 100\n"
with pytest.raises(SystemExit):
temperanotes.read_temperament(data)
def test_read_more_entries_cents():
data = (5 * "1, 100\n" +
2 * "2, 150, 200\n" + # additional data
5 * "7, 200\n")
with pytest.raises(SystemExit):
temperanotes.read_temperament(data)
def test_read_incorrect_cents():
data = (5 * "1, 100\n" +
2 * "2,\n" + # missing some cents (with comma)
5 * "7, 200\n")
with pytest.raises(SystemExit):
temperanotes.read_temperament(data)
def test_read_missing_cents():
data = (5 * "1, 100\n" +
2 * "2\n" + # missing some cents (without comma)
5 * "7, 200\n")
with pytest.raises(SystemExit):
temperanotes.read_temperament(data)
def test_read_file_with_errors():
data = (5 * "1, 100\n" +
2 * "foo_bar, 200\n" + # syntax error in frequencies
5 * "7, 700\n")
with pytest.raises(SystemExit):
temperanotes.read_temperament(data)
data = (5 * "1, 100\n" +
2 * "2, foo_bar\n" + # syntax error in cents
5 * "7, 700\n")
with pytest.raises(SystemExit):
temperanotes.read_temperament(data)
# not testing verify() since it's very simple
# not explicitly testing myeval() since it's implicitly tested in each read_temperament() invocation
def test_equal_piano():
piano = temperanotes.piano(temperanotes.equal_temperament())
index = bisect.bisect_left(piano, 440.) - 1
print "Index of the A-440", index, "(should be the 49th key or index 48)"
print "Value of index", index, "=", piano[index], "should be close to 440."
assert len(piano) == 88 # the piano has 88 keys
assert index == 48
assert piano[index] - 440. < 0.01
def test_equal_midi():
midi = temperanotes.midi(temperanotes.equal_temperament())
index = bisect.bisect_left(midi, 440.) - 1
print "Index of the A-440", index, "(should be 69)"
print "Value of index", index, "=", midi[index], "should be close to 440."
assert len(midi) == 128 # the midi spec's 128 notes (0 to 127)
assert index == 69
assert midi[index] - 440. < 0.01
| davidedelvento/temperanotes | test_temperanotes.py | Python | apache-2.0 | 9,430 |
from app import setup
app = setup()
| OniOni/ril | wsgi.py | Python | apache-2.0 | 37 |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transforms."""
import functools
from cvx2 import latest as cv2
import numpy as np
import tensorflow as tf
from tensorflow_graphics.projects.points_to_3Dobjects.utils import image as image_utils
from google3.third_party.tensorflow_models.object_detection.core import preprocessor
tf_data_augmentation = preprocessor
LIGHTING_EIGVAL = (0.2141788, 0.01817699, 0.00341571)
LIGHTING_EIGVEC = ((-0.58752847, -0.69563484, 0.4134035),
(-0.5832747, 0.00994535, -0.8122141),
(-0.560893, 0.7183267, 0.41158938))
def bgr_to_rgb(image):
return image[..., ::-1]
def rgb_to_bgr(image):
return image[..., ::-1]
def brightness(image, variance):
alpha = 1 + tf.random.uniform(
[1], dtype=tf.float32, minval=-variance, maxval=variance)[0]
return image * alpha
def contrast(image, image_grayscale_mean, variance):
alpha = 1 + tf.random.uniform(
[1], dtype=tf.float32, minval=-variance, maxval=variance)[0]
return image * alpha + image_grayscale_mean * (1 - alpha)
def saturation(image, image_grayscale, variance):
alpha = 1 + tf.random.uniform(
[1], dtype=tf.float32, minval=-variance, maxval=variance)[0]
return image * alpha + image_grayscale * (1 - alpha)
def lighting(image,
alpha_std=0.1,
eigval=LIGHTING_EIGVAL,
eigvec=LIGHTING_EIGVEC):
alpha = tf.random.normal([3], stddev=alpha_std, dtype=tf.float32)
return image + tf.tensordot(
tf.constant(eigvec), tf.constant(eigval) * alpha, axes=((1,), (0,)))
def color_augmentations(image, variance=0.4):
"""Color augmentations."""
if variance:
print(variance)
image_grayscale = tf.image.rgb_to_grayscale(bgr_to_rgb(image))
image_grayscale_mean = tf.math.reduce_mean(
image_grayscale, axis=[-3, -2, -1], keepdims=True)
brightness_fn = functools.partial(brightness, variance=variance)
contrast_fn = functools.partial(
contrast, image_grayscale_mean=image_grayscale_mean, variance=variance)
saturation_fn = functools.partial(
saturation, image_grayscale=image_grayscale, variance=variance)
function_order = tf.random.shuffle([0, 1, 2])
ii = tf.constant(0)
def _apply_fn(image, ii):
tmp_ii = function_order[ii]
image = tf.switch_case(
tmp_ii, {
0: lambda: brightness_fn(image),
1: lambda: contrast_fn(image),
2: lambda: saturation_fn(image)
})
ii = ii + 1
return image, ii
(image, _) = tf.while_loop(lambda image, ii: tf.less(ii, 3),
_apply_fn,
[image, ii])
image = lighting(image)
return image
def subtract_mean_and_normalize(image, means, std, random=False):
if len(means) != len(std):
raise ValueError('len(means) and len(std) must match')
image = image / 255
if random:
image = color_augmentations(image)
image = (image - tf.constant(means)) / tf.constant(std)
return image
def _get_image_border(border, size):
i = tf.constant(1)
cond = lambda i: tf.math.less_equal(size - border // i, border // i)
body = lambda i: tf.multiply(i, 2)
r = tf.while_loop(cond, body, [i])
return border // r[0]
def compute_image_size_affine_transform(original_image_size,
input_image_size,
padding_keep_size=127,
random=False,
random_side_scale_range=None):
"""Computer affine transform."""
if input_image_size is None:
input_h = tf.bitwise.bitwise_or(original_image_size[-2],
padding_keep_size) + 1
input_w = tf.bitwise.bitwise_or(original_image_size[-1],
padding_keep_size) + 1
input_size = tf.cast(tf.stack([input_w, input_h]), tf.float32)
side_size = tf.cast(tf.stack([input_w, input_h]), tf.float32)
center = tf.cast(
tf.stack([original_image_size[-1] // 2, original_image_size[-2] // 2]),
tf.float32)
else:
input_size = tf.cast(tf.stack(input_image_size), tf.float32)
max_side = tf.reduce_max(original_image_size[-2:])
side_size = tf.cast(tf.stack([max_side, max_side]), tf.float32)
image_shape = tf.cast(original_image_size, tf.float32)
center = tf.stack([image_shape[-1] / 2., image_shape[-2] / 2.])
if random:
assert random_side_scale_range is not None, (
'Random random_side_scale_range has to be provided when computing '
'random affine transformation!')
scales = tf.range(*random_side_scale_range)
scale_ii = tf.random.categorical(
tf.ones_like(scales)[None, ...], 1, dtype=tf.int32)[0, 0]
side_size = side_size * scales[scale_ii]
h_border = _get_image_border(128, original_image_size[-2])
w_border = _get_image_border(128, original_image_size[-1])
center_x = tf.random.uniform([1],
dtype=tf.int32,
minval=w_border,
maxval=(original_image_size[-1] - w_border))[0]
center_y = tf.random.uniform([1],
dtype=tf.int32,
minval=h_border,
maxval=(original_image_size[-2] - h_border))[0]
center = tf.cast(tf.stack([center_x, center_y]), tf.float32)
return center, side_size, input_size
def affine_transform(image,
original_image_size,
bounding_boxes,
instance_masks,
image_size,
padding_keep_size=127,
transform_gt_annotations=False,
random=False,
random_side_scale_range=None,
random_flip_probability=False):
"""Affine transform."""
# bounding_boxes: normalized coordinates with (ymin, xmin, ymax, xmax)
center, side_size, input_size = compute_image_size_affine_transform(
tf.shape(image)[:-1], image_size, padding_keep_size, random,
random_side_scale_range)
flipped = False
if random:
def _flip(flipped, image, center):
flipped = tf.math.logical_not(flipped)
image = image[..., ::-1, :]
center = tf.tensor_scatter_nd_update(center, tf.constant(
[[0]]), [tf.cast(tf.shape(image)[-2], center.dtype) - center[0]])
return flipped, image, center
def _no_flip(flipped, image, center):
return flipped, image, center
flipped, image, center = tf.cond(
tf.random.uniform([1], dtype=tf.float32)[0] < random_flip_probability,
lambda: _flip(flipped, image, center),
lambda: _no_flip(flipped, image, center))
if instance_masks is not None:
def _flip_mask(mask):
return mask[..., ::-1]
def _no_flip_mask(mask):
return mask
instance_masks = tf.cond(
flipped,
lambda: _flip_mask(instance_masks),
lambda: _no_flip_mask(instance_masks))
# affine_transform_image_np(image, center, side_size, input_size)
input_image_size_static = image.shape
[
image,
] = tf.py_function(affine_transform_image_np,
[image, center, side_size, input_size], [tf.float32])
if len(input_image_size_static) == 4:
image.set_shape([image.shape[0], None, None, image.shape[-1]])
else:
image.set_shape([None, None, image.shape[-1]])
if transform_gt_annotations:
bounding_boxes_shape = bounding_boxes.shape
[
bounding_boxes,
] = tf.py_function(_affine_transform_points_np, [
bounding_boxes, original_image_size, center, side_size, input_size,
flipped
], [tf.float32])
bounding_boxes.set_shape(bounding_boxes_shape)
if instance_masks is not None:
instance_masks_size_static = instance_masks.shape
[
instance_masks,
] = tf.py_function(affine_transform_image_np, [
instance_masks[..., None], center, side_size, input_size,
cv2.INTER_NEAREST
], [tf.float32])
if len(instance_masks_size_static) == 4:
instance_masks.set_shape([instance_masks.shape[0], None, None, None, 1])
else:
instance_masks.set_shape([None, None, None, 1])
instance_masks = instance_masks[..., 0]
original_image_size = tf.cast(input_size, original_image_size.dtype)
return image, original_image_size, bounding_boxes, instance_masks
def affine_transform_image_np(image,
center,
side_size,
input_size,
interpolation_mode=cv2.INTER_LINEAR):
"""Affine transform numpy."""
input_w, input_h = input_size.numpy()[0], input_size.numpy()[1]
trans_input = image_utils.get_affine_transform(center.numpy(),
side_size.numpy(), 0,
[input_w, input_h])
image_np = image.numpy()
if image_np.ndim >= 4:
image_np_shape = image_np.shape
image_np = np.reshape(image_np, (-1, *image_np_shape[-3:]))
input_images = []
for ii in range(image_np.shape[0]):
warped_input_image = cv2.warpAffine(
image_np[ii, ...],
trans_input, (input_w, input_h),
flags=interpolation_mode)
if warped_input_image.ndim != image_np[ii, ...].ndim:
warped_input_image = warped_input_image[..., None]
input_images.append(warped_input_image)
input_image = np.stack(input_images, axis=0)
input_image = np.reshape(input_image,
(*image_np_shape[:-3], *input_image.shape[-3:]))
else:
input_image = cv2.warpAffine(
image_np,
trans_input, (input_w, input_h),
flags=interpolation_mode)
return input_image.astype(np.float32)
def _affine_transform_points_np(bounding_boxes, original_image_size, center,
side_size, output_size, flip):
"""Affine transform points."""
bounding_boxes_np = bounding_boxes.numpy()
bounding_boxes_shape = bounding_boxes_np.shape
original_image_size_np = original_image_size.numpy()
if len(bounding_boxes_shape) == 3:
bounding_boxes_np = np.reshape(bounding_boxes_np, [-1, 4])
h, w = original_image_size_np[:2]
if flip:
bounding_boxes_np[..., [1, 3]] = 1.0 - bounding_boxes_np[..., [3, 1]]
bounding_boxes_np = bounding_boxes_np * [h, w, h, w]
box_min = np.stack([bounding_boxes_np[:, 1], bounding_boxes_np[:, 0]], axis=1)
box_max = np.stack([bounding_boxes_np[:, 3], bounding_boxes_np[:, 2]], axis=1)
box_min = image_utils.transform_points(box_min, center.numpy(),
side_size.numpy(), output_size.numpy(),
False)
box_max = image_utils.transform_points(box_max, center.numpy(),
side_size.numpy(), output_size.numpy(),
False)
box_min = np.stack([box_min[:, 1], box_min[:, 0]], axis=1)
box_max = np.stack([box_max[:, 1], box_max[:, 0]], axis=1)
bounding_boxes_np = np.concatenate([box_min, box_max], axis=1)
h, w = output_size.numpy()[:2]
bounding_boxes_np = np.clip(bounding_boxes_np / [h, w, h, w], 0.0, 1.0)
if len(bounding_boxes_shape) == 3:
bounding_boxes_np = np.reshape(bounding_boxes_np, bounding_boxes_shape)
return bounding_boxes_np.astype(np.float32)
def transform_predictions(points, center, scale, output_size):
# transform_points_np(points, center, scale, output_size)
[
points,
] = tf.py_function(transform_points_np,
[points, center, scale, output_size, True], [tf.float32])
return points
def transform_points_np(points, center, scale, output_size, inverse):
new_points = image_utils.transform_points(points.numpy(), center.numpy(),
scale.numpy(), output_size.numpy(),
inverse)
return new_points
|
tensorflow/graphics
|
tensorflow_graphics/projects/points_to_3Dobjects/transforms/transforms.py
|
Python
|
apache-2.0
| 12,632
|
"""ApacheParser is a member object of the ApacheConfigurator class."""
import copy
import fnmatch
import logging
import os
import re
import subprocess
import sys
import six
from certbot import errors
from certbot_apache import constants
logger = logging.getLogger(__name__)
class ApacheParser(object):
"""Class handles the fine details of parsing the Apache Configuration.
.. todo:: Make parsing general... remove sites-available etc...
:ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar set modules: All module names that are currently enabled.
:ivar dict loc: Location to place directives, root - configuration origin,
default - user config file, name - NameVirtualHost,
"""
arg_var_interpreter = re.compile(r"\$\{[^ \}]*}")
fnmatch_chars = set(["*", "?", "\\", "[", "]"])
def __init__(self, aug, root, vhostroot=None, version=(2, 4),
configurator=None):
# Note: Order is important here.
# Needed for calling save() with reverter functionality that resides in
# AugeasConfigurator superclass of ApacheConfigurator. This resolves
# issues with aug.load() after adding new files / defines to parse tree
self.configurator = configurator
# This uses the binary, so it can be done first.
# https://httpd.apache.org/docs/2.4/mod/core.html#define
# https://httpd.apache.org/docs/2.4/mod/core.html#ifdefine
# This only handles invocation parameters and Define directives!
self.parser_paths = {}
self.variables = {}
if version >= (2, 4):
self.update_runtime_variables()
self.aug = aug
# Find configuration root and make sure augeas can parse it.
self.root = os.path.abspath(root)
self.loc = {"root": self._find_config_root()}
self.parse_file(self.loc["root"])
# This problem has been fixed in Augeas 1.0
self.standardize_excl()
# Temporarily set modules to be empty, so that find_dirs can work
# https://httpd.apache.org/docs/2.4/mod/core.html#ifmodule
# This needs to come before locations are set.
self.modules = set()
self.init_modules()
# Set up rest of locations
self.loc.update(self._set_locations())
self.existing_paths = copy.deepcopy(self.parser_paths)
# Must also attempt to parse additional virtual host root
if vhostroot:
self.parse_file(os.path.abspath(vhostroot) + "/" +
constants.os_constant("vhost_files"))
# check to see if there were unparsed define statements
if version < (2, 4):
if self.find_dir("Define", exclude=False):
raise errors.PluginError("Error parsing runtime variables")
def add_include(self, main_config, inc_path):
"""Add Include for a new configuration file if one does not exist
:param str main_config: file path to main Apache config file
:param str inc_path: path of file to include
"""
if len(self.find_dir(case_i("Include"), inc_path)) == 0:
logger.debug("Adding Include %s to %s",
inc_path, get_aug_path(main_config))
self.add_dir(
get_aug_path(main_config),
"Include", inc_path)
# Add new path to parser paths
new_dir = os.path.dirname(inc_path)
new_file = os.path.basename(inc_path)
if new_dir in self.existing_paths.keys():
# Add to existing path
self.existing_paths[new_dir].append(new_file)
else:
# Create a new path
self.existing_paths[new_dir] = [new_file]
def init_modules(self):
"""Iterates on the configuration until no new modules are loaded.
.. todo:: This should be attempted with a binary to avoid
the iteration issue. Else... parse and enable mods at same time.
"""
# Since modules are being initiated... clear existing set.
self.modules = set()
matches = self.find_dir("LoadModule")
iterator = iter(matches)
# Make sure prev_size != cur_size for do: while: iteration
prev_size = -1
while len(self.modules) != prev_size:
prev_size = len(self.modules)
for match_name, match_filename in six.moves.zip(
iterator, iterator):
mod_name = self.get_arg(match_name)
mod_filename = self.get_arg(match_filename)
if mod_name and mod_filename:
self.modules.add(mod_name)
self.modules.add(os.path.basename(mod_filename)[:-2] + "c")
else:
logger.debug("Could not read LoadModule directive from " +
"Augeas path: {0}".format(match_name[6:]))
def update_runtime_variables(self):
""""
.. note:: Compile time variables (apache2ctl -V) are not used within
the dynamic configuration files. These should not be parsed or
interpreted.
.. todo:: Create separate compile time variables...
simply for arg_get()
"""
stdout = self._get_runtime_cfg()
variables = dict()
matches = re.compile(r"Define: ([^ \n]*)").findall(stdout)
try:
matches.remove("DUMP_RUN_CFG")
except ValueError:
return
for match in matches:
if match.count("=") > 1:
logger.error("Unexpected number of equal signs in "
"runtime config dump.")
raise errors.PluginError(
"Error parsing Apache runtime variables")
parts = match.partition("=")
variables[parts[0]] = parts[2]
self.variables = variables
def _get_runtime_cfg(self): # pylint: disable=no-self-use
"""Get runtime configuration info.
:returns: stdout from DUMP_RUN_CFG
"""
try:
proc = subprocess.Popen(
constants.os_constant("define_cmd"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = proc.communicate()
except (OSError, ValueError):
logger.error(
"Error running command %s for runtime parameters!%s",
constants.os_constant("define_cmd"), os.linesep)
raise errors.MisconfigurationError(
"Error accessing loaded Apache parameters: %s",
constants.os_constant("define_cmd"))
# Small errors that do not impede normal operation
if proc.returncode != 0:
logger.warning("Error in checking parameter list: %s", stderr)
raise errors.MisconfigurationError(
"Apache is unable to check whether or not the module is "
"loaded because Apache is misconfigured.")
return stdout
def filter_args_num(self, matches, args): # pylint: disable=no-self-use
"""Filter out directives with specific number of arguments.
This function makes the assumption that all related arguments are given
in order. Thus /files/apache/directive[5]/arg[2] must come immediately
after /files/apache/directive[5]/arg[1]. Runs in 1 linear pass.
:param string matches: Matches of all directives with arg nodes
:param int args: Number of args you would like to filter
:returns: List of directives that contain # of arguments.
(arg is stripped off)
"""
filtered = []
if args == 1:
for i in range(len(matches)):
if matches[i].endswith("/arg"):
filtered.append(matches[i][:-4])
else:
for i in range(len(matches)):
if matches[i].endswith("/arg[%d]" % args):
# Make sure we don't cause an IndexError (end of list)
# Check to make sure arg + 1 doesn't exist
if (i == (len(matches) - 1) or
not matches[i + 1].endswith("/arg[%d]" %
(args + 1))):
filtered.append(matches[i][:-len("/arg[%d]" % args)])
return filtered
def add_dir_to_ifmodssl(self, aug_conf_path, directive, args):
"""Adds directive and value to IfMod ssl block.
Adds given directive and value along configuration path within
an IfMod mod_ssl.c block. If the IfMod block does not exist in
the file, it is created.
:param str aug_conf_path: Desired Augeas config path to add directive
:param str directive: Directive you would like to add, e.g. Listen
:param args: Values of the directive; str "443" or list of str
:type args: list
"""
# TODO: Add error checking code... does the path given even exist?
# Does it throw exceptions?
if_mod_path = self._get_ifmod(aug_conf_path, "mod_ssl.c")
# IfModule can have only one valid argument, so append after
self.aug.insert(if_mod_path + "arg", "directive", False)
nvh_path = if_mod_path + "directive[1]"
self.aug.set(nvh_path, directive)
if len(args) == 1:
self.aug.set(nvh_path + "/arg", args[0])
else:
for i, arg in enumerate(args):
self.aug.set("%s/arg[%d]" % (nvh_path, i + 1), arg)
def _get_ifmod(self, aug_conf_path, mod):
"""Returns the path to <IfMod mod> and creates one if it doesn't exist.
:param str aug_conf_path: Augeas configuration path
:param str mod: module ie. mod_ssl.c
"""
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
if len(if_mods) == 0:
self.aug.set("%s/IfModule[last() + 1]" % aug_conf_path, "")
self.aug.set("%s/IfModule[last()]/arg" % aug_conf_path, mod)
if_mods = self.aug.match(("%s/IfModule/*[self::arg='%s']" %
(aug_conf_path, mod)))
# Strip off "arg" at end of first ifmod path
return if_mods[0][:len(if_mods[0]) - 3]
def add_dir(self, aug_conf_path, directive, args):
"""Appends directive to the end fo the file given by aug_conf_path.
.. note:: Not added to AugeasConfigurator because it may depend
on the lens
:param str aug_conf_path: Augeas configuration path to add directive
:param str directive: Directive to add
:param args: Value of the directive. ie. Listen 443, 443 is arg
:type args: list or str
"""
self.aug.set(aug_conf_path + "/directive[last() + 1]", directive)
if isinstance(args, list):
for i, value in enumerate(args, 1):
self.aug.set(
"%s/directive[last()]/arg[%d]" % (aug_conf_path, i), value)
else:
self.aug.set(aug_conf_path + "/directive[last()]/arg", args)
def find_dir(self, directive, arg=None, start=None, exclude=True):
"""Finds directive in the configuration.
Recursively searches through config files to find directives
Directives should be in the form of a case insensitive regex currently
.. todo:: arg should probably be a list
.. todo:: arg search currently only supports direct matching. It does
not handle the case of variables or quoted arguments. This should
be adapted to use a generic search for the directive and then do a
case-insensitive self.get_arg filter
Note: Augeas is inherently case sensitive while Apache is case
insensitive. Augeas 1.0 allows case insensitive regexes like
regexp(/Listen/, "i"), however the version currently supported
by Ubuntu (0.10) does not. Thus I have included my own case insensitive
transformation by calling case_i() on everything to maintain
compatibility.
:param str directive: Directive to look for
:param arg: Specific value directive must have, None if all should
be considered
:type arg: str or None
:param str start: Beginning Augeas path to begin looking
:param bool exclude: Whether or not to exclude directives based on
variables and enabled modules
"""
# Cannot place member variable in the definition of the function so...
if not start:
start = get_aug_path(self.loc["root"])
# No regexp code
# if arg is None:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive + "']/arg")
# else:
# matches = self.aug.match(start +
# "//*[self::directive='" + directive +
# "']/* [self::arg='" + arg + "']")
# includes = self.aug.match(start +
# "//* [self::directive='Include']/* [label()='arg']")
regex = "(%s)|(%s)|(%s)" % (case_i(directive),
case_i("Include"),
case_i("IncludeOptional"))
matches = self.aug.match(
"%s//*[self::directive=~regexp('%s')]" % (start, regex))
if exclude:
matches = self._exclude_dirs(matches)
if arg is None:
arg_suffix = "/arg"
else:
arg_suffix = "/*[self::arg=~regexp('%s')]" % case_i(arg)
ordered_matches = []
# TODO: Wildcards should be included in alphabetical order
# https://httpd.apache.org/docs/2.4/mod/core.html#include
for match in matches:
dir_ = self.aug.get(match).lower()
if dir_ == "include" or dir_ == "includeoptional":
ordered_matches.extend(self.find_dir(
directive, arg,
self._get_include_path(self.get_arg(match + "/arg")),
exclude))
# This additionally allows Include
if dir_ == directive.lower():
ordered_matches.extend(self.aug.match(match + arg_suffix))
return ordered_matches
def get_arg(self, match):
"""Uses augeas.get to get argument value and interprets result.
This also converts all variables and parameters appropriately.
"""
value = self.aug.get(match)
# No need to strip quotes for variables, as apache2ctl already does
# this, but we do need to strip quotes for all normal arguments.
# Note: normal argument may be a quoted variable
# e.g. strip now, not later
if not value:
return None
else:
value = value.strip("'\"")
variables = ApacheParser.arg_var_interpreter.findall(value)
for var in variables:
# Strip off ${ and }
try:
value = value.replace(var, self.variables[var[2:-1]])
except KeyError:
raise errors.PluginError("Error Parsing variable: %s" % var)
return value
def _exclude_dirs(self, matches):
"""Exclude directives that are not loaded into the configuration."""
filters = [("ifmodule", self.modules), ("ifdefine", self.variables)]
valid_matches = []
for match in matches:
for filter_ in filters:
if not self._pass_filter(match, filter_):
break
else:
valid_matches.append(match)
return valid_matches
def _pass_filter(self, match, filter_):
"""Determine if directive passes a filter.
:param str match: Augeas path
:param list filter: list of tuples of form
[("lowercase if directive", set of relevant parameters)]
"""
match_l = match.lower()
last_match_idx = match_l.find(filter_[0])
while last_match_idx != -1:
# Check args
end_of_if = match_l.find("/", last_match_idx)
# This should be aug.get (vars are not used e.g. parser.aug_get)
expression = self.aug.get(match[:end_of_if] + "/arg")
if expression.startswith("!"):
# Strip off "!"
if expression[1:] in filter_[1]:
return False
else:
if expression not in filter_[1]:
return False
last_match_idx = match_l.find(filter_[0], end_of_if)
return True
def _get_include_path(self, arg):
"""Converts an Apache Include directive into Augeas path.
Converts an Apache Include directive argument into an Augeas
searchable path
.. todo:: convert to use os.path.join()
:param str arg: Argument of Include directive
:returns: Augeas path string
:rtype: str
"""
# Check to make sure only expected characters are used <- maybe remove
# validChars = re.compile("[a-zA-Z0-9.*?_-/]*")
# matchObj = validChars.match(arg)
# if matchObj.group() != arg:
# logger.error("Error: Invalid regexp characters in %s", arg)
# return []
# Remove beginning and ending quotes
arg = arg.strip("'\"")
# Standardize the include argument based on server root
if not arg.startswith("/"):
# Normpath will condense ../
arg = os.path.normpath(os.path.join(self.root, arg))
else:
arg = os.path.normpath(arg)
# Attempts to add a transform to the file if one does not already exist
if os.path.isdir(arg):
self.parse_file(os.path.join(arg, "*"))
else:
self.parse_file(arg)
# Argument represents an fnmatch regular expression, convert it
# Split up the path and convert each into an Augeas accepted regex
# then reassemble
split_arg = arg.split("/")
for idx, split in enumerate(split_arg):
if any(char in ApacheParser.fnmatch_chars for char in split):
# Turn it into an augeas regex
# TODO: Can this be an augeas glob instead of a regex?
split_arg[idx] = ("* [label()=~regexp('%s')]" %
self.fnmatch_to_re(split))
# Reassemble the argument
# Note: This also normalizes the argument /serverroot/ -> /serverroot
arg = "/".join(split_arg)
return get_aug_path(arg)
def fnmatch_to_re(self, clean_fn_match): # pylint: disable=no-self-use
"""Method converts Apache's basic fnmatch to regular expression.
Assumption - Configs are assumed to be well-formed and only writable by
privileged users.
https://apr.apache.org/docs/apr/2.0/apr__fnmatch_8h_source.html
http://apache2.sourcearchive.com/documentation/2.2.16-6/apr__fnmatch_8h_source.html
:param str clean_fn_match: Apache style filename match, like globs
:returns: regex suitable for augeas
:rtype: str
"""
if sys.version_info < (3, 6):
# This strips off the final \Z(?ms)
return fnmatch.translate(clean_fn_match)[:-7]
else: # pragma: no cover
# Since Python 3.6, it returns a different pattern like (?s:.*\.load)\Z
return fnmatch.translate(clean_fn_match)[4:-3]
def parse_file(self, filepath):
"""Parse file with Augeas
Checks to see if filepath is parsed by Augeas
If filepath isn't parsed, the file is added and Augeas is reloaded
:param str filepath: Apache config file path
"""
use_new, remove_old = self._check_path_actions(filepath)
# Ensure that we have the latest Augeas DOM state on disk before
# calling aug.load() which reloads the state from disk
if self.configurator:
self.configurator.ensure_augeas_state()
# Test if augeas has already included the file for the Httpd lens
# Note: This works for augeas globs, ie. *.conf
if use_new:
inc_test = self.aug.match(
"/augeas/load/Httpd['%s' =~ glob(incl)]" % filepath)
if not inc_test:
# Load up files
# This doesn't seem to work on TravisCI
# self.aug.add_transform("Httpd.lns", [filepath])
if remove_old:
self._remove_httpd_transform(filepath)
self._add_httpd_transform(filepath)
self.aug.load()
def parsed_in_current(self, filep):
"""Checks if the file path is parsed by current Augeas parser config
ie. returns True if the file is found on a path that's found in live
Augeas configuration.
:param str filep: Path to match
:returns: True if file is parsed in existing configuration tree
:rtype: bool
"""
return self._parsed_by_parser_paths(filep, self.parser_paths)
def parsed_in_original(self, filep):
"""Checks if the file path is parsed by existing Apache config.
ie. returns True if the file is found on a path that matches Include or
IncludeOptional statement in the Apache configuration.
:param str filep: Path to match
:returns: True if file is parsed in existing configuration tree
:rtype: bool
"""
return self._parsed_by_parser_paths(filep, self.existing_paths)
def _parsed_by_parser_paths(self, filep, paths):
"""Helper function that searches through provided paths and returns
True if file path is found in the set"""
for directory in paths.keys():
for filename in paths[directory]:
if fnmatch.fnmatch(filep, os.path.join(directory, filename)):
return True
return False
def _check_path_actions(self, filepath):
"""Determine actions to take with a new augeas path
This helper function will return a tuple that defines
if we should try to append the new filepath to augeas
parser paths, and / or remove the old one with more
narrow matching.
:param str filepath: filepath to check the actions for
"""
try:
new_file_match = os.path.basename(filepath)
existing_matches = self.parser_paths[os.path.dirname(filepath)]
if "*" in existing_matches:
use_new = False
else:
use_new = True
if new_file_match == "*":
remove_old = True
else:
remove_old = False
except KeyError:
use_new = True
remove_old = False
return use_new, remove_old
def _remove_httpd_transform(self, filepath):
"""Remove path from Augeas transform
:param str filepath: filepath to remove
"""
remove_basenames = self.parser_paths[os.path.dirname(filepath)]
remove_dirname = os.path.dirname(filepath)
for name in remove_basenames:
remove_path = remove_dirname + "/" + name
remove_inc = self.aug.match(
"/augeas/load/Httpd/incl [. ='%s']" % remove_path)
self.aug.remove(remove_inc[0])
self.parser_paths.pop(remove_dirname)
def _add_httpd_transform(self, incl):
"""Add a transform to Augeas.
This function will correctly add a transform to augeas
The existing augeas.add_transform in python doesn't seem to work for
Travis CI as it loads in libaugeas.so.0.10.0
:param str incl: filepath to include for transform
"""
last_include = self.aug.match("/augeas/load/Httpd/incl [last()]")
if last_include:
# Insert a new node immediately after the last incl
self.aug.insert(last_include[0], "incl", False)
self.aug.set("/augeas/load/Httpd/incl[last()]", incl)
# On first use... must load lens and add file to incl
else:
# Augeas uses base 1 indexing... insert at beginning...
self.aug.set("/augeas/load/Httpd/lens", "Httpd.lns")
self.aug.set("/augeas/load/Httpd/incl", incl)
# Add included path to paths dictionary
try:
self.parser_paths[os.path.dirname(incl)].append(
os.path.basename(incl))
except KeyError:
self.parser_paths[os.path.dirname(incl)] = [
os.path.basename(incl)]
def standardize_excl(self):
"""Standardize the excl arguments for the Httpd lens in Augeas.
Note: Hack!
Standardize the excl arguments for the Httpd lens in Augeas
Servers sometimes give incorrect defaults
Note: This problem should be fixed in Augeas 1.0. Unfortunately,
Augeas 0.10 appears to be the most popular version currently.
"""
# attempt to protect against augeas error in 0.10.0 - ubuntu
# *.augsave -> /*.augsave upon augeas.load()
# Try to avoid bad httpd files
# There has to be a better way... but after a day and a half of testing
# I had no luck
# This is a hack... work around... submit to augeas if still not fixed
excl = ["*.augnew", "*.augsave", "*.dpkg-dist", "*.dpkg-bak",
"*.dpkg-new", "*.dpkg-old", "*.rpmsave", "*.rpmnew",
"*~",
self.root + "/*.augsave",
self.root + "/*~",
self.root + "/*/*augsave",
self.root + "/*/*~",
self.root + "/*/*/*.augsave",
self.root + "/*/*/*~"]
for i, excluded in enumerate(excl, 1):
self.aug.set("/augeas/load/Httpd/excl[%d]" % i, excluded)
self.aug.load()
def _set_locations(self):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
default = self.loc["root"]
temp = os.path.join(self.root, "ports.conf")
if os.path.isfile(temp):
listen = temp
name = temp
else:
listen = default
name = default
return {"default": default, "listen": listen, "name": name}
def _find_config_root(self):
"""Find the Apache Configuration Root file."""
location = ["apache2.conf", "httpd.conf", "conf/httpd.conf"]
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError("Could not find configuration root")
def case_i(string):
"""Returns case insensitive regex.
Returns a sloppy, but necessary version of a case insensitive regex.
Any string should be able to be submitted and the string is
escaped and then made case insensitive.
May be replaced by a more proper /i once augeas 1.0 is widely
supported.
:param str string: string to make case i regex
"""
return "".join(["[" + c.upper() + c.lower() + "]"
if c.isalpha() else c for c in re.escape(string)])
def get_aug_path(file_path):
"""Return augeas path for full filepath.
:param str file_path: Full filepath
"""
return "/files%s" % file_path
|
jsha/letsencrypt
|
certbot-apache/certbot_apache/parser.py
|
Python
|
apache-2.0
| 27,616
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class DummyKeyResponse(object):
def __init__(self, gen=1):
self.generation = gen
self.name = ""
def request(self, path, method, **kwargs):
self.name = path.split('/')[-1]
return self
def json(self):
return {"generation": self.generation,
"name": self.name}
class DummyTicketResponse(object):
def __init__(self, signature, metadata, ticket):
self.signature = signature
self.metadata = metadata
self.ticket = ticket
def request(self, path, method, **kwargs):
return self
def json(self):
return {"signature": self.signature,
"metadata": self.metadata,
"ticket": self.ticket}
class DummyGroupResponse(object):
def __init__(self, name):
self.name = name
def request(self, path, method, **kwargs):
return self
def json(self):
return {"name": self.name}
class DummyGroupKeyResponse(object):
def __init__(self, signature, metadata, group_key):
self.signature = signature
self.metadata = metadata
self.group_key = group_key
def request(self, path, method, **kwargs):
return self
def json(self):
return {"signature": self.signature,
"metadata": self.metadata,
"group_key": self.group_key}
|
jamielennox/python-kiteclient
|
kiteclient/tests/v1/utils.py
|
Python
|
apache-2.0
| 1,915
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import fixtures
import microversion_parse
import os
from unittest import mock
from oslo_utils.fixture import uuidsentinel
from oslotest import base
from nova.compute import provider_config
from nova import exception as nova_exc
class SchemaValidationMixin(base.BaseTestCase):
"""This class provides the basic methods for running schema validation test
cases. It can be used along with ddt.file_data to test a specific schema
version using tests defined in yaml files. See SchemaValidationTestCasesV1
for an example of how this was done for schema version 1.
Because decorators can only access class properties of the class they are
defined in (even when overriding values in the subclass), the decorators
need to be placed in the subclass. This is why there are test_ functions in
the subclass that call the run_test_ methods in this class. This should
keep things simple as more schema versions are added.
"""
def setUp(self):
super(SchemaValidationMixin, self).setUp()
self.mock_load_yaml = self.useFixture(
fixtures.MockPatchObject(
provider_config, '_load_yaml_file')).mock
self.mock_LOG = self.useFixture(
fixtures.MockPatchObject(
provider_config, 'LOG')).mock
def set_config(self, config=None):
data = config or {}
self.mock_load_yaml.return_value = data
return data
def run_test_validation_errors(self, config, expected_messages):
self.set_config(config=config)
actual_msg = self.assertRaises(
nova_exc.ProviderConfigException,
provider_config._parse_provider_yaml, 'test_path').message
for msg in expected_messages:
self.assertIn(msg, actual_msg)
def run_test_validation_success(self, config):
reference = self.set_config(config=config)
actual = provider_config._parse_provider_yaml('test_path')
self.assertEqual(reference, actual)
def run_schema_version_matching(
self, min_schema_version, max_schema_version):
# Note: _load_yaml_file is mocked, so the value is not important;
# however, it may appear in log messages, so changing it could
# result in tests failing unless the expected_messages field
# is updated in the test data.
path = 'test_path'
# test exactly min and max versions are supported
self.set_config(config={
'meta': {'schema_version': str(min_schema_version)}})
provider_config._parse_provider_yaml(path)
self.set_config(config={
'meta': {'schema_version': str(max_schema_version)}})
provider_config._parse_provider_yaml(path)
self.mock_LOG.warning.assert_not_called()
# test max major+1 raises
higher_major = microversion_parse.Version(
major=max_schema_version.major + 1, minor=max_schema_version.minor)
self.set_config(config={'meta': {'schema_version': str(higher_major)}})
self.assertRaises(nova_exc.ProviderConfigException,
provider_config._parse_provider_yaml, path)
# test max major with max minor+1 is logged
higher_minor = microversion_parse.Version(
major=max_schema_version.major, minor=max_schema_version.minor + 1)
expected_log_call = (
"Provider config file [%(path)s] is at schema version "
"%(schema_version)s. Nova supports the major version, but "
"not the minor. Some fields may be ignored." % {
"path": path, "schema_version": higher_minor})
self.set_config(config={'meta': {'schema_version': str(higher_minor)}})
provider_config._parse_provider_yaml(path)
self.mock_LOG.warning.assert_called_once_with(expected_log_call)
@ddt.ddt
class SchemaValidationTestCasesV1(SchemaValidationMixin):
MIN_SCHEMA_VERSION = microversion_parse.Version(1, 0)
MAX_SCHEMA_VERSION = microversion_parse.Version(1, 0)
@ddt.unpack
@ddt.file_data('provider_config_data/v1/validation_error_test_data.yaml')
def test_validation_errors(self, config, expected_messages):
self.run_test_validation_errors(config, expected_messages)
@ddt.unpack
@ddt.file_data('provider_config_data/v1/validation_success_test_data.yaml')
def test_validation_success(self, config):
self.run_test_validation_success(config)
def test_schema_version_matching(self):
self.run_schema_version_matching(self.MIN_SCHEMA_VERSION,
self.MAX_SCHEMA_VERSION)
@ddt.ddt
class ValidateProviderConfigTestCases(base.BaseTestCase):
@ddt.unpack
@ddt.file_data('provider_config_data/validate_provider_good_config.yaml')
def test__validate_provider_good_config(self, sample):
provider_config._validate_provider_config(sample, "fake_path")
@ddt.unpack
@ddt.file_data('provider_config_data/validate_provider_bad_config.yaml')
def test__validate_provider_bad_config(self, sample, expected_messages):
actual_msg = self.assertRaises(
nova_exc.ProviderConfigException,
provider_config._validate_provider_config,
sample, 'fake_path').message
self.assertIn(actual_msg, expected_messages)
@mock.patch.object(provider_config, 'LOG')
def test__validate_provider_config_one_noop_provider(self, mock_log):
expected = {
"providers": [
{
"identification": {"name": "NAME1"},
"inventories": {
"additional": [
{"CUSTOM_RESOURCE_CLASS": {}}
]
}
},
{
"identification": {"name": "NAME_453764"},
"inventories": {
"additional": []
},
"traits": {
"additional": []
}
}
]
}
data = copy.deepcopy(expected)
valid = provider_config._validate_provider_config(data, "fake_path")
mock_log.warning.assert_called_once_with(
"Provider NAME_453764 defined in "
"fake_path has no additional "
"inventories or traits and will be ignored."
)
# assert that _validate_provider_config does not mutate inputs
self.assertEqual(expected, data)
# assert that the first entry in the returned tuple is the full set
# of providers not a copy and is equal to the expected providers.
self.assertIs(data['providers'][0], valid[0])
self.assertEqual(expected['providers'][0], valid[0])
class GetProviderConfigsTestCases(base.BaseTestCase):
@mock.patch.object(provider_config, 'glob')
def test_get_provider_configs_one_file(self, mock_glob):
expected = {
"$COMPUTE_NODE": {
"__source_file": "example_provider.yaml",
"identification": {
"name": "$COMPUTE_NODE"
},
"inventories": {
"additional": [
{
"CUSTOM_EXAMPLE_RESOURCE_CLASS": {
"total": 100,
"reserved": 0,
"min_unit": 1,
"max_unit": 10,
"step_size": 1,
"allocation_ratio": 1.0
}
}
]
},
"traits": {
"additional": [
"CUSTOM_TRAIT_ONE",
"CUSTOM_TRAIT2"
]
}
}
}
example_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'provider_config_data/v1/example_provider.yaml')
mock_glob.glob.return_value = [example_file]
actual = provider_config.get_provider_configs('path')
self.assertEqual(expected, actual)
mock_glob.glob.assert_called_with('path/*.yaml')
@mock.patch.object(provider_config, 'glob')
@mock.patch.object(provider_config, '_parse_provider_yaml')
def test_get_provider_configs_one_file_uuid_conflict(
self, mock_parser, mock_glob):
# one config file with conflicting identification
providers = [
{"__source_file": "file1.yaml",
"identification": {
"uuid": uuidsentinel.uuid1
},
"inventories": {
"additional": [
{
"CUSTOM_EXAMPLE_RESOURCE_CLASS1": {
"total": 100,
"reserved": 0,
"min_unit": 1,
"max_unit": 10,
"step_size": 1,
"allocation_ratio": 1
}
}
]
},
"traits": {
"additional": [
"CUSTOM_TRAIT1"
]
}
},
{"__source_file": "file1.yaml",
"identification": {
"uuid": uuidsentinel.uuid1
},
"inventories": {
"additional": [
{
"CUSTOM_EXAMPLE_RESOURCE_CLASS2": {
"total": 100,
"reserved": 0,
"min_unit": 1,
"max_unit": 10,
"step_size": 1,
"allocation_ratio": 1
}
}
]
},
"traits": {
"additional": [
"CUSTOM_TRAIT2"
]
}
}
]
mock_parser.side_effect = [{"providers": providers}]
mock_glob.glob.return_value = ['file1.yaml']
# test that correct error is raised and message matches
error = self.assertRaises(nova_exc.ProviderConfigException,
provider_config.get_provider_configs,
'dummy_path').kwargs['error']
self.assertEqual("Provider %s has multiple definitions in source "
"file(s): ['file1.yaml']." % uuidsentinel.uuid1,
error)
@mock.patch.object(provider_config, 'glob')
@mock.patch.object(provider_config, '_parse_provider_yaml')
def test_get_provider_configs_two_files(self, mock_parser, mock_glob):
expected = {
"EXAMPLE_RESOURCE_PROVIDER1": {
"__source_file": "file1.yaml",
"identification": {
"name": "EXAMPLE_RESOURCE_PROVIDER1"
},
"inventories": {
"additional": [
{
"CUSTOM_EXAMPLE_RESOURCE_CLASS1": {
"total": 100,
"reserved": 0,
"min_unit": 1,
"max_unit": 10,
"step_size": 1,
"allocation_ratio": 1
}
}
]
},
"traits": {
"additional": [
"CUSTOM_TRAIT1"
]
}
},
"EXAMPLE_RESOURCE_PROVIDER2": {
"__source_file": "file2.yaml",
"identification": {
"name": "EXAMPLE_RESOURCE_PROVIDER2"
},
"inventories": {
"additional": [
{
"CUSTOM_EXAMPLE_RESOURCE_CLASS2": {
"total": 100,
"reserved": 0,
"min_unit": 1,
"max_unit": 10,
"step_size": 1,
"allocation_ratio": 1
}
}
]
},
"traits": {
"additional": [
"CUSTOM_TRAIT2"
]
}
}
}
mock_parser.side_effect = [
{"providers": [provider]} for provider in expected.values()]
mock_glob_return = ['file1.yaml', 'file2.yaml']
mock_glob.glob.return_value = mock_glob_return
dummy_path = 'dummy_path'
actual = provider_config.get_provider_configs(dummy_path)
mock_glob.glob.assert_called_once_with(os.path.join(dummy_path,
'*.yaml'))
mock_parser.assert_has_calls([mock.call(param)
for param in mock_glob_return])
self.assertEqual(expected, actual)
@mock.patch.object(provider_config, 'glob')
@mock.patch.object(provider_config, '_parse_provider_yaml')
def test_get_provider_configs_two_files_name_conflict(self, mock_parser,
mock_glob):
# two config files with conflicting identification
configs = {
"EXAMPLE_RESOURCE_PROVIDER1": {
"__source_file": "file1.yaml",
"identification": {
"name": "EXAMPLE_RESOURCE_PROVIDER1"
},
"inventories": {
"additional": [
{
"CUSTOM_EXAMPLE_RESOURCE_CLASS1": {
"total": 100,
"reserved": 0,
"min_unit": 1,
"max_unit": 10,
"step_size": 1,
"allocation_ratio": 1
}
}
]
},
"traits": {
"additional": [
"CUSTOM_TRAIT1"
]
}
},
"EXAMPLE_RESOURCE_PROVIDER2": {
"__source_file": "file2.yaml",
"identification": {
"name": "EXAMPLE_RESOURCE_PROVIDER1"
},
"inventories": {
"additional": [
{
"CUSTOM_EXAMPLE_RESOURCE_CLASS1": {
"total": 100,
"reserved": 0,
"min_unit": 1,
"max_unit": 10,
"step_size": 1,
"allocation_ratio": 1
}
}
]
},
"traits": {
"additional": [
"CUSTOM_TRAIT1"
]
}
}
}
mock_parser.side_effect = [{"providers": [configs[provider]]}
for provider in configs]
mock_glob.glob.return_value = ['file1.yaml', 'file2.yaml']
# test that correct error is raised and message matches
error = self.assertRaises(nova_exc.ProviderConfigException,
provider_config.get_provider_configs,
'dummy_path').kwargs['error']
self.assertEqual("Provider EXAMPLE_RESOURCE_PROVIDER1 has multiple "
"definitions in source file(s): "
"['file1.yaml', 'file2.yaml'].", error)
@mock.patch.object(provider_config, 'LOG')
def test_get_provider_configs_no_configs(self, mock_log):
path = "invalid_path!@#"
actual = provider_config.get_provider_configs(path)
self.assertEqual({}, actual)
mock_log.info.assert_called_once_with(
"No provider configs found in %s. If files are present, "
"ensure the Nova process has access.", path)
|
klmitch/nova
|
nova/tests/unit/compute/test_provider_config.py
|
Python
|
apache-2.0
| 17,242
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DescribeRouterVxnetsAction(BaseAction):
action = 'DescribeRouterVxnets'
command = 'describe-router-vxnets'
usage = '%(prog)s -r <router_id> [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-r', '--router', dest='router',
action='store', type=str, default='',
help='ID of router whose vxnets you want to list.')
parser.add_argument('-v', '--vxnet', dest='vxnet',
action='store', type=str, default='',
help='filter by vxnet ID. ')
@classmethod
def build_directive(cls, options):
if not options.router:
print('error: [router] should be specified')
return None
return {
'router': options.router,
'vxnet': options.vxnet,
'offset':options.offset,
'limit': options.limit,
}
|
yunify/qingcloud-cli
|
qingcloud/cli/iaas_client/actions/router/describe_router_vxnets.py
|
Python
|
apache-2.0
| 1,841
|
import sys
from PyQt4 import QtGui
from dependence_repo_manager.core.manager import RepoManager
from dependence_repo_manager.core.main_window import MainWindow
if __name__ == '__main__':
app = QtGui.QApplication(sys.argv)
manager = RepoManager()
manager.config()
mainWin = MainWindow(manager)
mainWin.show()
sys.exit(app.exec_())
|
xcgspring/dependence_repo
|
manager/dependence_repo_manager/main.py
|
Python
|
apache-2.0
| 357
|
import json
from collections import OrderedDict
from inspect import signature
from warnings import warn
import numpy as np
from sklearn.base import BaseEstimator
class Configuration(object):
def __init__(self, name, version, params):
if not isinstance(name, str):
raise ValueError()
if not isinstance(params, dict):
raise ValueError()
self.name = name
self.version = version
self.params = params
def __str__(self):
if len(self.params) == 0:
return "%s-v%s" % (self.name, self.version)
json_params = config_to_json(self.params)
if len(json_params) < 200:
return "%s-v%s: %s" % (self.name, self.version, json_params)
else:
return "%s-v%s {...}" % (self.name, self.version)
def __eq__(self, other):
return isinstance(other, Configuration) and \
self.name == other.name and \
self.version == other.version and \
self.params == other.params
class Configurable(object):
"""
Configurable classes have names, versions, and a set of parameters that are either "simple" aka JSON serializable
types or other Configurable objects. Configurable objects should also be serializable via pickle.
Configurable classes are defined mainly to give us a human-readable way of reading of the `parameters`
set for different objects and to attach version numbers to them.
By default we follow the format sklearn uses for its `BaseEstimator` class, where parameters are automatically
derived based on the constructor parameters.
"""
@classmethod
def _get_param_names(cls):
# fetch the constructor (or the original constructor before any wrapping)
init = cls.__init__
if init is object.__init__:
# No explicit constructor to introspect
return []
init_signature = signature(init)
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self']
if any(p.kind == p.VAR_POSITIONAL for p in parameters):
raise RuntimeError()
return sorted([p.name for p in parameters])
@property
def name(self):
return self.__class__.__name__
@property
def version(self):
return 0
def get_params(self):
out = {}
for key in self._get_param_names():
v = getattr(self, key, None)
if isinstance(v, Configurable):
out[key] = v.get_config()
elif hasattr(v, "get_config"): # for keras objects
out[key] = {"name": v.__class__.__name__, "config": v.get_config()}
else:
out[key] = v
return out
def get_config(self) -> Configuration:
params = {k: describe(v) for k,v in self.get_params().items()}
return Configuration(self.name, self.version, params)
def __getstate__(self):
state = dict(self.__dict__)
if "version" in state:
if state["version"] != self.version:
raise RuntimeError()
else:
state["version"] = self.version
return state
def __setstate__(self, state):
if "version" not in state:
raise RuntimeError("Version should be in state (%s)" % self.__class__.__name__)
if state["version"] != self.version:
warn(("%s loaded with version %s, but class " +
"version is %s") % (self.__class__.__name__, state["version"], self.version))
if "state" in state:
self.__dict__ = state["state"]
else:
del state["version"]
self.__dict__ = state
def describe(obj):
if isinstance(obj, Configurable):
return obj.get_config()
else:
obj_type = type(obj)
if obj_type in (list, set, frozenset, tuple):
return obj_type([describe(e) for e in obj])
elif isinstance(obj, tuple):
# Named tuple, convert to a plain tuple
return tuple(describe(e) for e in obj)
elif obj_type in (dict, OrderedDict):
output = OrderedDict()
for k, v in obj.items():
if isinstance(k, Configurable):
raise ValueError()
output[k] = describe(v)
return output
else:
return obj
class EncodeDescription(json.JSONEncoder):
""" Json encoder that encodes 'Configurable' objects as dictionaries and handles
some numpy types. Note decoding this output will not reproduce the original input,
for these types; it is only intended to produce human-readable output.
"""
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.dtype):
return str(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.bool_):
return bool(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, BaseEstimator): # handle sklearn estimators
return Configuration(obj.__class__.__name__, 0, obj.get_params())
elif isinstance(obj, Configuration):
if "version" in obj.params or "name" in obj.params:
raise ValueError()
out = OrderedDict()
out["name"] = obj.name
if obj.version != 0:
out["version"] = obj.version
out.update(obj.params)
return out
elif isinstance(obj, Configurable):
return obj.get_config()
elif isinstance(obj, set):
return sorted(obj) # Ensure deterministic order
else:
try:
return super().default(obj)
except TypeError:
return str(obj)
def config_to_json(data, indent=None):
return json.dumps(data, sort_keys=False, cls=EncodeDescription, indent=indent)
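# Illustrative usage (hypothetical values):
#   config_to_json(Configuration("my_model", 1, {"dropout": 0.5}), indent=2)
# serializes to a JSON object containing "name", "version" and the parameters.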
|
allenai/document-qa
|
docqa/configurable.py
|
Python
|
apache-2.0
| 6,024
|
# This script has to run using the Python executable found in:
# /opt/mgmtworker/env/bin/python in order to properly load the manager
# blueprints utils.py module.
import argparse
import logging
import utils
class CtxWithLogger(object):
logger = logging.getLogger('internal-ssl-certs-logger')
utils.ctx = CtxWithLogger()
parser = argparse.ArgumentParser()
parser.add_argument('--metadata', default=utils.CERT_METADATA_FILE_PATH,
help='File containing the cert metadata. It should be a '
'JSON file containing an object with the '
'"internal_rest_host" and "networks" fields.')
parser.add_argument('manager_ip', default=None, nargs='?',
help='The IP of this machine on the default network')
if __name__ == '__main__':
args = parser.parse_args()
cert_metadata = utils.load_cert_metadata(filename=args.metadata)
internal_rest_host = args.manager_ip or cert_metadata['internal_rest_host']
networks = cert_metadata.get('networks', {})
networks['default'] = internal_rest_host
cert_ips = [internal_rest_host] + list(networks.values())
utils.generate_internal_ssl_cert(ips=cert_ips, name=internal_rest_host)
utils.store_cert_metadata(internal_rest_host, networks,
filename=args.metadata)
|
cloudify-cosmo/cloudify-manager-blueprints
|
components/manager-ip-setter/scripts/create-internal-ssl-certs.py
|
Python
|
apache-2.0
| 1,346
|
from measures.generic.GenericMeasure import GenericMeasure
import measures.generic.Units as Units
class StartedSearches(GenericMeasure):
"""Total number of started searches"""
def __init__(self, period, simulationTime):
GenericMeasure.__init__(self, r'DEBUG .*? - Peer [0-9]+ started search for parameters .*? ([0-9]+\,[0-9]+).*?', period, simulationTime, Units.MESSAGES)
def parseLine(self, line):
self.parseInc(line)
|
unaguil/hyperion-ns2
|
experiments/measures/multicast/StartedSearches.py
|
Python
|
apache-2.0
| 437
|
import logging
class BorgSingleton:
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
class LoggerSetup(BorgSingleton):
"""Logger setup convenience class"""
DEFAULT_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, logger_name, log_level=logging.INFO, log_file=None, log_format=DEFAULT_FORMAT):
BorgSingleton.__init__(self)
self.__logger_name = logger_name
logger = logging.getLogger(self.__logger_name)
logger.setLevel(log_level)
if log_file:
self.add_log_file(log_file, log_level, log_format)
def add_log_file(self, log_file, level=logging.INFO, log_format=DEFAULT_FORMAT):
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(level)
file_handler.setFormatter(logging.Formatter(log_format))
logging.getLogger(self.__logger_name).addHandler(file_handler)
def get_logger(self):
return logging.getLogger(self.__logger_name)
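# Illustrative usage (hypothetical names):
#   LoggerSetup("git-tools", log_file="git-tools.log")
#   logging.getLogger("git-tools").info("repository scan started")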
|
kevgraham7/toolbox
|
python/samples/git-tools/util/log_setup.py
|
Python
|
apache-2.0
| 1,033
|
#!/usr/bin/env python3
from flask import Flask, request, render_template
import os
import json
import time
import datetime
from smarthomemongo import SmartHomeDB
app = Flask(__name__)
smartDB = SmartHomeDB()
@app.route('/')
def index():
records = smartDB.getCurrentStats('raspberry')
if( 'timestamp' in records.keys() ):
ts = datetime.datetime.fromtimestamp(records['timestamp']).strftime('%Y-%m-%d %H:%M:%S')
records['timestamp_string'] = ts
return render_template('index.html',records=records)
@app.route('/add', methods=['POST'])
def add():
recordJson = request.get_json()
smartDB.insertTemperaturePoint(recordJson)
return 'Success', 200
@app.route('/update_stats', methods=['POST'])
def update_stats():
recordJson = request.get_json()
smartDB.updateCurrentStats(recordJson)
return 'Success', 200
@app.route('/get_current_stats',methods=['GET'])
def get_current_stats():
record = smartDB.getCurrentStats('raspberry')
return json.dumps(record)
@app.route('/line_graph')
def get_line_graph():
return render_template('graph.html')
@app.route('/data.csv')
def get_data_csv():
records = smartDB.getTemperaturePoints()
return json.dumps(records)
@app.route('/upload_test1', methods=['POST'])
def upload_test():
recordJson = request.get_json()
smartDB.upload_collection(recordJson)
return 'Success', 200
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True)
|
bharath2020/SmartHome
|
temperature_server.py
|
Python
|
apache-2.0
| 1,401
|
clock.addListener("clockStopped", "python", "clock_stopped")
def clock_stopped():
print("The clock has been stopped")
|
MyRobotLab/myrobotlab
|
src/main/resources/resource/Clock/clock_6_clock_stopped.py
|
Python
|
apache-2.0
| 122
|
# Setup file for package int_methods
from setuptools import setup
setup(name="int_methods",
version="0.0.1",
install_requires=["quark==0.0.1"],
py_modules=['int_methods'],
packages=['int_methods', 'int_methods_md'])
|
bozzzzo/quark
|
quarkc/test/emit/expected/py/int-methods/setup.py
|
Python
|
apache-2.0
| 242
|
# Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
import traceback
from sis_provisioner.dao.uw_account import get_all_uw_accounts
from sis_provisioner.csv import get_filepath
from sis_provisioner.csv.user_writer import make_import_user_csv_files
from sis_provisioner.util.log import log_exception
from sis_provisioner.util.settings import get_csv_file_path_prefix
logger = logging.getLogger(__name__)
class CsvMaker:
"""
For the given loader, create the corresponding csv files.
"""
def __init__(self):
"""
Initializes the csv output file path using the configured path prefix.
"""
self.file_wrote = False
self.filepath = get_filepath(path_prefix=get_csv_file_path_prefix())
def fetch_users(self):
return get_all_uw_accounts()
def load_files(self):
try:
number_users_wrote = make_import_user_csv_files(
self.fetch_users(), self.filepath)
logger.info("Total {0:d} users wrote into {1}\n".format(
number_users_wrote, self.filepath))
return number_users_wrote
except Exception:
log_exception(
logger,
"Failed to make user csv file in {0}".format(self.filepath),
traceback.format_exc())
|
uw-it-aca/bridge-sis-provisioner
|
sis_provisioner/csv/writer.py
|
Python
|
apache-2.0
| 1,345
|
from itertools import *
for i, s in zip(count(), repeat('over-and-over', 5)):
print(i, s)
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_algorithms/itertools_repeat_zip.py
|
Python
|
apache-2.0
| 96
|
from SamplingAccuracyEvaluation import SamplingAlgorithm as SA
from SamplingAccuracyEvaluation import AccuracyEvaluation as AE
from SamplingAccuracyEvaluation import PrintGraph as PG
from SamplingAccuracyEvaluation import StatisticalCalculation as SC
import operator
def populationListGenerate(filePath, target):
print('Generate Population List')
populationList = []
populationFile = open(filePath, 'r')
while True:
line = populationFile.readline()
if not line: break
line_data = line.split(',')
populationList.append(line_data[target])
populationFile.close()
return populationList
def calculateScore(evalList):
score = 0
for i in range(len(evalList)):
if i == 0:
score = score + abs(evalList[i])/4
else:
score = score + abs(evalList[i])/3
return score
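# Illustrative note (hypothetical values): evalList = [0.4, 0.3, 0.3] scores
# 0.4/4 + 0.3/3 + 0.3/3 = 0.3; run() below returns the sampling algorithm
# with the smallest score.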
def run(windowSize, sampleSize, filePath, target=0):
print('############## Sampling Accuracy Evaluation ##############')
count = 1
numOfTrials = 1
jSDPieceCount = 20
pAAPieceCount = 20
print('Window Size: ' ,windowSize)
print('Sample Size: ' ,sampleSize)
print('JSD Piece Count: ' ,jSDPieceCount)
print('PAA Piece Count: ' ,pAAPieceCount)
populationList = populationListGenerate(filePath, target)
windowList = []
accuracyMeasureCount = 3
evalDic = {}
reservoirEvalList = [0.0 for _ in range(accuracyMeasureCount)]
hashEvalList = [0.0 for _ in range(accuracyMeasureCount)]
priorityEvalList = [0.0 for _ in range(accuracyMeasureCount)]
print()
for data in populationList:
windowList.append(data)
if count == windowSize:
print('################## ' + str(numOfTrials) + ' Evaluation Start ####################')
# if numOfTrials == 1: PG.printGraph(windowList, 'Population', numOfTrials)
print()
print(str(numOfTrials)+'_ReservoirSampling')
sampleList = SA.sortedReservoirSam(sampleSize, windowList)
tempEvalList = AE.run(windowList, sampleList, jSDPieceCount, pAAPieceCount)
SC.sumPerIndex(reservoirEvalList, tempEvalList)
# if numOfTrials == 1: PG.printGraph(sampleList, 'Reservoir', numOfTrials)
print()
print(str(numOfTrials)+'_HashSampling')
sampleList = SA.hashSam(sampleSize, windowList)
tempEvalList = AE.run(windowList, sampleList, jSDPieceCount, pAAPieceCount)
SC.sumPerIndex(hashEvalList, tempEvalList)
# if numOfTrials == 1: PG.printGraph(sampleList, 'Hash', numOfTrials)
print()
print(str(numOfTrials)+'_PrioritySampling')
sampleList = SA.sortedPrioritySam(sampleSize, windowList)
tempEvalList = AE.run(windowList, sampleList, jSDPieceCount, pAAPieceCount)
SC.sumPerIndex(priorityEvalList, tempEvalList)
# if numOfTrials == 1: PG.printGraph(sampleList, 'Priority', numOfTrials)
print()
numOfTrials = numOfTrials + 1
count = 0
windowList = []
count = count + 1
for i in range(accuracyMeasureCount):
reservoirEvalList[i] = reservoirEvalList[i] / numOfTrials
hashEvalList[i] = hashEvalList[i] / numOfTrials
priorityEvalList[i] = priorityEvalList[i] / numOfTrials
evalDic['RESERVOIR_SAMPLING'] = calculateScore(reservoirEvalList)
evalDic['HASH_SAMPLING'] = calculateScore(hashEvalList)
evalDic['PRIORITY_SAMPLING'] = calculateScore(priorityEvalList)
sortedEvalList = sorted(evalDic.items(), key = operator.itemgetter(1))
return sortedEvalList[0][0]
|
dke-knu/i2am
|
i2am-app/AlgorithmSelectionEngine/SamplingAccuracyEvaluation/SamplingAccuracyEvaluation.py
|
Python
|
apache-2.0
| 3,665
|
### This program combines identical GSHF entries in the citation tree and sorts them by the first letter of the title;
### Author: Ye Gao
### Date: 2017-11-7
import csv
file = open('NodeCheckList.csv', 'rb')
reader = csv.reader(file)
NodeCheckList = list(reader)
file.close()
#print NodeCheckList
FirstRow = NodeCheckList.pop(0)
FirstRow[8] = 'Frequency'
# convert cited time from string to integer;
for element in NodeCheckList:
if element[2] != "":
element[2] = int(element[2])
SortYear = sorted(NodeCheckList, key=lambda l:l[3], reverse=True)
SortCiteTimes = sorted(NodeCheckList, key=lambda l:l[2], reverse=True)
print SortYear
NodeStressList = []
for element in NodeCheckList:
if (int(element[7]) == 0) or (int(element[7]) == 1) or (int(element[7]) == 2):
NodeStressList.append(element)
SortTitle = sorted(NodeStressList, key=lambda l:l[4], reverse=False)
SortTitle = [FirstRow] + SortTitle
title = ""
CombineTitle = []
for element in SortTitle:
if element[4] != title:
CombineTitle.append(element)
else:
CombineTitle[-1][1] += '|' + element[1]
title = element[4]
# save result list to NodeStressList.csv;
file = open('NodeStressList.csv','wb')
for i in CombineTitle:
for j in i:
file.write(str(j))
file.write(',')
file.write('\n')
file.close()
|
sortsimilar/Citation-Tree
|
markstress.py
|
Python
|
apache-2.0
| 1,287
|
import urllib2
from lxml import etree
####################################################################
# API
####################################################################
class Scrape_Quora:
regexpNS = "http://exslt.org/regular-expressions"
@staticmethod
def get_name(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
name = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/div/h1/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return name
@staticmethod
def get_url(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
URL = response.geturl()
return URL
@staticmethod
def get_profile_picture_link(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
profile_picture_link = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/div/img/@data-src', namespaces={'re':Scrape_Quora.regexpNS})[0]
return profile_picture_link
@staticmethod
def get_no_of_questions(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
no_of_questions = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Questions"]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_questions
@staticmethod
def get_no_of_answers(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
no_of_answers = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Answers"]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_answers
@staticmethod
def get_no_of_followers(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
no_of_followers = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Followers "]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_followers
@staticmethod
def get_no_of_following(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
no_of_following = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Following "]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_following
@staticmethod
def get_no_of_edits(user_name):
url = 'https://www.quora.com/profile/' + user_name
response = urllib2.urlopen(url)
htmlparser = etree.HTMLParser()
tree = etree.parse(response, htmlparser)
no_of_edits = tree.xpath('//*[re:test(@id, "ld_[a-z]+_\\d+", g)]/li/a[text()="Edits"]/span/text()', namespaces={'re':Scrape_Quora.regexpNS})[0]
return no_of_edits
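# --- Illustrative usage (not part of the original module) ---
# 'Some-User' is a placeholder profile slug; substitute any public Quora profile.
if __name__ == '__main__':
    try:
        print Scrape_Quora.get_name('Some-User')
        print Scrape_Quora.get_no_of_answers('Some-User')
    except urllib2.HTTPError as e:
        print 'Request failed: %s' % e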
|
hansika/pyquora
|
scrape_quora/pyquora.py
|
Python
|
apache-2.0
| 3,445
|
# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib.plugins import directory
from neutron_lib import worker
from oslo_log import log
from neutron.db import provisioning_blocks
from networking_odl.common import client as odl_client
from networking_odl.common import odl_features
from networking_odl.common import utils
from networking_odl.common import websocket_client as odl_ws_client
LOG = log.getLogger(__name__)
class OdlPortStatusUpdate(worker.BaseWorker):
"""Class to register and handle port status update"""
PORT_PATH = "restconf/operational/neutron:neutron/ports/port"
def __init__(self):
super(OdlPortStatusUpdate, self).__init__()
self.odl_websocket_client = None
def start(self):
super(OdlPortStatusUpdate, self).start()
LOG.debug('OdlPortStatusUpdate worker running')
if odl_features.has(odl_features.OPERATIONAL_PORT_STATUS):
self.run_websocket()
def stop(self):
if self.odl_websocket_client:
self.odl_websocket_client.set_exit_flag()
def wait(self):
"""Wait for service to complete."""
@staticmethod
def reset():
pass
def run_websocket(self):
        # OpenDaylight path to receive websocket notifications on
neutron_ports_path = "/neutron:neutron/neutron:ports"
self.path_uri = utils.get_odl_url()
self.odl_websocket_client = (
odl_ws_client.OpenDaylightWebsocketClient.odl_create_websocket(
self.path_uri, neutron_ports_path,
odl_ws_client.ODL_OPERATIONAL_DATASTORE,
odl_ws_client.ODL_NOTIFICATION_SCOPE_SUBTREE,
self._process_websocket_recv,
self._process_websocket_reconnect,
True
))
def _process_websocket_recv(self, payload, reconnect):
# Callback for websocket notification
LOG.debug("Websocket notification for port status update")
for event in odl_ws_client.EventDataParser.get_item(payload):
operation, path, data = event.get_fields()
if ((operation in [event.OPERATION_UPDATE,
event.OPERATION_CREATE])):
port_id = event.extract_field(path, "neutron:uuid")
port_id = str(port_id).strip("'")
status_field = data.get('status')
if status_field is not None:
status = status_field.get('content')
LOG.debug("Update port for port id %s %s", port_id, status)
# for now we only support transition from DOWN->ACTIVE
# https://bugs.launchpad.net/networking-odl/+bug/1686023
if status == n_const.PORT_STATUS_ACTIVE:
provisioning_blocks.provisioning_complete(
context.get_admin_context(),
port_id, resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY)
if operation == event.OPERATION_DELETE:
LOG.debug("PortStatus: Ignoring delete operation")
def _process_websocket_reconnect(self, status):
if status == odl_ws_client.ODL_WEBSOCKET_CONNECTED:
# Get port data using restconf
LOG.debug("Websocket notification on reconnection")
reconn_thread = threading.Thread(
name='websocket', target=self._pull_missed_statuses)
reconn_thread.start()
def _pull_missed_statuses(self):
LOG.debug("starting to pull pending statuses...")
plugin = directory.get_plugin()
filter = {"status": [n_const.PORT_STATUS_DOWN],
"vif_type": ["unbound"]}
ports = plugin.get_ports(context.get_admin_context(), filter)
if not ports:
LOG.debug("no down ports found, done")
return
port_fetch_url = utils.get_odl_url(self.PORT_PATH)
client = odl_client.OpenDaylightRestClient.create_client(
url=port_fetch_url)
for port in ports:
port_id = port["id"]
response = client.get(port_id)
if response.status_code != 200:
LOG.warning("Non-200 response code %s", str(response))
continue
odl_status = response.json()['port'][0]['status']
if odl_status == n_const.PORT_STATUS_ACTIVE:
# for now we only support transition from DOWN->ACTIVE
# See https://bugs.launchpad.net/networking-odl/+bug/1686023
provisioning_blocks.provisioning_complete(
context.get_admin_context(),
port_id, resources.PORT,
provisioning_blocks.L2_AGENT_ENTITY)
LOG.debug("done pulling pending statuses")
|
openstack/networking-odl
|
networking_odl/ml2/port_status_update.py
|
Python
|
apache-2.0
| 5,546
|
def rewrite_keywords(journal_like):
bib = journal_like.bibjson()
kwords = [k.lower() for k in bib.keywords]
bib.set_keywords(kwords)
return journal_like
|
DOAJ/doaj
|
portality/migrate/20191128_2056_keywords_to_lower/operations.py
|
Python
|
apache-2.0
| 169
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, List, Iterator, Iterable, TYPE_CHECKING
from cirq.interop.quirk.cells.cell import Cell, CELL_SIZES, CellMaker
if TYPE_CHECKING:
import cirq
class InputCell(Cell):
"""A modifier that provides a quantum input to gates in the same column."""
def __init__(self, qubits: Iterable['cirq.Qid'], letter: str):
self.qubits = tuple(qubits)
self.letter = letter
def gate_count(self) -> int:
return 0
def with_line_qubits_mapped_to(self, qubits: List['cirq.Qid']) -> 'Cell':
return InputCell(qubits=Cell._replace_qubits(self.qubits, qubits), letter=self.letter)
def modify_column(self, column: List[Optional['Cell']]):
for i in range(len(column)):
cell = column[i]
if cell is not None:
column[i] = cell.with_input(self.letter, self.qubits)
class SetDefaultInputCell(Cell):
"""A persistent modifier that provides a fallback classical input."""
def __init__(self, letter: str, value: int):
self.letter = letter
self.value = value
def gate_count(self) -> int:
return 0
def with_line_qubits_mapped_to(self, qubits: List['cirq.Qid']) -> 'Cell':
return self
def persistent_modifiers(self):
return {f'set_default_{self.letter}': lambda cell: cell.with_input(self.letter, self.value)}
def generate_all_input_cell_makers() -> Iterator[CellMaker]:
# Quantum inputs.
yield from _input_family("inputA", "a")
yield from _input_family("inputB", "b")
yield from _input_family("inputR", "r")
yield from _input_family("revinputA", "a", rev=True)
yield from _input_family("revinputB", "b", rev=True)
# Classical inputs.
yield CellMaker("setA", 2, lambda args: SetDefaultInputCell('a', args.value))
yield CellMaker("setB", 2, lambda args: SetDefaultInputCell('b', args.value))
yield CellMaker("setR", 2, lambda args: SetDefaultInputCell('r', args.value))
def _input_family(identifier_prefix: str, letter: str, rev: bool = False) -> Iterator[CellMaker]:
for n in CELL_SIZES:
yield CellMaker(
identifier=identifier_prefix + str(n),
size=n,
maker=lambda args: InputCell(
qubits=args.qubits[::-1] if rev else args.qubits, letter=letter
),
)
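# Minimal sketch of consuming the makers above (assuming CellMaker is the NamedTuple
# defined in cirq.interop.quirk.cells.cell, exposing `identifier` and `size` fields):
#
#     for cell_maker in generate_all_input_cell_makers():
#         print(cell_maker.identifier, cell_maker.size)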
|
quantumlib/Cirq
|
cirq-core/cirq/interop/quirk/cells/input_cells.py
|
Python
|
apache-2.0
| 2,927
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from util.typedispatch import *
from . import graph as cfg
from . dfs import CFGDFS
# Kills unreachable CFG nodes
class Logger(TypeDispatcher):
def __init__(self):
self.merges = []
@defaultdispatch
def default(self, node):
pass
@dispatch(cfg.MultiEntryBlock)
def visitMerge(self, node):
self.merges.append(node)
def evaluate(compiler, g):
logger = Logger()
dfs = CFGDFS(post=logger)
dfs.process(g.entryTerminal)
def live(node):
return node in dfs.processed
for merge in logger.merges:
for prev in merge._prev:
assert isinstance(prev, tuple), merge._prev
# HACK exposes the internals of merge
filtered = [prev for prev in merge._prev if live(prev[0])]
merge._prev = filtered
|
ncbray/pystream
|
bin/analysis/cfg/gc.py
|
Python
|
apache-2.0
| 1,289
|
'''
Kurgan AI Web Application Security Analyzer.
http://www.kurgan.com.br/
Author: Glaudson Ocampos - <glaudson@vortexai.com.br>
Created in May, 11th 2016.
'''
import requests
import config
import sys
import warnings
import validators
sys.path.append('../')
class Target(object):
host = None
method = ''
headers = {}
uri = ''
webserver = ''
scheme = ''
port = ''
path = ''
url = ''
baseUrl = ''
    def __init__(self):
        # Assign to instance attributes; the original bare names were discarded as locals.
        self.method = 'GET'
        self.headers = {'User-Agent': 'Kurgan-AI/0.0.1'}
        self.uri = '/'
        self.scheme = 'http'
def set_host(self, val):
self.host = val
def get_host(self):
return self.host
def set_port(self, val):
self.port = val
def get_port(self):
return self.port
def set_method(self, val):
self.method = val
def get_method(self):
return self.method
def set_headers(self,val):
self.headers = val
def get_headers(self):
return self.headers
def set_webserver(self, val):
self.webserver = val
def get_webserver(self):
return self.webserver
def set_scheme(self, val):
self.scheme = val
def get_scheme(self):
return self.scheme
def set_path(self,val):
self.path = val
def get_path(self):
return self.path
def set_url(self, val):
self.url = val
def get_url(self):
return self.url
def set_baseUrl(self, val):
self.baseUrl = val
def get_baseUrl(self):
return self.baseUrl
def send_request(self):
warnings.filterwarnings('ignore')
url = self.scheme+'://'+self.host+':'+str(self.port)+"/"+str(self.path)
if config.FOLLOW_URL is True:
r = requests.get(url, allow_redirects=True, verify=False)
else:
r = requests.get(url, allow_redirects=False, verify=False)
return r
def send_request_head(self, v_url):
warnings.filterwarnings('ignore')
if config.FOLLOW_URL is True:
r = requests.request('HEAD',v_url, allow_redirects=True, verify=False)
else:
r = requests.request('HEAD',v_url, allow_redirects=False, verify=False)
return r
def get_options(self):
url = self.scheme+'://'+self.host+':'+str(self.port)+"/"+str(self.path)
r = requests.options(url)
if r.status_code == 200:
if 'allow' in r.headers:
return r.headers['allow']
else:
return None
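# Illustrative usage (host, port and path below are placeholders, not project defaults):
#
#     t = Target()
#     t.set_scheme('http')
#     t.set_host('example.com')
#     t.set_port(80)
#     t.set_path('')
#     response = t.send_request()
#     print(response.status_code)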
|
glaudsonml/kurgan-ai
|
libs/Target.py
|
Python
|
apache-2.0
| 2,614
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_ami_find
version_added: 2.0
short_description: Searches for AMIs to obtain the AMI ID and other information
description:
- Returns list of matching AMIs with AMI ID, along with other useful information
- Can search AMIs with different owners
- Can search by matching tag(s), by AMI name and/or other criteria
- Results can be sorted and sliced
author: "Tom Bamford (@tombamford)"
notes:
- This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release.
options:
region:
description:
- The AWS region to use.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
owner:
description:
- Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched.
- You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
required: false
default: null
ami_id:
description:
- An AMI ID to match.
default: null
required: false
ami_tags:
description:
- A hash/dictionary of tags to match for the AMI.
default: null
required: false
architecture:
description:
- An architecture type to match (e.g. x86_64).
default: null
required: false
hypervisor:
description:
      - A hypervisor type to match (e.g. xen).
default: null
required: false
is_public:
description:
- Whether or not the image(s) are public.
choices: ['yes', 'no']
default: null
required: false
name:
description:
- An AMI name to match.
default: null
required: false
platform:
description:
- Platform type to match.
default: null
required: false
sort:
description:
- Optional attribute which with to sort the results.
- If specifying 'tag', the 'tag_name' parameter is required.
choices: ['name', 'description', 'tag']
default: null
required: false
sort_tag:
description:
- Tag name with which to sort results.
- Required when specifying 'sort=tag'.
default: null
required: false
sort_order:
description:
- Order in which to sort results.
- Only used when the 'sort' parameter is specified.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
sort_start:
description:
- Which result to start with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
sort_end:
description:
- Which result to end with (when sorting).
- Corresponds to Python slice notation.
default: null
required: false
state:
description:
- AMI state to match.
default: 'available'
required: false
virtualization_type:
description:
- Virtualization type to match (e.g. hvm).
default: null
required: false
no_result_action:
description:
- What to do when no results are found.
- "'success' reports success and returns an empty array"
- "'fail' causes the module to report failure"
choices: ['success', 'fail']
default: 'success'
required: false
requirements:
- "python >= 2.6"
- boto
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the AMI tagged "project:website"
- ec2_ami_find:
owner: self
ami_tags:
project: website
no_result_action: fail
register: ami_find
# Search for the latest Ubuntu 14.04 AMI
- ec2_ami_find:
name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
owner: 099720109477
sort: name
sort_order: descending
sort_end: 1
register: ami_find
# Launch an EC2 instance
- ec2:
image: "{{ ami_find.results[0].ami_id }}"
instance_type: m3.medium
key_name: mykey
wait: yes
'''
try:
import boto.ec2
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
import json
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
region = dict(required=True,
aliases = ['aws_region', 'ec2_region']),
owner = dict(required=False, default=None),
ami_id = dict(required=False),
ami_tags = dict(required=False, type='dict',
aliases = ['search_tags', 'image_tags']),
architecture = dict(required=False),
hypervisor = dict(required=False),
is_public = dict(required=False),
name = dict(required=False),
platform = dict(required=False),
sort = dict(required=False, default=None,
choices=['name', 'description', 'tag']),
sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
sort_start = dict(required=False),
sort_end = dict(required=False),
state = dict(required=False, default='available'),
virtualization_type = dict(required=False),
no_result_action = dict(required=False, default='success',
choices = ['success', 'fail']),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module, install via pip or your package manager')
ami_id = module.params.get('ami_id')
ami_tags = module.params.get('ami_tags')
architecture = module.params.get('architecture')
hypervisor = module.params.get('hypervisor')
is_public = module.params.get('is_public')
name = module.params.get('name')
owner = module.params.get('owner')
platform = module.params.get('platform')
sort = module.params.get('sort')
sort_tag = module.params.get('sort_tag')
sort_order = module.params.get('sort_order')
sort_start = module.params.get('sort_start')
sort_end = module.params.get('sort_end')
state = module.params.get('state')
virtualization_type = module.params.get('virtualization_type')
no_result_action = module.params.get('no_result_action')
filter = {'state': state}
if ami_id:
filter['image_id'] = ami_id
if ami_tags:
for tag in ami_tags:
filter['tag:'+tag] = ami_tags[tag]
if architecture:
filter['architecture'] = architecture
if hypervisor:
filter['hypervisor'] = hypervisor
if is_public:
filter['is_public'] = is_public
if name:
filter['name'] = name
if platform:
filter['platform'] = platform
if virtualization_type:
filter['virtualization_type'] = virtualization_type
ec2 = ec2_connect(module)
images_result = ec2.get_all_images(owners=owner, filters=filter)
if no_result_action == 'fail' and len(images_result) == 0:
module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
results = []
for image in images_result:
data = {
'ami_id': image.id,
'architecture': image.architecture,
'description': image.description,
'is_public': image.is_public,
'name': image.name,
'owner_id': image.owner_id,
'platform': image.platform,
'root_device_name': image.root_device_name,
'root_device_type': image.root_device_type,
'state': image.state,
'tags': image.tags,
'virtualization_type': image.virtualization_type,
}
if image.kernel_id:
data['kernel_id'] = image.kernel_id
if image.ramdisk_id:
data['ramdisk_id'] = image.ramdisk_id
results.append(data)
if sort == 'tag':
if not sort_tag:
module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
elif sort:
results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
try:
if sort and sort_start and sort_end:
results = results[int(sort_start):int(sort_end)]
elif sort and sort_start:
results = results[int(sort_start):]
elif sort and sort_end:
results = results[:int(sort_end)]
except TypeError:
module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
module.exit_json(results=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
NitorCreations/aws-utils
|
opt/nitor/library/ec2_ami_find.py
|
Python
|
apache-2.0
| 9,762
|
from typing import Union
from ray.rllib.models.action_dist import ActionDistribution
from ray.rllib.utils.annotations import override
from ray.rllib.utils.exploration.exploration import TensorType
from ray.rllib.utils.exploration.soft_q import SoftQ
from ray.rllib.utils.framework import try_import_tf, try_import_torch
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class SlateSoftQ(SoftQ):
@override(SoftQ)
def get_exploration_action(
self,
action_distribution: ActionDistribution,
timestep: Union[int, TensorType],
explore: bool = True,
):
assert (
self.framework == "torch"
), "ERROR: SlateSoftQ only supports torch so far!"
cls = type(action_distribution)
# Re-create the action distribution with the correct temperature
# applied.
action_distribution = cls(
action_distribution.inputs, self.model, temperature=self.temperature
)
batch_size = action_distribution.inputs.size()[0]
action_logp = torch.zeros(batch_size, dtype=torch.float)
self.last_timestep = timestep
# Explore.
if explore:
# Return stochastic sample over (q-value) logits.
action = action_distribution.sample()
# Return the deterministic "sample" (argmax) over (q-value) logits.
else:
action = action_distribution.deterministic_sample()
return action, action_logp
|
ray-project/ray
|
rllib/utils/exploration/slate_soft_q.py
|
Python
|
apache-2.0
| 1,483
|
import tweepy
import os
from flask import Flask, make_response, jsonify
CONSUMER_KEY = os.environ['CATCHMEMEALL_TWITTER_CONSUMER_TOKEN']
CONSUMER_SECRET = os.environ['CATCHMEMEALL_TWITTER_CONSUMER_SECRET']
app = Flask(__name__)
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
api = tweepy.API(auth)
@app.route('/twitter/avatar/<username>', methods=['GET'])
def get_user_avatar(username):
try:
user = api.get_user(username)
except tweepy.TweepError:
return make_response(jsonify({'error': 'no username %s' % (username)}), 404)
else:
json_data = {'avatar': user.profile_image_url}
return make_response(jsonify(json_data), 200)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8888, debug=True)
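# Example request against the running service ('someuser' is a placeholder; the
# returned avatar URL is illustrative):
#
#     curl http://localhost:8888/twitter/avatar/someuser
#     # -> {"avatar": "http://.../profile_image.jpg"} or a 404 JSON error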
|
mapado/CatchMemeAll
|
twitter_server.py
|
Python
|
apache-2.0
| 763
|
from guardian.shortcuts import get_perms
from rest_framework import serializers as ser
from rest_framework.exceptions import ValidationError
from reviews.workflow import Workflows
from api.actions.serializers import ReviewableCountsRelationshipField
from api.base.utils import absolute_reverse, get_user_auth
from api.base.serializers import JSONAPISerializer, LinksField, RelationshipField, ShowIfVersion
class PreprintProviderSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'allow_submissions',
'description',
'domain',
'domain_redirect_enabled',
'id',
'name',
'share_publish_type',
'reviews_workflow',
'permissions',
])
name = ser.CharField(read_only=True)
description = ser.CharField(read_only=True)
id = ser.CharField(read_only=True, max_length=200, source='_id')
advisory_board = ser.CharField(read_only=True)
example = ser.CharField(read_only=True, allow_null=True)
domain = ser.CharField(read_only=True, allow_null=False)
domain_redirect_enabled = ser.BooleanField(read_only=True)
footer_links = ser.CharField(read_only=True)
share_source = ser.CharField(read_only=True)
share_publish_type = ser.CharField(read_only=True)
email_support = ser.CharField(read_only=True, allow_null=True)
preprint_word = ser.CharField(read_only=True, allow_null=True)
allow_submissions = ser.BooleanField(read_only=True)
additional_providers = ser.ListField(read_only=True, child=ser.CharField())
# Reviews settings are the only writable fields
reviews_workflow = ser.ChoiceField(choices=Workflows.choices())
reviews_comments_private = ser.BooleanField()
reviews_comments_anonymous = ser.BooleanField()
permissions = ser.SerializerMethodField()
preprints = ReviewableCountsRelationshipField(
related_view='preprint_providers:preprints-list',
related_view_kwargs={'provider_id': '<_id>'}
)
taxonomies = RelationshipField(
related_view='preprint_providers:taxonomy-list',
related_view_kwargs={'provider_id': '<_id>'}
)
highlighted_taxonomies = RelationshipField(
related_view='preprint_providers:highlighted-taxonomy-list',
related_view_kwargs={'provider_id': '<_id>'}
)
licenses_acceptable = RelationshipField(
related_view='preprint_providers:license-list',
related_view_kwargs={'provider_id': '<_id>'}
)
links = LinksField({
'self': 'get_absolute_url',
'preprints': 'get_preprints_url',
'external_url': 'get_external_url'
})
# Deprecated fields
header_text = ShowIfVersion(
ser.CharField(read_only=True, default=''),
min_version='2.0', max_version='2.3'
)
banner_path = ShowIfVersion(
ser.CharField(read_only=True, default=''),
min_version='2.0', max_version='2.3'
)
logo_path = ShowIfVersion(
ser.CharField(read_only=True, default=''),
min_version='2.0', max_version='2.3'
)
email_contact = ShowIfVersion(
ser.CharField(read_only=True, allow_null=True),
min_version='2.0', max_version='2.3'
)
social_twitter = ShowIfVersion(
ser.CharField(read_only=True, allow_null=True),
min_version='2.0', max_version='2.3'
)
social_facebook = ShowIfVersion(
ser.CharField(read_only=True, allow_null=True),
min_version='2.0', max_version='2.3'
)
social_instagram = ShowIfVersion(
ser.CharField(read_only=True, allow_null=True),
min_version='2.0', max_version='2.3'
)
subjects_acceptable = ShowIfVersion(
ser.ListField(read_only=True, default=[]),
min_version='2.0', max_version='2.4'
)
class Meta:
type_ = 'preprint_providers'
def get_absolute_url(self, obj):
return obj.absolute_api_v2_url
def get_preprints_url(self, obj):
return absolute_reverse('preprint_providers:preprints-list', kwargs={
'provider_id': obj._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_external_url(self, obj):
return obj.external_url
def get_permissions(self, obj):
auth = get_user_auth(self.context['request'])
if not auth.user:
return []
return get_perms(auth.user, obj)
def validate(self, data):
required_fields = ('reviews_workflow', 'reviews_comments_private', 'reviews_comments_anonymous')
for field in required_fields:
if data.get(field) is None:
raise ValidationError('All reviews fields must be set at once: `{}`'.format('`, `'.join(required_fields)))
return data
def update(self, instance, validated_data):
instance.reviews_workflow = validated_data['reviews_workflow']
instance.reviews_comments_private = validated_data['reviews_comments_private']
instance.reviews_comments_anonymous = validated_data['reviews_comments_anonymous']
instance.save()
return instance
|
aaxelb/osf.io
|
api/preprint_providers/serializers.py
|
Python
|
apache-2.0
| 5,102
|
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI org proposal processing mapreduce."""
from mapreduce import context
from mapreduce import operation
from melange.request import links
from soc.logic import org_app as org_app_logic
# MapReduce requires import of processed model classes.
# pylint: disable=unused-import
from soc.models.site import Site
from soc.models.org_app_survey import OrgAppSurvey
from soc.modules.gci.models.program import GCIProgram
from soc.modules.gci.views.helper import url_names as gci_url_names
# pylint: enable=unused-import
class MapreduceRequestData(object):
"""Simple class to use for convenience with RequestData object"""
def __init__(self, program, site):
self.program = program
self.site = site
def process(org_app):
ctx = context.get()
params = ctx.mapreduce_spec.mapper.params
program_type = params['program_type']
program_key_str = params['program_key']
# now the script is used only for GCI
if program_type != 'gci':
return
program = GCIProgram.get_by_key_name(program_key_str)
survey_query = OrgAppSurvey.all(keys_only=True).filter('program', program)
survey_key = survey_query.get()
# We can skip the survey records not belonging to the given program.
if org_app.survey.key() != survey_key:
return
# TODO(daniel): create a MapReduce/Task RequestData
data = MapreduceRequestData(program, Site.get_by_key_name('site'))
absolute_url = links.ABSOLUTE_LINKER.program(
program, gci_url_names.CREATE_GCI_ORG_PROFILE)
if org_app.status == 'pre-accepted':
org_app_logic.setStatus(data, org_app, 'accepted', absolute_url)
yield operation.counters.Increment("proposals_accepted")
elif org_app.status == 'pre-rejected':
org_app_logic.setStatus(data, org_app, 'rejected', absolute_url)
yield operation.counters.Increment("proposals_rejected")
else:
yield operation.counters.Increment("proposals_ignored")
|
rhyolight/nupic.son
|
app/soc/mapreduce/process_org_apps.py
|
Python
|
apache-2.0
| 2,473
|
from __future__ import absolute_import
from typing import Any
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.auth import authenticate, login, get_backends
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponseForbidden, HttpResponse
from django.shortcuts import redirect
from django.template import RequestContext, loader
from django.utils.timezone import now
from django.utils.cache import patch_cache_control
from django.core.exceptions import ValidationError
from django.core import validators
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.forms.models import model_to_dict
from django.core.mail import send_mail
from django.middleware.csrf import get_token
from zerver.models import Message, UserProfile, Stream, Subscription, Huddle, \
Recipient, Realm, UserMessage, DefaultStream, RealmEmoji, RealmAlias, \
RealmFilter, \
PreregistrationUser, get_client, MitUser, UserActivity, PushDeviceToken, \
get_stream, UserPresence, get_recipient, \
split_email_to_domain, resolve_email_to_domain, email_to_username, get_realm, \
completely_open, get_unique_open_realm, remote_user_to_email, email_allowed_for_realm
from zerver.lib.actions import do_change_password, do_change_full_name, do_change_is_admin, \
do_activate_user, do_create_user, \
internal_send_message, update_user_presence, do_events_register, \
get_status_dict, do_change_enable_offline_email_notifications, \
do_change_enable_digest_emails, do_set_realm_name, do_set_realm_restricted_to_domain, \
do_set_realm_invite_required, do_set_realm_invite_by_admins_only, \
do_set_realm_create_stream_by_admins_only, get_default_subs, \
user_email_is_unique, do_invite_users, do_refer_friend, compute_mit_user_fullname, \
do_set_muted_topics, clear_followup_emails_queue, do_update_pointer, realm_user_count
from zerver.lib.push_notifications import num_push_devices_for_user
from zerver.forms import RegistrationForm, HomepageForm, ToSForm, \
CreateUserForm, is_inactive, OurAuthenticationForm
from django.views.decorators.csrf import csrf_exempt
from django_auth_ldap.backend import LDAPBackend, _LDAPUser
from zerver.lib import bugdown
from zerver.lib.validator import check_string, check_list, check_bool
from zerver.decorator import require_post, authenticated_json_post_view, \
has_request_variables, authenticated_json_view, to_non_negative_int, \
JsonableError, get_user_profile_by_email, REQ, require_realm_admin, \
zulip_login_required
from zerver.lib.avatar import avatar_url
from zerver.lib.upload import upload_message_image_through_web_client, \
get_signed_upload_url, get_realm_for_filename
from zerver.lib.response import json_success, json_error
from zerver.lib.utils import statsd, generate_random_token
from zproject.backends import password_auth_enabled, dev_auth_enabled
from confirmation.models import Confirmation
import requests
import subprocess
import calendar
import datetime
import ujson
import simplejson
import re
from six.moves import urllib
import base64
import time
import logging
import jwt
import hashlib
import hmac
from zproject.jinja2 import render_to_response
from zerver.lib.rest import rest_dispatch as _rest_dispatch
rest_dispatch = csrf_exempt((lambda request, *args, **kwargs: _rest_dispatch(request, globals(), *args, **kwargs)))
def name_changes_disabled(realm):
return settings.NAME_CHANGES_DISABLED or realm.name_changes_disabled
@require_post
def accounts_register(request):
key = request.POST['key']
confirmation = Confirmation.objects.get(confirmation_key=key)
prereg_user = confirmation.content_object
email = prereg_user.email
mit_beta_user = isinstance(confirmation.content_object, MitUser)
try:
existing_user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
existing_user_profile = None
validators.validate_email(email)
unique_open_realm = get_unique_open_realm()
if unique_open_realm:
realm = unique_open_realm
domain = realm.domain
elif not mit_beta_user and prereg_user.referred_by:
# If someone invited you, you are joining their realm regardless
# of your e-mail address.
#
# MitUsers can't be referred and don't have a referred_by field.
realm = prereg_user.referred_by.realm
domain = realm.domain
if not email_allowed_for_realm(email, realm):
return render_to_response("zerver/closed_realm.html", {"closed_domain_name": realm.name})
elif not mit_beta_user and prereg_user.realm:
# You have a realm set, even though nobody referred you. This
# happens if you sign up through a special URL for an open
# realm.
domain = prereg_user.realm.domain
realm = get_realm(domain)
else:
domain = resolve_email_to_domain(email)
realm = get_realm(domain)
if realm and realm.deactivated:
# The user is trying to register for a deactivated realm. Advise them to
# contact support.
return render_to_response("zerver/deactivated.html",
{"deactivated_domain_name": realm.name,
"zulip_administrator": settings.ZULIP_ADMINISTRATOR})
try:
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
# Mirror dummy users to be activated must be inactive
is_inactive(email)
else:
# Other users should not already exist at all.
user_email_is_unique(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.parse.quote_plus(email))
name_validated = False
full_name = None
if request.POST.get('from_confirmation'):
try:
del request.session['authenticated_full_name']
except KeyError:
pass
if domain == "mit.edu":
hesiod_name = compute_mit_user_fullname(email)
form = RegistrationForm(
initial={'full_name': hesiod_name if "@" not in hesiod_name else ""})
name_validated = True
elif settings.POPULATE_PROFILE_VIA_LDAP:
for backend in get_backends():
if isinstance(backend, LDAPBackend):
ldap_attrs = _LDAPUser(backend, backend.django_to_ldap_username(email)).attrs
try:
request.session['authenticated_full_name'] = ldap_attrs[settings.AUTH_LDAP_USER_ATTR_MAP['full_name']][0]
name_validated = True
# We don't use initial= here, because if the form is
# complete (that is, no additional fields need to be
# filled out by the user) we want the form to validate,
# so they can be directly registered without having to
# go through this interstitial.
form = RegistrationForm(
{'full_name': request.session['authenticated_full_name']})
# FIXME: This will result in the user getting
# validation errors if they have to enter a password.
# Not relevant for ONLY_SSO, though.
break
except TypeError:
# Let the user fill out a name and/or try another backend
form = RegistrationForm()
elif 'full_name' in request.POST:
form = RegistrationForm(
initial={'full_name': request.POST.get('full_name')}
)
else:
form = RegistrationForm()
else:
postdata = request.POST.copy()
if name_changes_disabled(realm):
# If we populate profile information via LDAP and we have a
# verified name from you on file, use that. Otherwise, fall
# back to the full name in the request.
try:
postdata.update({'full_name': request.session['authenticated_full_name']})
name_validated = True
except KeyError:
pass
form = RegistrationForm(postdata)
if not password_auth_enabled(realm):
form['password'].field.required = False
if form.is_valid():
if password_auth_enabled(realm):
password = form.cleaned_data['password']
else:
# SSO users don't need no passwords
password = None
full_name = form.cleaned_data['full_name']
short_name = email_to_username(email)
first_in_realm = len(UserProfile.objects.filter(realm=realm, is_bot=False)) == 0
# FIXME: sanitize email addresses and fullname
if existing_user_profile is not None and existing_user_profile.is_mirror_dummy:
try:
user_profile = existing_user_profile
do_activate_user(user_profile)
do_change_password(user_profile, password)
do_change_full_name(user_profile, full_name)
except UserProfile.DoesNotExist:
user_profile = do_create_user(email, password, realm, full_name, short_name,
prereg_user=prereg_user,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
else:
user_profile = do_create_user(email, password, realm, full_name, short_name,
prereg_user=prereg_user,
newsletter_data={"IP": request.META['REMOTE_ADDR']})
# This logs you in using the ZulipDummyBackend, since honestly nothing
# more fancy than this is required.
login(request, authenticate(username=user_profile.email, use_dummy_backend=True))
if first_in_realm:
do_change_is_admin(user_profile, True)
return HttpResponseRedirect(reverse('zerver.views.initial_invite_page'))
else:
return HttpResponseRedirect(reverse('zerver.views.home'))
return render_to_response('zerver/register.html',
{'form': form,
'company_name': domain,
'email': email,
'key': key,
'full_name': request.session.get('authenticated_full_name', None),
'lock_name': name_validated and name_changes_disabled(realm),
# password_auth_enabled is normally set via our context processor,
# but for the registration form, there is no logged in user yet, so
# we have to set it here.
'password_auth_enabled': password_auth_enabled(realm),
},
request=request)
@zulip_login_required
def accounts_accept_terms(request):
email = request.user.email
domain = resolve_email_to_domain(email)
if request.method == "POST":
form = ToSForm(request.POST)
if form.is_valid():
full_name = form.cleaned_data['full_name']
send_mail('Terms acceptance for ' + full_name,
loader.render_to_string('zerver/tos_accept_body.txt',
{'name': full_name,
'email': email,
'ip': request.META['REMOTE_ADDR'],
'browser': request.META.get('HTTP_USER_AGENT', "Unspecified")}),
settings.EMAIL_HOST_USER,
["all@zulip.com"])
do_change_full_name(request.user, full_name)
return redirect(home)
else:
form = ToSForm()
return render_to_response('zerver/accounts_accept_terms.html',
{ 'form': form, 'company_name': domain, 'email': email },
request=request)
from zerver.lib.ccache import make_ccache
@authenticated_json_view
@has_request_variables
def webathena_kerberos_login(request, user_profile,
cred=REQ(default=None)):
if cred is None:
return json_error(_("Could not find Kerberos credential"))
if not user_profile.realm.domain == "mit.edu":
return json_error(_("Webathena login only for mit.edu realm"))
try:
parsed_cred = ujson.loads(cred)
user = parsed_cred["cname"]["nameString"][0]
if user == "golem":
# Hack for an mit.edu user whose Kerberos username doesn't
# match what he zephyrs as
user = "ctl"
assert(user == user_profile.email.split("@")[0])
ccache = make_ccache(parsed_cred)
except Exception:
return json_error(_("Invalid Kerberos cache"))
# TODO: Send these data via (say) rabbitmq
try:
subprocess.check_call(["ssh", "zulip@zmirror2.zulip.net", "--",
"/home/zulip/zulip/bots/process_ccache",
user,
user_profile.api_key,
base64.b64encode(ccache)])
except Exception:
logging.exception("Error updating the user's ccache")
return json_error(_("We were unable to setup mirroring for you"))
return json_success()
def api_endpoint_docs(request):
raw_calls = open('templates/zerver/api_content.json', 'r').read()
calls = ujson.loads(raw_calls)
langs = set()
for call in calls:
call["endpoint"] = "%s/v1/%s" % (settings.EXTERNAL_API_URI, call["endpoint"])
call["example_request"]["curl"] = call["example_request"]["curl"].replace("https://api.zulip.com", settings.EXTERNAL_API_URI)
response = call['example_response']
if '\n' not in response:
# For 1-line responses, pretty-print them
extended_response = response.replace(", ", ",\n ")
else:
extended_response = response
call['rendered_response'] = bugdown.convert("~~~ .py\n" + extended_response + "\n~~~\n", "default")
for example_type in ('request', 'response'):
for lang in call.get('example_' + example_type, []):
langs.add(lang)
return render_to_response(
'zerver/api_endpoints.html', {
'content': calls,
'langs': langs,
},
request=request)
@authenticated_json_post_view
@has_request_variables
def json_invite_users(request, user_profile, invitee_emails=REQ()):
if not invitee_emails:
return json_error(_("You must specify at least one email address."))
invitee_emails = set(re.split(r'[, \n]', invitee_emails))
stream_names = request.POST.getlist('stream')
if not stream_names:
return json_error(_("You must specify at least one stream for invitees to join."))
# We unconditionally sub you to the notifications stream if it
# exists and is public.
notifications_stream = user_profile.realm.notifications_stream
if notifications_stream and not notifications_stream.invite_only:
stream_names.append(notifications_stream.name)
streams = []
for stream_name in stream_names:
stream = get_stream(stream_name, user_profile.realm)
if stream is None:
return json_error(_("Stream does not exist: %s. No invites were sent.") % (stream_name,))
streams.append(stream)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
return json_success()
def create_homepage_form(request, user_info=None):
if user_info:
return HomepageForm(user_info, domain=request.session.get("domain"))
# An empty fields dict is not treated the same way as not
# providing it.
return HomepageForm(domain=request.session.get("domain"))
def maybe_send_to_registration(request, email, full_name=''):
form = create_homepage_form(request, user_info={'email': email})
request.verified_email = None
if form.is_valid():
# Construct a PreregistrationUser object and send the user over to
# the confirmation view.
prereg_user = None
if settings.ONLY_SSO:
try:
prereg_user = PreregistrationUser.objects.filter(email__iexact=email).latest("invited_at")
except PreregistrationUser.DoesNotExist:
prereg_user = create_preregistration_user(email, request)
else:
prereg_user = create_preregistration_user(email, request)
return redirect("".join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
"/",
# Split this so we only get the part after the /
Confirmation.objects.get_link_for_object(prereg_user).split("/", 3)[3],
'?full_name=',
            # urllib does not handle Unicode, so coerce to an encoded byte string
# Explanation: http://stackoverflow.com/a/5605354/90777
urllib.parse.quote_plus(full_name.encode('utf8')))))
else:
return render_to_response('zerver/accounts_home.html', {'form': form},
request=request)
def login_or_register_remote_user(request, remote_username, user_profile, full_name=''):
if user_profile is None or user_profile.is_mirror_dummy:
# Since execution has reached here, the client specified a remote user
# but no associated user account exists. Send them over to the
# PreregistrationUser flow.
return maybe_send_to_registration(request, remote_user_to_email(remote_username), full_name)
else:
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
def remote_user_sso(request):
try:
remote_user = request.META["REMOTE_USER"]
except KeyError:
raise JsonableError(_("No REMOTE_USER set."))
user_profile = authenticate(remote_user=remote_user)
return login_or_register_remote_user(request, remote_user, user_profile)
@csrf_exempt
def remote_user_jwt(request):
try:
json_web_token = request.POST["json_web_token"]
payload, signing_input, header, signature = jwt.load(json_web_token)
except KeyError:
raise JsonableError(_("No JSON web token passed in request"))
except jwt.DecodeError:
raise JsonableError(_("Bad JSON web token"))
remote_user = payload.get("user", None)
if remote_user is None:
raise JsonableError(_("No user specified in JSON web token claims"))
domain = payload.get('realm', None)
if domain is None:
raise JsonableError(_("No domain specified in JSON web token claims"))
email = "%s@%s" % (remote_user, domain)
try:
jwt.verify_signature(payload, signing_input, header, signature,
settings.JWT_AUTH_KEYS[domain])
# We do all the authentication we need here (otherwise we'd have to
# duplicate work), but we need to call authenticate with some backend so
# that the request.backend attribute gets set.
user_profile = authenticate(username=email, use_dummy_backend=True)
except (jwt.DecodeError, jwt.ExpiredSignature):
raise JsonableError(_("Bad JSON web token signature"))
except KeyError:
raise JsonableError(_("Realm not authorized for JWT login"))
except UserProfile.DoesNotExist:
user_profile = None
return login_or_register_remote_user(request, email, user_profile, remote_user)
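# Illustrative token claims for remote_user_jwt above (values are placeholders):
#
#     {"user": "alice", "realm": "example.com"}
#
# The token must be signed with the key configured for that realm in
# settings.JWT_AUTH_KEYS and is passed as the `json_web_token` POST field.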
def google_oauth2_csrf(request, value):
return hmac.new(get_token(request).encode('utf-8'), value, hashlib.sha256).hexdigest()
def start_google_oauth2(request):
uri = 'https://accounts.google.com/o/oauth2/auth?'
cur_time = str(int(time.time()))
csrf_state = '{}:{}'.format(
cur_time,
google_oauth2_csrf(request, cur_time),
)
    params = {
'response_type': 'code',
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'scope': 'profile email',
'state': csrf_state,
}
    return redirect(uri + urllib.parse.urlencode(params))
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
if requests_json_is_function:
return resp.json()
else:
return resp.json
def finish_google_oauth2(request):
error = request.GET.get('error')
if error == 'access_denied':
return redirect('/')
elif error is not None:
logging.warning('Error from google oauth2 login %r', request.GET)
return HttpResponse(status=400)
value, hmac_value = request.GET.get('state').split(':')
if hmac_value != google_oauth2_csrf(request, value):
logging.warning('Google oauth2 CSRF error')
return HttpResponse(status=400)
resp = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'code': request.GET.get('code'),
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
'redirect_uri': ''.join((
settings.EXTERNAL_URI_SCHEME,
request.get_host(),
reverse('zerver.views.finish_google_oauth2'),
)),
'grant_type': 'authorization_code',
},
)
if resp.status_code == 400:
logging.warning('User error converting Google oauth2 login to token: %r' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
raise Exception('Could not convert google oauth2 code to access_token\r%r' % (resp.text,))
access_token = extract_json_response(resp)['access_token']
resp = requests.get(
'https://www.googleapis.com/plus/v1/people/me',
params={'access_token': access_token}
)
if resp.status_code == 400:
logging.warning('Google login failed making info API call: %r' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
raise Exception('Google login failed making API call\r%r' % (resp.text,))
body = extract_json_response(resp)
try:
full_name = body['name']['formatted']
except KeyError:
        # Only google+ users have a formatted name. I am ignoring i18n here.
full_name = u'{} {}'.format(
body['name']['givenName'], body['name']['familyName']
)
for email in body['emails']:
if email['type'] == 'account':
break
else:
raise Exception('Google oauth2 account email not found %r' % (body,))
email_address = email['value']
user_profile = authenticate(username=email_address, use_dummy_backend=True)
return login_or_register_remote_user(request, email_address, user_profile, full_name)
def login_page(request, **kwargs):
extra_context = kwargs.pop('extra_context', {})
if dev_auth_enabled():
# Development environments usually have only a few users, but
# it still makes sense to limit how many users we render to
# support performance testing with DevAuthBackend.
MAX_DEV_BACKEND_USERS = 100
users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True)
users = users_query.order_by('email')[0:MAX_DEV_BACKEND_USERS]
extra_context['direct_admins'] = [u.email for u in users if u.is_realm_admin]
extra_context['direct_users'] = [u.email for u in users if not u.is_realm_admin]
template_response = django_login_page(
request, authentication_form=OurAuthenticationForm,
extra_context=extra_context, **kwargs)
try:
template_response.context_data['email'] = request.GET['email']
except KeyError:
pass
return template_response
def dev_direct_login(request, **kwargs):
# This function allows logging in without a password and should only be called in development environments.
# It may be called if the DevAuthBackend is included in settings.AUTHENTICATION_BACKENDS
if (not dev_auth_enabled()) or settings.PRODUCTION:
# This check is probably not required, since authenticate would fail without an enabled DevAuthBackend.
raise Exception('Direct login not supported.')
email = request.POST['direct_email']
user_profile = authenticate(username=email)
if user_profile is None:
raise Exception("User cannot login")
login(request, user_profile)
return HttpResponseRedirect("%s%s" % (settings.EXTERNAL_URI_SCHEME,
request.get_host()))
@authenticated_json_post_view
@has_request_variables
def json_bulk_invite_users(request, user_profile,
invitee_emails=REQ(validator=check_list(check_string))):
invitee_emails = set(invitee_emails)
streams = get_default_subs(user_profile)
ret_error, error_data = do_invite_users(user_profile, invitee_emails, streams)
if ret_error is not None:
return json_error(data=error_data, msg=ret_error)
else:
# Report bulk invites to internal Zulip.
invited = PreregistrationUser.objects.filter(referred_by=user_profile)
internal_message = "%s <`%s`> invited %d people to Zulip." % (
user_profile.full_name, user_profile.email, invited.count())
internal_send_message(settings.NEW_USER_BOT, "stream", "signups",
user_profile.realm.domain, internal_message)
return json_success()
@zulip_login_required
def initial_invite_page(request):
user = request.user
# Only show the bulk-invite page for the first user in a realm
domain_count = len(UserProfile.objects.filter(realm=user.realm))
if domain_count > 1:
return redirect('zerver.views.home')
params = {'company_name': user.realm.domain}
if (user.realm.restricted_to_domain):
params['invite_suffix'] = user.realm.domain
return render_to_response('zerver/initial_invite_page.html', params,
request=request)
@require_post
def logout_then_login(request, **kwargs):
return django_logout_then_login(request, kwargs)
def create_preregistration_user(email, request):
domain = request.session.get("domain")
if completely_open(domain):
# Clear the "domain" from the session object; it's no longer needed
request.session["domain"] = None
# The user is trying to sign up for a completely open realm,
# so create them a PreregistrationUser for that realm
return PreregistrationUser.objects.create(email=email,
realm=get_realm(domain))
# MIT users who are not explicitly signing up for an open realm
# require special handling (They may already have an (inactive)
# account, for example)
if split_email_to_domain(email) == "mit.edu":
return MitUser.objects.get_or_create(email=email)[0]
return PreregistrationUser.objects.create(email=email)
def accounts_home_with_domain(request, domain):
if completely_open(domain):
# You can sign up for a completely open realm through a
# special registration path that contains the domain in the
# URL. We store this information in the session rather than
# elsewhere because we don't have control over URL or form
# data for folks registering through OpenID.
request.session["domain"] = domain
return accounts_home(request)
else:
return HttpResponseRedirect(reverse('zerver.views.accounts_home'))
def send_registration_completion_email(email, request):
"""
Send an email with a confirmation link to the provided e-mail so the user
can complete their registration.
"""
prereg_user = create_preregistration_user(email, request)
context = {'support_email': settings.ZULIP_ADMINISTRATOR,
'voyager': settings.VOYAGER}
Confirmation.objects.send_confirmation(prereg_user, email,
additional_context=context)
def accounts_home(request):
if request.method == 'POST':
form = create_homepage_form(request, user_info=request.POST)
if form.is_valid():
email = form.cleaned_data['email']
send_registration_completion_email(email, request)
return HttpResponseRedirect(reverse('send_confirm', kwargs={'email': email}))
try:
email = request.POST['email']
# Note: We don't check for uniqueness
is_inactive(email)
except ValidationError:
return HttpResponseRedirect(reverse('django.contrib.auth.views.login') + '?email=' + urllib.parse.quote_plus(email))
else:
form = create_homepage_form(request)
return render_to_response('zerver/accounts_home.html',
{'form': form, 'current_url': request.get_full_path},
request=request)
def approximate_unread_count(user_profile):
not_in_home_view_recipients = [sub.recipient.id for sub in \
Subscription.objects.filter(
user_profile=user_profile, in_home_view=False)]
muted_topics = ujson.loads(user_profile.muted_topics)
    # If muted_topics is empty, it looks like []. If it is non-empty, it looks
# like [[u'devel', u'test']]. We should switch to a consistent envelope, but
# until we do we still have both in the database.
if muted_topics:
muted_topics = muted_topics[0]
return UserMessage.objects.filter(
user_profile=user_profile, message_id__gt=user_profile.pointer).exclude(
message__recipient__type=Recipient.STREAM,
message__recipient__id__in=not_in_home_view_recipients).exclude(
message__subject__in=muted_topics).exclude(
flags=UserMessage.flags.read).count()
def sent_time_in_epoch_seconds(user_message):
# user_message is a UserMessage object.
if not user_message:
return None
# We have USE_TZ = True, so our datetime objects are timezone-aware.
# Return the epoch seconds in UTC.
return calendar.timegm(user_message.message.pub_date.utctimetuple())
@zulip_login_required
def home(request):
# We need to modify the session object every two weeks or it will expire.
# This line makes reloading the page a sufficient action to keep the
# session alive.
request.session.modified = True
user_profile = request.user
request._email = request.user.email
request.client = get_client("website")
narrow = [] # type: List[List[str]]
narrow_stream = None
narrow_topic = request.GET.get("topic")
if request.GET.get("stream"):
try:
narrow_stream = get_stream(request.GET.get("stream"), user_profile.realm)
assert(narrow_stream is not None)
assert(narrow_stream.is_public())
narrow = [["stream", narrow_stream.name]]
except Exception:
logging.exception("Narrow parsing")
if narrow_topic is not None:
narrow.append(["topic", narrow_topic])
register_ret = do_events_register(user_profile, request.client,
apply_markdown=True, narrow=narrow)
user_has_messages = (register_ret['max_message_id'] != -1)
# Reset our don't-spam-users-with-email counter since the
# user has since logged in
    if user_profile.last_reminder is not None:
user_profile.last_reminder = None
user_profile.save(update_fields=["last_reminder"])
# Brand new users get the tutorial
needs_tutorial = settings.TUTORIAL_ENABLED and \
user_profile.tutorial_status != UserProfile.TUTORIAL_FINISHED
first_in_realm = realm_user_count(user_profile.realm) == 1
# If you are the only person in the realm and you didn't invite
# anyone, we'll continue to encourage you to do so on the frontend.
prompt_for_invites = first_in_realm and \
not PreregistrationUser.objects.filter(referred_by=user_profile).count()
if user_profile.pointer == -1 and user_has_messages:
# Put the new user's pointer at the bottom
#
# This improves performance, because we limit backfilling of messages
# before the pointer. It's also likely that someone joining an
# organization is interested in recent messages more than the very
# first messages on the system.
register_ret['pointer'] = register_ret['max_message_id']
user_profile.last_pointer_updater = request.session.session_key
if user_profile.pointer == -1:
latest_read = None
else:
try:
latest_read = UserMessage.objects.get(user_profile=user_profile,
message__id=user_profile.pointer)
except UserMessage.DoesNotExist:
# Don't completely fail if your saved pointer ID is invalid
logging.warning("%s has invalid pointer %s" % (user_profile.email, user_profile.pointer))
latest_read = None
desktop_notifications_enabled = user_profile.enable_desktop_notifications
if narrow_stream is not None:
desktop_notifications_enabled = False
if user_profile.realm.notifications_stream:
notifications_stream = user_profile.realm.notifications_stream.name
else:
notifications_stream = ""
# Pass parameters to the client-side JavaScript code.
# These end up in a global JavaScript Object named 'page_params'.
page_params = dict(
voyager = settings.VOYAGER,
debug_mode = settings.DEBUG,
test_suite = settings.TEST_SUITE,
poll_timeout = settings.POLL_TIMEOUT,
login_page = settings.HOME_NOT_LOGGED_IN,
maxfilesize = settings.MAX_FILE_UPLOAD_SIZE,
password_auth_enabled = password_auth_enabled(user_profile.realm),
have_initial_messages = user_has_messages,
subbed_info = register_ret['subscriptions'],
unsubbed_info = register_ret['unsubscribed'],
email_dict = register_ret['email_dict'],
people_list = register_ret['realm_users'],
bot_list = register_ret['realm_bots'],
initial_pointer = register_ret['pointer'],
initial_presences = register_ret['presences'],
initial_servertime = time.time(), # Used for calculating relative presence age
fullname = user_profile.full_name,
email = user_profile.email,
domain = user_profile.realm.domain,
realm_name = register_ret['realm_name'],
realm_invite_required = register_ret['realm_invite_required'],
realm_invite_by_admins_only = register_ret['realm_invite_by_admins_only'],
realm_create_stream_by_admins_only = register_ret['realm_create_stream_by_admins_only'],
realm_restricted_to_domain = register_ret['realm_restricted_to_domain'],
enter_sends = user_profile.enter_sends,
left_side_userlist = register_ret['left_side_userlist'],
referrals = register_ret['referrals'],
realm_emoji = register_ret['realm_emoji'],
needs_tutorial = needs_tutorial,
first_in_realm = first_in_realm,
prompt_for_invites = prompt_for_invites,
notifications_stream = notifications_stream,
# Stream message notification settings:
stream_desktop_notifications_enabled =
user_profile.enable_stream_desktop_notifications,
stream_sounds_enabled = user_profile.enable_stream_sounds,
# Private message and @-mention notification settings:
desktop_notifications_enabled = desktop_notifications_enabled,
sounds_enabled =
user_profile.enable_sounds,
enable_offline_email_notifications =
user_profile.enable_offline_email_notifications,
enable_offline_push_notifications =
user_profile.enable_offline_push_notifications,
twenty_four_hour_time = register_ret['twenty_four_hour_time'],
enable_digest_emails = user_profile.enable_digest_emails,
event_queue_id = register_ret['queue_id'],
last_event_id = register_ret['last_event_id'],
max_message_id = register_ret['max_message_id'],
unread_count = approximate_unread_count(user_profile),
furthest_read_time = sent_time_in_epoch_seconds(latest_read),
staging = settings.ZULIP_COM_STAGING or settings.DEVELOPMENT,
alert_words = register_ret['alert_words'],
muted_topics = register_ret['muted_topics'],
realm_filters = register_ret['realm_filters'],
is_admin = user_profile.is_realm_admin,
can_create_streams = user_profile.can_create_streams(),
name_changes_disabled = name_changes_disabled(user_profile.realm),
has_mobile_devices = num_push_devices_for_user(user_profile) > 0,
autoscroll_forever = user_profile.autoscroll_forever,
default_desktop_notifications = user_profile.default_desktop_notifications,
avatar_url = avatar_url(user_profile),
mandatory_topics = user_profile.realm.mandatory_topics,
show_digest_email = user_profile.realm.show_digest_email,
)
if narrow_stream is not None:
# In narrow_stream context, initial pointer is just latest message
recipient = get_recipient(Recipient.STREAM, narrow_stream.id)
try:
initial_pointer = Message.objects.filter(recipient=recipient).order_by('id').reverse()[0].id
except IndexError:
initial_pointer = -1
page_params["narrow_stream"] = narrow_stream.name
if narrow_topic is not None:
page_params["narrow_topic"] = narrow_topic
page_params["narrow"] = [dict(operator=term[0], operand=term[1]) for term in narrow]
page_params["max_message_id"] = initial_pointer
page_params["initial_pointer"] = initial_pointer
page_params["have_initial_messages"] = (initial_pointer != -1)
statsd.incr('views.home')
show_invites = True
# Some realms only allow admins to invite users
if user_profile.realm.invite_by_admins_only and not user_profile.is_realm_admin:
show_invites = False
product_name = "Zulip"
page_params['product_name'] = product_name
request._log_data['extra'] = "[%s]" % (register_ret["queue_id"],)
response = render_to_response('zerver/index.html',
{'user_profile': user_profile,
'page_params' : simplejson.encoder.JSONEncoderForHTML().encode(page_params),
'nofontface': is_buggy_ua(request.META.get("HTTP_USER_AGENT", "Unspecified")),
'avatar_url': avatar_url(user_profile),
'show_debug':
settings.DEBUG and ('show_debug' in request.GET),
'pipeline': settings.PIPELINE,
'show_invites': show_invites,
'is_admin': user_profile.is_realm_admin,
'show_webathena': user_profile.realm.domain == "mit.edu",
'enable_feedback': settings.ENABLE_FEEDBACK,
'embedded': narrow_stream is not None,
'product_name': product_name
},
request=request)
patch_cache_control(response, no_cache=True, no_store=True, must_revalidate=True)
return response
@zulip_login_required
def desktop_home(request):
return HttpResponseRedirect(reverse('zerver.views.home'))
def is_buggy_ua(agent):
"""Discrimiate CSS served to clients based on User Agent
Due to QTBUG-3467, @font-face is not supported in QtWebKit.
This may get fixed in the future, but for right now we can
just serve the more conservative CSS to all our desktop apps.
"""
return ("Humbug Desktop/" in agent or "Zulip Desktop/" in agent or "ZulipDesktop/" in agent) and \
"Mac" not in agent
def get_pointer_backend(request, user_profile):
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
def generate_client_id():
return generate_random_token(32)
# The order of creation of the various dictionaries are important.
# We filter on {userprofile,stream,subscription_recipient}_ids.
@require_realm_admin
def export(request, user_profile):
if (Message.objects.filter(sender__realm=user_profile.realm).count() > 1000000 or
UserMessage.objects.filter(user_profile__realm=user_profile.realm).count() > 3000000):
return json_error(_("Realm has too much data for non-batched export."))
response = {}
response['zerver_realm'] = [model_to_dict(x)
for x in Realm.objects.select_related().filter(id=user_profile.realm.id)]
response['zerver_userprofile'] = [model_to_dict(x, exclude=["password", "api_key"])
for x in UserProfile.objects.select_related().filter(realm=user_profile.realm)]
userprofile_ids = set(userprofile["id"] for userprofile in response['zerver_userprofile'])
response['zerver_stream'] = [model_to_dict(x, exclude=["email_token"])
for x in Stream.objects.select_related().filter(realm=user_profile.realm, invite_only=False)]
stream_ids = set(x["id"] for x in response['zerver_stream'])
response['zerver_usermessage'] = [model_to_dict(x) for x in UserMessage.objects.select_related()
if x.user_profile_id in userprofile_ids]
user_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=1)
if x.type_id in userprofile_ids]
stream_recipients = [model_to_dict(x)
for x in Recipient.objects.select_related().filter(type=2)
if x.type_id in stream_ids]
stream_recipient_ids = set(x["id"] for x in stream_recipients)
# only check for subscriptions to streams
response['zerver_subscription'] = [model_to_dict(x) for x in Subscription.objects.select_related()
if x.user_profile_id in userprofile_ids
and x.recipient_id in stream_recipient_ids]
subscription_recipient_ids = set(x["recipient"] for x in response['zerver_subscription'])
huddle_recipients = [model_to_dict(r)
for r in Recipient.objects.select_related().filter(type=3)
if r.type_id in subscription_recipient_ids]
huddle_ids = set(x["type_id"] for x in huddle_recipients)
response["zerver_recipient"] = user_recipients + stream_recipients + huddle_recipients
response['zerver_huddle'] = [model_to_dict(h)
for h in Huddle.objects.select_related()
if h.id in huddle_ids]
recipient_ids = set(x["id"] for x in response['zerver_recipient'])
response["zerver_message"] = [model_to_dict(m) for m in Message.objects.select_related()
if m.recipient_id in recipient_ids
and m.sender_id in userprofile_ids]
for (table, model) in [("defaultstream", DefaultStream),
("realmemoji", RealmEmoji),
("realmalias", RealmAlias),
("realmfilter", RealmFilter)]:
response["zerver_"+table] = [model_to_dict(x) for x in
model.objects.select_related().filter(realm_id=user_profile.realm.id)] # type: ignore
return json_success(response)
def get_profile_backend(request, user_profile):
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
@require_realm_admin
@has_request_variables
def update_realm(request, user_profile, name=REQ(validator=check_string, default=None),
restricted_to_domain=REQ(validator=check_bool, default=None),
invite_required=REQ(validator=check_bool, default=None),
invite_by_admins_only=REQ(validator=check_bool, default=None),
create_stream_by_admins_only=REQ(validator=check_bool, default=None)):
realm = user_profile.realm
data = {}
if name is not None and realm.name != name:
do_set_realm_name(realm, name)
data['name'] = 'updated'
if restricted_to_domain is not None and realm.restricted_to_domain != restricted_to_domain:
do_set_realm_restricted_to_domain(realm, restricted_to_domain)
data['restricted_to_domain'] = restricted_to_domain
if invite_required is not None and realm.invite_required != invite_required:
do_set_realm_invite_required(realm, invite_required)
data['invite_required'] = invite_required
if invite_by_admins_only is not None and realm.invite_by_admins_only != invite_by_admins_only:
do_set_realm_invite_by_admins_only(realm, invite_by_admins_only)
data['invite_by_admins_only'] = invite_by_admins_only
if create_stream_by_admins_only is not None and realm.create_stream_by_admins_only != create_stream_by_admins_only:
do_set_realm_create_stream_by_admins_only(realm, create_stream_by_admins_only)
data['create_stream_by_admins_only'] = create_stream_by_admins_only
return json_success(data)
@authenticated_json_post_view
@has_request_variables
def json_upload_file(request, user_profile):
if len(request.FILES) == 0:
return json_error(_("You must specify a file to upload"))
if len(request.FILES) != 1:
return json_error(_("You may only upload one file at a time"))
user_file = list(request.FILES.values())[0]
if ((settings.MAX_FILE_UPLOAD_SIZE * 1024 * 1024) < user_file._get_size()):
return json_error(_("File Upload is larger than allowed limit"))
uri = upload_message_image_through_web_client(request, user_file, user_profile)
return json_success({'uri': uri})
@zulip_login_required
@has_request_variables
def get_uploaded_file(request, realm_id, filename,
redir=REQ(validator=check_bool, default=True)):
if settings.LOCAL_UPLOADS_DIR is not None:
return HttpResponseForbidden() # Should have been served by nginx
user_profile = request.user
url_path = "%s/%s" % (realm_id, filename)
if realm_id == "unk":
realm_id = get_realm_for_filename(url_path)
if realm_id is None:
# File does not exist
return json_error(_("That file does not exist."), status=404)
# Internal users can access all uploads so we can receive attachments in cross-realm messages
if user_profile.realm.id == int(realm_id) or user_profile.realm.domain == 'zulip.com':
uri = get_signed_upload_url(url_path)
if redir:
return redirect(uri)
else:
return json_success({'uri': uri})
else:
return HttpResponseForbidden()
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request, username=REQ(), password=REQ()):
# type: (Any, Any, Any) -> Any
return_data = {} # type: Dict[str, bool]
if username == "google-oauth2-token":
user_profile = authenticate(google_oauth2_token=password, return_data=return_data)
else:
user_profile = authenticate(username=username, password=password, return_data=return_data)
if return_data.get("inactive_user") == True:
return json_error(_("Your account has been disabled."), data={"reason": "user disable"}, status=403)
if return_data.get("inactive_realm") == True:
return json_error(_("Your realm has been deactivated."), data={"reason": "realm deactivated"}, status=403)
if return_data.get("password_auth_disabled") == True:
return json_error(_("Password auth is disabled in your team."), data={"reason": "password auth disabled"}, status=403)
if user_profile is None:
if return_data.get("valid_attestation") == True:
# We can leak that the user is unregistered iff they present a valid authentication string for the user.
return json_error(_("This user is not registered; do so from a browser."), data={"reason": "unregistered"}, status=403)
return json_error(_("Your username or password is incorrect."), data={"reason": "incorrect_creds"}, status=403)
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@authenticated_json_post_view
@has_request_variables
def json_fetch_api_key(request, user_profile, password=REQ(default='')):
if password_auth_enabled(user_profile.realm) and not user_profile.check_password(password):
return json_error(_("Your username or password is incorrect."))
return json_success({"api_key": user_profile.api_key})
@csrf_exempt
def api_fetch_google_client_id(request):
if not settings.GOOGLE_CLIENT_ID:
return json_error(_("GOOGLE_CLIENT_ID is not configured"), status=400)
return json_success({"google_client_id": settings.GOOGLE_CLIENT_ID})
def get_status_list(requesting_user_profile):
return {'presences': get_status_dict(requesting_user_profile),
'server_timestamp': time.time()}
@has_request_variables
def update_active_status_backend(request, user_profile, status=REQ(),
new_user_input=REQ(validator=check_bool, default=False)):
status_val = UserPresence.status_from_string(status)
if status_val is None:
raise JsonableError(_("Invalid presence status: %s") % (status,))
else:
update_user_presence(user_profile, request.client, now(), status_val,
new_user_input)
ret = get_status_list(user_profile)
if user_profile.realm.domain == "mit.edu":
try:
activity = UserActivity.objects.get(user_profile = user_profile,
query="get_events_backend",
client__name="zephyr_mirror")
ret['zephyr_mirror_active'] = \
(activity.last_visit.replace(tzinfo=None) >
datetime.datetime.utcnow() - datetime.timedelta(minutes=5))
except UserActivity.DoesNotExist:
ret['zephyr_mirror_active'] = False
return json_success(ret)
@authenticated_json_post_view
def json_get_active_statuses(request, user_profile):
return json_success(get_status_list(user_profile))
# Does not need to be authenticated because it's called from rest_dispatch
@has_request_variables
def api_events_register(request, user_profile,
apply_markdown=REQ(default=False, validator=check_bool),
all_public_streams=REQ(default=None, validator=check_bool)):
return events_register_backend(request, user_profile,
apply_markdown=apply_markdown,
all_public_streams=all_public_streams)
def _default_all_public_streams(user_profile, all_public_streams):
if all_public_streams is not None:
return all_public_streams
else:
return user_profile.default_all_public_streams
def _default_narrow(user_profile, narrow):
default_stream = user_profile.default_events_register_stream
if not narrow and user_profile.default_events_register_stream is not None:
narrow = [('stream', default_stream.name)]
return narrow
@has_request_variables
def events_register_backend(request, user_profile, apply_markdown=True,
all_public_streams=None,
event_types=REQ(validator=check_list(check_string), default=None),
narrow=REQ(validator=check_list(check_list(check_string, length=2)), default=[]),
queue_lifespan_secs=REQ(converter=int, default=0)):
all_public_streams = _default_all_public_streams(user_profile, all_public_streams)
narrow = _default_narrow(user_profile, narrow)
ret = do_events_register(user_profile, request.client, apply_markdown,
event_types, queue_lifespan_secs, all_public_streams,
narrow=narrow)
return json_success(ret)
@authenticated_json_post_view
@has_request_variables
def json_refer_friend(request, user_profile, email=REQ()):
if not email:
return json_error(_("No email address specified"))
if user_profile.invites_granted - user_profile.invites_used <= 0:
return json_error(_("Insufficient invites"))
    do_refer_friend(user_profile, email)
return json_success()
@authenticated_json_post_view
@has_request_variables
def json_set_muted_topics(request, user_profile,
muted_topics=REQ(validator=check_list(check_list(check_string, length=2)), default=[])):
do_set_muted_topics(user_profile, muted_topics)
return json_success()
def add_push_device_token(request, user_profile, token, kind, ios_app_id=None):
if token == '' or len(token) > 4096:
return json_error(_('Empty or invalid length token'))
# If another user was previously logged in on the same device and didn't
# properly log out, the token will still be registered to the wrong account
PushDeviceToken.objects.filter(token=token).delete()
# Overwrite with the latest value
token, created = PushDeviceToken.objects.get_or_create(user=user_profile,
token=token,
kind=kind,
ios_app_id=ios_app_id)
if not created:
token.last_updated = now()
token.save(update_fields=['last_updated'])
return json_success()
@has_request_variables
def add_apns_device_token(request, user_profile, token=REQ(), appid=REQ(default=settings.ZULIP_IOS_APP_ID)):
return add_push_device_token(request, user_profile, token, PushDeviceToken.APNS, ios_app_id=appid)
@has_request_variables
def add_android_reg_id(request, user_profile, token=REQ()):
return add_push_device_token(request, user_profile, token, PushDeviceToken.GCM)
def remove_push_device_token(request, user_profile, token, kind):
if token == '' or len(token) > 4096:
return json_error(_('Empty or invalid length token'))
try:
token = PushDeviceToken.objects.get(token=token, kind=kind)
token.delete()
except PushDeviceToken.DoesNotExist:
return json_error(_("Token does not exist"))
return json_success()
@has_request_variables
def remove_apns_device_token(request, user_profile, token=REQ()):
return remove_push_device_token(request, user_profile, token, PushDeviceToken.APNS)
@has_request_variables
def remove_android_reg_id(request, user_profile, token=REQ()):
return remove_push_device_token(request, user_profile, token, PushDeviceToken.GCM)
def generate_204(request):
return HttpResponse(content=None, status=204)
def process_unsubscribe(token, type, unsubscribe_function):
try:
confirmation = Confirmation.objects.get(confirmation_key=token)
except Confirmation.DoesNotExist:
return render_to_response('zerver/unsubscribe_link_error.html')
user_profile = confirmation.content_object
unsubscribe_function(user_profile)
return render_to_response('zerver/unsubscribe_success.html',
{"subscription_type": type,
"external_host": settings.EXTERNAL_HOST})
# Email unsubscribe functions. All have the function signature
# processor(user_profile).
def do_missedmessage_unsubscribe(user_profile):
do_change_enable_offline_email_notifications(user_profile, False)
def do_welcome_unsubscribe(user_profile):
clear_followup_emails_queue(user_profile.email)
def do_digest_unsubscribe(user_profile):
do_change_enable_digest_emails(user_profile, False)
# The keys are part of the URL for the unsubscribe link and must be valid
# without encoding.
# The values are a tuple of (display name, unsubscribe function), where the
# display name is what we call this class of email in user-visible text.
email_unsubscribers = {
"missed_messages": ("missed messages", do_missedmessage_unsubscribe),
"welcome": ("welcome", do_welcome_unsubscribe),
"digest": ("digest", do_digest_unsubscribe)
}
# Login NOT required. These are for one-click unsubscribes.
def email_unsubscribe(request, type, token):
if type in email_unsubscribers:
display_name, unsubscribe_function = email_unsubscribers[type]
return process_unsubscribe(token, display_name, unsubscribe_function)
return render_to_response('zerver/unsubscribe_link_error.html', {},
request=request)
|
peiwei/zulip
|
zerver/views/__init__.py
|
Python
|
apache-2.0
| 58,738
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateModelDeploymentMonitoringJob
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async]
from google.cloud import aiplatform_v1beta1
async def sample_update_model_deployment_monitoring_job():
# Create a client
client = aiplatform_v1beta1.JobServiceAsyncClient()
# Initialize request argument(s)
model_deployment_monitoring_job = aiplatform_v1beta1.ModelDeploymentMonitoringJob()
model_deployment_monitoring_job.display_name = "display_name_value"
model_deployment_monitoring_job.endpoint = "endpoint_value"
request = aiplatform_v1beta1.UpdateModelDeploymentMonitoringJobRequest(
model_deployment_monitoring_job=model_deployment_monitoring_job,
)
# Make the request
operation = client.update_model_deployment_monitoring_job(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py
|
Python
|
apache-2.0
| 1,983
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common import wsgi
from trove.common.auth import admin_context
from trove.extensions.mgmt.volume import models
from trove.extensions.mgmt.volume import views
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
class StorageController(wsgi.Controller):
"""Controller for storage device functionality."""
@admin_context
def index(self, req, tenant_id):
"""Return all storage devices."""
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("Indexing storage info for tenant '%s'") % tenant_id)
context = req.environ[wsgi.CONTEXT_KEY]
storages = models.StorageDevices.load(context)
return wsgi.Result(views.StoragesView(storages).data(), 200)
|
changsimon/trove
|
trove/extensions/mgmt/volume/service.py
|
Python
|
apache-2.0
| 1,428
|
"""Module to debug python programs"""
import sys
import traceback
def getAllStacks():
code = []
    for threadId, stack in sys._current_frames().items():
code.append("\n# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename,
lineno, name))
if line:
code.append(" %s" % (line.strip()))
return code
def strStacks():
out = "\n*** STACKTRACE - START ***\n"
out += "\n".join(getAllStacks())
out += "\n*** STACKTRACE - END ***\n"
return out
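# A minimal usage sketch (logger name and call site are illustrative): dump all
# thread stacks to a logger, e.g. from a SIGUSR1 handler or a debug endpoint.
def logStacks():
    import logging
    logging.getLogger(__name__).info(strStacks())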
|
netixx/python-tools
|
tools/debugger.py
|
Python
|
apache-2.0
| 674
|
'''
@author: fangxiao
'''
import apibinding.api_actions as api_actions
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.iam2_operations as iam2_ops
import zstackwoodpecker.operations.affinitygroup_operations as ag_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.vxlan_operations as vxlan_ops
import zstackwoodpecker.operations.net_operations as net_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.operations.tag_operations as tag_ops
import zstackwoodpecker.operations.deploy_operations as dep_ops
import zstackwoodpecker.operations.vpcdns_operations as vpcdns_ops
import apibinding.inventory as inventory
import zstackwoodpecker.operations.vpc_operations as vpc_ops
import os
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
l2_vxlan_network_uuid = None
project_uuid = None
project_operator_uuid = None
vni_range_uuid = None
vxlan_pool_uuid = None
l3_vpc_network_uuid = None
dns_text = '223.5.5.5'
allservices = ["VRouterRoute","DHCP","IPsec","LoadBalancer","CentralizedDNS","Eip","DNS","SNAT","VipQos","PortForwarding"]
cond = res_ops.gen_query_conditions("type","=","vrouter")
network_service_provider_uuid = res_ops.query_resource(res_ops.NETWORK_SERVICE_PROVIDER,cond)[0].uuid
def create_l3_vpc(name,l2_uuid,session_uuid = None):
action = api_actions.CreateL3NetworkAction()
action.name = name
action.l2NetworkUuid = l2_uuid
action.timeout = 300000
action.type = inventory.VPC_L3_NETWORK_TYPE
action.sessionUuid = session_uuid
evt = acc_ops.execute_action_with_session(action,session_uuid)
test_util.action_logger('[l3:] %s is created' %name)
return evt.inventory
def AddDnsToL3Network(l3_network_uuid,dns_text,session_uuid = None):
action = api_actions.AddDnsToL3NetworkAction()
action.sessionUuid = session_uuid
action.dns = dns_text
action.l3NetworkUuid = l3_network_uuid
evt = acc_ops.execute_action_with_session(action,session_uuid)
test_util.action_logger('add dns to l3 network: %s' % l3_network_uuid)
return evt
def AttachNetworkServiceToL3Network(l3_network_uuid,allservices,session_uuid = None):
action = api_actions.AttachNetworkServiceToL3NetworkAction()
action.sessionUuid = session_uuid
action.l3NetworkUuid = l3_network_uuid
action.networkServices = {network_service_provider_uuid:allservices}
evt = acc_ops.execute_action_with_session(action,session_uuid)
test_util.action_logger('add network services to l3 network: %s' % l3_network_uuid)
return evt
def test():
global l2_vxlan_network_uuid,project_uuid,project_operator_uuid,vni_range_uuid,vxlan_pool_uuid,l3_vpc_network_uuid
# create vxlan pool and vni range
zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
cluster_uuid = res_ops.get_resource(res_ops.CLUSTER)[0].uuid
vxlan_pool_name = 'vxlan_pool_name'
vxlan_pool_uuid = vxlan_ops.create_l2_vxlan_network_pool(vxlan_pool_name,zone_uuid).uuid
vxlan_ops.create_vni_range('vni_range',20,40,vxlan_pool_uuid)
systemTags = ["l2NetworkUuid::%s::clusterUuid::%s::cidr::{172.20.0.1/16}"%(vxlan_pool_uuid,cluster_uuid)]
net_ops.attach_l2_vxlan_pool(vxlan_pool_uuid,cluster_uuid,systemTags)
# 1 create project
project_name = 'test_project7'
project = iam2_ops.create_iam2_project(project_name)
project_uuid = project.uuid
#cond = res_ops.gen_query_conditions("name",'=',"test_project7")
#linked_account_uuid = res_ops.query_resource(res_ops.ACCOUNT,cond)[0].uuid
linked_account_uuid = project.linkedAccountUuid
# 2 create project operator
project_operator_name = 'username7'
project_operator_password = 'b109f3bbbc244eb82441917ed06d618b9008dd09b3befd1b5e07394c706a8bb980b1d7785e5976ec049b46df5f1326af5a2ea6d103fd07c95385ffab0cacbc86'
attributes = [{"name": "__ProjectOperator__", "value": project_uuid}]
project_operator_uuid = iam2_ops.create_iam2_virtual_id(project_operator_name,project_operator_password,attributes=attributes).uuid
zone_uuid = res_ops.get_resource(res_ops.ZONE)[0].uuid
attributes = [{"name": "__ProjectRelatedZone__", "value": zone_uuid}]
iam2_ops.add_attributes_to_iam2_project(project_uuid, attributes)
# 3 login in project by project operator
iam2_ops.add_iam2_virtual_ids_to_project([project_operator_uuid],project_uuid)
project_operator_session_uuid = iam2_ops.login_iam2_virtual_id(project_operator_name,project_operator_password)
project_login_uuid = iam2_ops.login_iam2_project(project_name,session_uuid=project_operator_session_uuid).uuid
# 4 share vxlan pool to project
l2vxlan_pools = res_ops.query_resource(res_ops.L2_VXLAN_NETWORK_POOL)
for l2vxlan_pool in l2vxlan_pools:
acc_ops.share_resources([linked_account_uuid],[l2vxlan_pool.uuid])
# 5 create l2 vxlan
l2_vxlan_network_uuid = vxlan_ops.create_l2_vxlan_network('l2_vxlan',vxlan_pool_uuid,zone_uuid,session_uuid=project_login_uuid).uuid
# 6 use l2 vxlan to create l3 vpc
l3_vpc_network = create_l3_vpc('test_vpc',l2_vxlan_network_uuid,project_login_uuid)
l3_vpc_network_uuid = l3_vpc_network.uuid
# add ip range
ir_option = test_util.IpRangeOption()
ir_option.set_name('iprange2')
ir_option.set_description('iprange for vpc')
ir_option.set_netmask('255.255.255.0')
ir_option.set_gateway('192.168.23.1')
ir_option.set_l3_uuid(l3_vpc_network_uuid)
ir_option.set_startIp('192.168.23.2')
ir_option.set_endIp('192.168.23.254')
net_ops.add_ip_range(ir_option)
# add network service
AttachNetworkServiceToL3Network(l3_vpc_network_uuid,allservices,session_uuid = project_login_uuid)
# share the vr_offering to project and do create vpc router and vpc network
cond = res_ops.gen_query_conditions("name",'=',"virtual-router-vm")
vr_offering_uuid = res_ops.query_resource(res_ops.VR_OFFERING,cond)[0].uuid
acc_ops.share_resources([linked_account_uuid],[vr_offering_uuid])
vpc_ops.create_vpc_vrouter(name = 'test_vpc_vr', virtualrouter_offering_uuid = vr_offering_uuid,session_uuid = project_login_uuid)
vpc_vr = test_stub.query_vpc_vrouter('test_vpc_vr')
vpc_vr.add_nic(l3_vpc_network_uuid)
# 7 expunge the project and check the l2 vxlan
iam2_ops.delete_iam2_project(project_uuid)
iam2_ops.expunge_iam2_project(project_uuid)
try:
l2_vxlan_network_test_uuid = res_ops.query_resource(res_ops.L2_VXLAN_NETWORK)[0].uuid
except:
        test_util.test_pass(
            "l2 vxlan is deleted after the project was deleted")
test_util.test_dsc('test l2 l2 cascade delete')
# 8 check the vpc network and vpc_vr
try:
cond = res_ops.gen_query_conditions("name",'=',"test_vpc")
l3_vpc_network_uuid = res_ops.query_resource(res_ops.L3_NETWORK,cond)[0].uuid
except:
        test_util.test_pass(
            "l3_vpc is deleted after the project was deleted")
cond = res_ops.gen_query_conditions("name",'=',"test_vpc_vr")
vpc_vr = res_ops.query_resource(res_ops.VIRTUALROUTER_VM,cond)
    if vpc_vr.inv.state != 'Paused':
        test_util.test_fail(
            "vpc vr [%s] still exists after deleting and expunging the project [%s]" % (vpc_vr.uuid,project_uuid))
# 9 delete
vni_range_uuid = res_ops.get_resource(res_ops.VNI_RANGE)[0].uuid
vxlan_ops.delete_vni_range(vni_range_uuid)
net_ops.delete_l2(vxlan_pool_uuid)
iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
def error_cleanup():
if project_uuid:
iam2_ops.delete_iam2_project(project_uuid)
iam2_ops.expunge_iam2_project(project_uuid)
if project_operator_uuid:
iam2_ops.delete_iam2_virtual_id(project_operator_uuid)
if l2_vxlan_network_uuid:
net_ops.delete_l2(l2_vxlan_network_uuid)
if vni_range_uuid:
vxlan_ops.delete_vni_range(vni_range_uuid)
if vxlan_pool_uuid:
net_ops.delete_l2(vxlan_pool_uuid)
if l3_vpc_network_uuid:
net_ops.delete_l3(l3_vpc_network_uuid)
|
zstackio/zstack-woodpecker
|
integrationtest/vm/simulator/iam2/test_iam2_project_vpc_cascade_delete.py
|
Python
|
apache-2.0
| 8,266
|
# Copyright 2014, Doug Wiegley, A10 Networks.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
VERSION = '2.9.0'
|
a10networks/acos-client
|
acos_client/version.py
|
Python
|
apache-2.0
| 641
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import, unused-argument, too-many-ancestors
"""Gluon EventHandlers for Estimators"""
import os
import time
import warnings
import numpy as np
from ...metric import CompositeEvalMetric, EvalMetric
from ...metric import Loss as metric_loss
from .utils import _check_metrics
__all__ = ['TrainBegin', 'TrainEnd', 'EpochBegin', 'EpochEnd', 'BatchBegin', 'BatchEnd',
'StoppingHandler', 'MetricHandler', 'ValidationHandler',
'LoggingHandler', 'CheckpointHandler', 'EarlyStoppingHandler', 'GradientUpdateHandler']
class EventHandler(object):
pass
def _check_event_handlers(handlers):
if isinstance(handlers, EventHandler):
handlers = [handlers]
else:
handlers = handlers or []
if not all([isinstance(handler, EventHandler) for handler in handlers]):
raise ValueError("handlers must be an EventHandler or a list of EventHandler, "
"got: {}".format(handlers))
return handlers
class TrainBegin(EventHandler):
def train_begin(self, estimator, *args, **kwargs):
pass
class TrainEnd(EventHandler):
def train_end(self, estimator, *args, **kwargs):
pass
class EpochBegin(EventHandler):
def epoch_begin(self, estimator, *args, **kwargs):
pass
class EpochEnd(EventHandler):
def epoch_end(self, estimator, *args, **kwargs):
return False
class BatchBegin(EventHandler):
def batch_begin(self, estimator, *args, **kwargs):
pass
class BatchEnd(EventHandler):
def batch_end(self, estimator, *args, **kwargs):
return False
class StoppingHandler(TrainBegin, BatchEnd, EpochEnd):
"""Stop conditions to stop training
    Stop training once the maximum number of batches or epochs is reached.
Parameters
----------
max_epoch : int, default None
Number of maximum epochs to train.
max_batch : int, default None
Number of maximum batches to train.
"""
def __init__(self, max_epoch=None, max_batch=None):
self.max_epoch = max_epoch
self.max_batch = max_batch
self.current_batch = 0
self.current_epoch = 0
self.stop_training = False
def train_begin(self, estimator, *args, **kwargs):
self.max_epoch = estimator.max_epoch
self.max_batch = estimator.max_batch
self.current_batch = 0
self.current_epoch = 0
def batch_end(self, estimator, *args, **kwargs):
self.current_batch += 1
if self.current_batch == self.max_batch:
self.stop_training = True
return self.stop_training
def epoch_end(self, estimator, *args, **kwargs):
self.current_epoch += 1
if self.current_epoch == self.max_epoch:
self.stop_training = True
return self.stop_training
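# A minimal sketch (not part of the original module) of the stop condition this
# handler implements: epoch_end reports stop_training=True once max_epoch epochs
# have completed. train_begin, which copies the limits from the estimator, is
# skipped here so no estimator object is needed.
def _stopping_handler_sketch():
    handler = StoppingHandler(max_epoch=2)
    assert handler.epoch_end(estimator=None) is False  # after epoch 1
    assert handler.epoch_end(estimator=None) is True   # max_epoch reached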
class MetricHandler(EpochBegin, BatchEnd):
"""Metric Handler that update metric values at batch end
:py:class:`MetricHandler` takes model predictions and true labels
and update the metrics, it also update metric wrapper for loss with loss values.
Validation loss and metrics will be handled by :py:class:`ValidationHandler`
Parameters
----------
metrics : List of EvalMetrics
Metrics to be updated at batch end.
priority : scalar
Priority level of the MetricHandler. Priority level is sorted in ascending
        order. The lower the number, the higher the handler's priority.
"""
def __init__(self, metrics, priority=-1000):
self.metrics = _check_metrics(metrics)
# order to be called among all callbacks
# metrics need to be calculated before other callbacks can access them
self.priority = priority
def epoch_begin(self, estimator, *args, **kwargs):
for metric in self.metrics:
metric.reset()
def batch_end(self, estimator, *args, **kwargs):
pred = kwargs['pred']
label = kwargs['label']
loss = kwargs['loss']
for metric in self.metrics:
if isinstance(metric, metric_loss):
# metric wrapper for loss values
metric.update(0, loss)
else:
metric.update(label, pred)
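# A small sketch (assumes mxnet is importable; the batch data is made up) of the
# cycle MetricHandler drives: reset the metrics at epoch begin, then update them
# with predictions and labels at every batch end.
def _metric_handler_sketch():
    import mxnet as mx
    acc = mx.metric.Accuracy()
    handler = MetricHandler(metrics=[acc])
    handler.epoch_begin(estimator=None)
    pred = mx.nd.array([[0.1, 0.9], [0.8, 0.2]])
    label = mx.nd.array([1, 0])
    handler.batch_end(estimator=None, pred=[pred], label=[label],
                      loss=[mx.nd.array([0.25])])
    return acc.get()  # expected: ('accuracy', 1.0)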
class ValidationHandler(TrainBegin, BatchEnd, EpochEnd):
"""Validation Handler that evaluate model on validation dataset
:py:class:`ValidationHandler` takes validation dataset, an evaluation function,
metrics to be evaluated, and how often to run the validation. You can provide custom
evaluation function or use the one provided my :py:class:`Estimator`
Parameters
----------
val_data : DataLoader
Validation data set to run evaluation.
eval_fn : function
A function defines how to run evaluation and
calculate loss and metrics.
epoch_period : int, default 1
How often to run validation at epoch end, by default
:py:class:`ValidationHandler` validate every epoch.
batch_period : int, default None
How often to run validation at batch end, by default
:py:class:`ValidationHandler` does not validate at batch end.
priority: scalar, default -1000
Priority level of the ValidationHandler. Priority level is sorted in
        ascending order. The lower the number, the higher the handler's priority.
event_handlers : EventHandler or list of EventHandlers
        List of :py:class:`EventHandler` to apply during validation. This argument
is used by self.eval_fn function in order to process customized event
handlers.
"""
def __init__(self,
val_data,
eval_fn,
epoch_period=1,
batch_period=None,
priority=-1000,
event_handlers=None):
self.val_data = val_data
self.eval_fn = eval_fn
self.epoch_period = epoch_period
self.batch_period = batch_period
self.current_batch = 0
self.current_epoch = 0
# order to be called among all callbacks
# validation metrics need to be calculated before other callbacks can access them
self.priority = priority
self.event_handlers = event_handlers
def train_begin(self, estimator, *args, **kwargs):
# reset epoch and batch counter
self.current_batch = 0
self.current_epoch = 0
def batch_end(self, estimator, *args, **kwargs):
self.current_batch += 1
if self.batch_period and self.current_batch % self.batch_period == 0:
self.eval_fn(val_data=self.val_data, batch_axis=estimator.batch_axis,
event_handlers=self.event_handlers)
def epoch_end(self, estimator, *args, **kwargs):
self.current_epoch += 1
if self.epoch_period and self.current_epoch % self.epoch_period == 0:
self.eval_fn(val_data=self.val_data, batch_axis=estimator.batch_axis,
event_handlers=self.event_handlers)
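# A minimal sketch (every name here is made up) of how ValidationHandler fires
# its eval_fn: with epoch_period=2 the evaluation runs on every second epoch_end,
# and the estimator only needs to expose batch_axis for this call.
def _validation_handler_sketch():
    calls = []
    def fake_eval_fn(val_data, batch_axis=0, event_handlers=None):
        calls.append(val_data)
    class FakeEstimator(object):
        batch_axis = 0
    handler = ValidationHandler(val_data='val_loader', eval_fn=fake_eval_fn,
                                epoch_period=2)
    handler.train_begin(FakeEstimator())
    handler.epoch_end(FakeEstimator())  # epoch 1: no evaluation yet
    handler.epoch_end(FakeEstimator())  # epoch 2: fake_eval_fn called once
    return len(calls)  # 1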
class LoggingHandler(TrainBegin, TrainEnd, EpochBegin, EpochEnd, BatchBegin, BatchEnd):
"""Basic Logging Handler that applies to every Gluon estimator by default.
:py:class:`LoggingHandler` logs hyper-parameters, training statistics,
and other useful information during training
Parameters
----------
log_interval: int or str, default 'epoch'
Logging interval during training.
log_interval='epoch': display metrics every epoch
log_interval=integer k: display metrics every interval of k batches
metrics : list of EvalMetrics
Metrics to be logged, logged at batch end, epoch end, train end.
priority : scalar, default np.Inf
Priority level of the LoggingHandler. Priority level is sorted in
        ascending order. The lower the number, the higher the handler's priority.
"""
def __init__(self, log_interval='epoch',
metrics=None,
priority=np.Inf):
super(LoggingHandler, self).__init__()
if not isinstance(log_interval, int) and log_interval != 'epoch':
raise ValueError("log_interval must be either an integer or string 'epoch'")
self.metrics = _check_metrics(metrics)
self.batch_index = 0
self.current_epoch = 0
self.processed_samples = 0
        # the logging handler needs to be called last to make sure all states are updated
# it will also shut down logging at train end
self.priority = priority
self.log_interval = log_interval
self.log_interval_time = 0
def train_begin(self, estimator, *args, **kwargs):
self.train_start = time.time()
trainer = estimator.trainer
optimizer = trainer.optimizer.__class__.__name__
lr = trainer.learning_rate
estimator.logger.info("Training begin: using optimizer %s "
"with current learning rate %.4f ",
optimizer, lr)
if estimator.max_epoch:
estimator.logger.info("Train for %d epochs.", estimator.max_epoch)
else:
estimator.logger.info("Train for %d batches.", estimator.max_batch)
# reset all counters
self.current_epoch = 0
self.batch_index = 0
self.processed_samples = 0
self.log_interval_time = 0
def train_end(self, estimator, *args, **kwargs):
train_time = time.time() - self.train_start
msg = 'Train finished using total %ds with %d epochs. ' % (train_time, self.current_epoch)
# log every result in train stats including train/validation loss & metrics
for metric in self.metrics:
name, value = metric.get()
msg += '%s: %.4f, ' % (name, value)
estimator.logger.info(msg.rstrip(', '))
def batch_begin(self, estimator, *args, **kwargs):
if isinstance(self.log_interval, int):
self.batch_start = time.time()
def batch_end(self, estimator, *args, **kwargs):
if isinstance(self.log_interval, int):
batch_time = time.time() - self.batch_start
msg = '[Epoch %d][Batch %d]' % (self.current_epoch, self.batch_index)
self.processed_samples += kwargs['batch'][0].shape[0]
msg += '[Samples %s] ' % (self.processed_samples)
self.log_interval_time += batch_time
if self.batch_index % self.log_interval == 0:
msg += 'time/interval: %.3fs ' % self.log_interval_time
self.log_interval_time = 0
for metric in self.metrics:
# only log current training loss & metric after each interval
name, value = metric.get()
msg += '%s: %.4f, ' % (name, value)
estimator.logger.info(msg.rstrip(', '))
self.batch_index += 1
def epoch_begin(self, estimator, *args, **kwargs):
if isinstance(self.log_interval, int) or self.log_interval == 'epoch':
is_training = False
# use the name hack defined in __init__() of estimator class
for metric in self.metrics:
if 'training' in metric.name:
is_training = True
self.epoch_start = time.time()
if is_training:
estimator.logger.info("[Epoch %d] Begin, current learning rate: %.4f",
self.current_epoch, estimator.trainer.learning_rate)
else:
estimator.logger.info("Validation Begin")
def epoch_end(self, estimator, *args, **kwargs):
if isinstance(self.log_interval, int) or self.log_interval == 'epoch':
epoch_time = time.time() - self.epoch_start
msg = '[Epoch %d] Finished in %.3fs, ' % (self.current_epoch, epoch_time)
for monitor in self.metrics:
name, value = monitor.get()
msg += '%s: %.4f, ' % (name, value)
estimator.logger.info(msg.rstrip(', '))
self.current_epoch += 1
self.batch_index = 0
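# A construction-only sketch (values are illustrative; it assumes the module's
# _check_metrics helper accepts the default metrics=None, as implied by the
# signature above) of the two supported logging intervals: per-epoch logging or
# logging every k batches; anything else is rejected in __init__.
def _logging_handler_sketch():
    per_epoch = LoggingHandler(log_interval='epoch')
    every_50_batches = LoggingHandler(log_interval=50)
    try:
        LoggingHandler(log_interval='batch')
    except ValueError:
        pass  # log_interval must be an int or the string 'epoch'
    return per_epoch, every_50_batches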
class CheckpointHandler(TrainBegin, BatchEnd, EpochEnd):
"""Save the model after user define period
:py:class:`CheckpointHandler` saves the network architecture after first batch if the model
can be fully hybridized, saves model parameters and trainer states after user defined period,
default saves every epoch.
Parameters
----------
model_dir : str
File directory to save all the model related files including model architecture,
model parameters, and trainer states.
model_prefix : str default 'model'
Prefix to add for all checkpoint file names.
monitor: EvalMetric, default None
        The metric to monitor to determine whether the model has improved
verbose: int, default 0
Verbosity mode, 1 means inform user every time a checkpoint is saved
save_best: bool, default False
If True, monitor must not be None, :py:class:`CheckpointHandler` will save the
model parameters and trainer states with the best monitored value.
mode: str, default 'auto'
One of {auto, min, max}, if `save_best=True`, the comparison to make
and determine if the monitored value has improved. if 'auto' mode,
:py:class:`CheckpointHandler` will try to use min or max based on
the monitored metric name.
epoch_period: int, default 1
Epoch intervals between saving the network. By default, checkpoints are
saved every epoch.
batch_period: int, default None
Batch intervals between saving the network.
By default, checkpoints are not saved based on the number of batches.
max_checkpoints : int, default 5
Maximum number of checkpoint files to keep in the model_dir, older checkpoints
will be removed. Best checkpoint file is not counted.
resume_from_checkpoint : bool, default False
Whether to resume training from checkpoint in model_dir. If True and checkpoints
found, :py:class:`CheckpointHandler` will load net parameters and trainer states,
        and train for the remaining epochs and batches.
"""
def __init__(self,
model_dir,
model_prefix='model',
monitor=None,
verbose=0,
save_best=False,
mode='auto',
epoch_period=1,
batch_period=None,
max_checkpoints=5,
resume_from_checkpoint=False):
self.monitor = monitor
self.verbose = verbose
if not os.path.exists(model_dir):
os.makedirs(model_dir)
self.model_dir = model_dir
self.model_prefix = model_prefix
self.save_best = save_best
if self.save_best and not isinstance(self.monitor, EvalMetric):
raise ValueError("To save best model only, please provide one of the metric objects "
"from estimator.train_metrics and estimator.val_metrics as monitor.")
self.epoch_period = epoch_period
self.batch_period = batch_period
self.current_batch = 0
self.current_epoch = 0
self.max_checkpoints = max_checkpoints
self.resume_from_checkpoint = resume_from_checkpoint
self.saved_checkpoints = []
if self.save_best:
if mode not in ['auto', 'min', 'max']:
warnings.warn('ModelCheckpoint mode %s is unknown, '
                              'fallback to auto mode. CheckpointHandler will use '
                              'max mode for f1 and accuracy metric comparison and '
                              'use min mode otherwise' % (mode),
RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
# use greater for accuracy and f1 and less otherwise
                if 'acc' in self.monitor.get()[0].lower() or 'f1' in self.monitor.get()[0].lower():
warnings.warn("`greater` operator will be used to determine if {} has improved. "
"Please specify `mode='min'` to use the `less` operator. "
"Specify `mode='max' to disable this warning.`"
.format(self.monitor.get()[0]))
self.monitor_op = np.greater
else:
warnings.warn("`less` operator will be used to determine if {} has improved. "
"Please specify `mode='max'` to use the `greater` operator. "
"Specify `mode='min' to disable this warning.`"
.format(self.monitor.get()[0]))
self.monitor_op = np.less
def train_begin(self, estimator, *args, **kwargs):
# reset all counters
self.current_epoch = 0
self.current_batch = 0
if self.save_best:
self.best = np.Inf if self.monitor_op == np.less else -np.Inf # pylint: disable=comparison-with-callable
if self.resume_from_checkpoint:
error_msg = "To use resume from checkpoint, you must only specify " \
"the same type of period you used for training." \
"For example, if you are training based on number of epochs," \
"you must save only based on epochs, and set batch_period to None."
if estimator.max_batch:
assert self.batch_period, error_msg
assert not self.epoch_period, error_msg
if estimator.max_epoch:
assert self.epoch_period, error_msg
assert not self.batch_period, error_msg
self._resume_from_checkpoint(estimator)
def batch_end(self, estimator, *args, **kwargs):
# only save symbol once after first batch
if self.current_batch == 0:
self._save_symbol(estimator)
if self.batch_period and (self.current_batch + 1) % self.batch_period == 0:
self._save_checkpoint(estimator)
self.current_batch += 1
def epoch_end(self, estimator, *args, **kwargs):
if self.epoch_period and (self.current_epoch + 1) % self.epoch_period == 0:
self._save_checkpoint(estimator)
self.current_epoch += 1
def _save_checkpoint(self, estimator):
# if resumed from checkpoint, increment checkpoint number
if self.resume_from_checkpoint:
save_epoch_number = self.current_epoch + self.trained_epoch + 1
if estimator.max_epoch:
# checkpoint saved at epoch end, batch number already incremented
save_batch_number = self.current_batch + self.trained_batch
else:
save_batch_number = self.current_batch + self.trained_batch + 1
else:
save_epoch_number = self.current_epoch
save_batch_number = self.current_batch
prefix = "%s-epoch%dbatch%d" % (self.model_prefix, save_epoch_number, save_batch_number)
self._save_params_and_trainer(estimator, prefix)
if self.verbose > 0:
estimator.logger.info('[Epoch %d] CheckpointHandler: trained total %d batches, '
'saving model at %s with prefix: %s',
self.current_epoch, self.current_batch + 1, self.model_dir, prefix)
if self.save_best:
monitor_name, monitor_value = self.monitor.get()
# check if monitor exists in train stats
if np.isnan(monitor_value):
warnings.warn(RuntimeWarning(
'Skipping save best because %s is not updated, make sure you pass one of the '
                    'metric objects estimator.train_metrics and estimator.val_metrics as monitor'
                    % monitor_name))
else:
if self.monitor_op(monitor_value, self.best):
prefix = self.model_prefix + '-best'
self._save_params_and_trainer(estimator, prefix)
if self.verbose > 0:
estimator.logger.info('[Epoch %d] CheckpointHandler: '
'%s improved from %0.5f to %0.5f, '
'updating best model at %s with prefix: %s',
self.current_epoch, monitor_name,
self.best, monitor_value, self.model_dir, prefix)
self.best = monitor_value
else:
if self.verbose > 0:
estimator.logger.info('[Epoch %d] CheckpointHandler: '
'%s did not improve from %0.5f, '
'skipping updating best model',
self.current_batch, monitor_name,
self.best)
def _save_symbol(self, estimator):
symbol_file = os.path.join(self.model_dir, self.model_prefix + '-symbol.json')
if hasattr(estimator.net, '_cached_graph') and estimator.net._cached_graph:
sym = estimator.net._cached_graph[1]
sym.save(symbol_file)
else:
estimator.logger.info(
"Model architecture(symbol file) is not saved, please use HybridBlock "
"to construct your model, and call net.hybridize() before passing to "
"Estimator in order to save model architecture as %s.",
symbol_file)
def _save_params_and_trainer(self, estimator, file_prefix):
param_file = os.path.join(self.model_dir, file_prefix + '.params')
trainer_file = os.path.join(self.model_dir, file_prefix + '.states')
estimator.net.save_parameters(param_file)
estimator.trainer.save_states(trainer_file)
# only count checkpoints with epoch or batch number in file name
if 'best' not in file_prefix:
self.saved_checkpoints.append(file_prefix)
# remove old checkpoint when max number of checkpoints reached
if len(self.saved_checkpoints) > self.max_checkpoints:
prefix = self.saved_checkpoints.pop(0)
for fname in os.listdir(self.model_dir):
if fname.startswith(prefix):
os.remove(os.path.join(self.model_dir, fname))
def _resume_from_checkpoint(self, estimator):
prefix = self.model_prefix + '-epoch'
self.trained_epoch = self._find_max_iteration(
dir=self.model_dir,
prefix=prefix,
start='epoch',
end='batch',
saved_checkpoints=self.saved_checkpoints)
prefix += str(self.trained_epoch)
self.trained_batch = self._find_max_iteration(
dir=self.model_dir,
prefix=prefix,
start='batch',
end='.params')
if self.trained_epoch == -1:
msg = "CheckpointHandler: No checkpoint found, training from scratch for "
if estimator.max_batch:
msg += "%d batches" % estimator.max_batch
else:
msg += "%d epochs" % estimator.max_epoch
estimator.logger.info(msg)
else:
msg = "CheckpointHandler: Checkpoint resumed from epoch %d batch %d, " \
"continue to train for " % (self.trained_epoch, self.trained_batch)
# change maximum number of epoch or batch to train if resumed from epoch checkpoint
if estimator.max_epoch:
if self.trained_epoch >= estimator.max_epoch - 1:
raise ValueError("Found checkpoint with maximum number of epoch %d reached, please specify "
"resume_from_checkpoint=False (default value) if you wan to train from scratch."
% estimator.max_epoch)
estimator.max_epoch = estimator.max_epoch - self.trained_epoch - 1
msg += "%d epochs " % estimator.max_epoch
if estimator.max_batch:
if self.trained_batch >= estimator.max_batch - 1:
raise ValueError("Found checkpoint with maximum number of batch %d reached, please specify"
"resume_from_checkpoint=False (default value) if you wan to train from scratch."
% self.trained_batch)
estimator.max_batch = estimator.max_batch - self.trained_batch - 1
msg += "%d batches " % estimator.max_batch
# load checkpoint
param_file = "%s-epoch%dbatch%d.params" % (self.model_prefix, self.trained_epoch, self.trained_batch)
param_file = os.path.join(self.model_dir, param_file)
trainer_file = "%s-epoch%dbatch%d.states" % (self.model_prefix, self.trained_epoch, self.trained_batch)
trainer_file = os.path.join(self.model_dir, trainer_file)
assert os.path.exists(param_file), "Failed to load checkpoint, %s does not exist" % param_file
assert os.path.exists(trainer_file), "Failed to load checkpoint, %s does not exist" % trainer_file
estimator.net.load_parameters(param_file, ctx=estimator.context)
estimator.trainer.load_states(trainer_file)
estimator.logger.warning(msg)
def _find_max_iteration(self, dir, prefix, start, end, saved_checkpoints=None):
error_msg = "Error parsing checkpoint file, please check your " \
"checkpoints have the format: " \
"{model_name}-epoch{epoch_number}batch{batch_number}.params, " \
"there should also be a .states file for each .params file "
max_iter = -1
for fname in os.listdir(dir):
if fname.startswith(prefix) and '.params' in fname:
if saved_checkpoints:
# save prefix of existing checkpoints
saved_checkpoints.append(fname[:fname.find('.params')])
try:
# find trained number of epoch
iter = int(fname[fname.find(start) + len(start): fname.find(end)])
if iter > max_iter:
max_iter = iter
except ValueError:
raise ValueError(error_msg)
return max_iter
class EarlyStoppingHandler(TrainBegin, EpochEnd, TrainEnd):
    """Stop training early if the monitored value is not improving.
Parameters
----------
monitor: EvalMetric
The metric to monitor, and stop training if this metric does not improve.
min_delta: float, default 0
Minimal change in monitored value to be considered as an improvement.
patience: int, default 0
Number of epochs to wait for improvement before terminate training.
mode: str, default 'auto'
        One of {auto, min, max}: the comparison used to decide whether the monitored
        value has improved. In 'auto' mode, the handler picks min or max based on the
        monitored metric name.
baseline: float
Baseline value to compare the monitored value with.
"""
def __init__(self,
monitor,
min_delta=0,
patience=0,
mode='auto',
baseline=None):
super(EarlyStoppingHandler, self).__init__()
if not isinstance(monitor, EvalMetric):
raise ValueError(
"Please provide one of the metric objects from estimator.train_metrics and "
"estimator.val_metrics as monitor.")
if isinstance(monitor, CompositeEvalMetric):
raise ValueError("CompositeEvalMetric is not supported for EarlyStoppingHandler, "
"please specify a simple metric instead.")
self.monitor = monitor
self.baseline = baseline
self.patience = patience
self.min_delta = min_delta
self.wait = 0
self.stopped_epoch = 0
self.current_epoch = 0
self.stop_training = False
if mode not in ['auto', 'min', 'max']:
warnings.warn('EarlyStopping mode %s is unknown, '
                          'falling back to auto mode. EarlyStoppingHandler will use '
                          'max mode for f1 and accuracy metric comparison and '
                          'min mode otherwise' % (mode),
RuntimeWarning)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
            if 'acc' in self.monitor.get()[0].lower() or 'f1' in self.monitor.get()[0].lower():
warnings.warn("`greater` operator will be used to determine if {} has improved. "
"Please specify `mode='min'` to use the `less` operator. "
"Specify `mode='max' to disable this warning.`"
.format(self.monitor.get()[0]))
self.monitor_op = np.greater
else:
warnings.warn("`less` operator will be used to determine if {} has improved. "
"Please specify `mode='max'` to use the `greater` operator. "
"Specify `mode='min' to disable this warning.`"
.format(self.monitor.get()[0]))
self.monitor_op = np.less
if self.monitor_op == np.greater: # pylint: disable=comparison-with-callable
self.min_delta *= 1
else:
self.min_delta *= -1
def train_begin(self, estimator, *args, **kwargs):
self.wait = 0
self.stopped_epoch = 0
self.current_epoch = 0
self.stop_training = False
if self.baseline is not None:
self.best = self.baseline
else:
self.best = np.Inf if self.monitor_op == np.less else -np.Inf # pylint: disable=comparison-with-callable
def epoch_end(self, estimator, *args, **kwargs):
monitor_name, monitor_value = self.monitor.get()
if np.isnan(monitor_value):
warnings.warn(RuntimeWarning(
                '%s is not updated, make sure you pass one of the metric objects from '
                'estimator.train_metrics and estimator.val_metrics as monitor.' % monitor_name))
else:
if self.monitor_op(monitor_value - self.min_delta, self.best):
self.best = monitor_value
self.wait = 0
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = self.current_epoch
self.stop_training = True
self.current_epoch += 1
return self.stop_training
def train_end(self, estimator, *args, **kwargs):
if self.stopped_epoch > 0:
            estimator.logger.info('[Epoch %d] EarlyStoppingHandler: '
'early stopping due to %s not improving',
self.stopped_epoch, self.monitor.get()[0])
class GradientUpdateHandler(BatchEnd):
    """Gradient update handler that applies gradients to the network weights.
    :py:class:`GradientUpdateHandler` takes a priority level and updates the weight
    parameters at the end of each batch.
Parameters
----------
priority : scalar, default -2000
        priority level of the gradient update handler. Priority levels are sorted in
        ascending order: the lower the number, the higher the handler's priority.
"""
def __init__(self, priority=-2000):
self.priority = priority
def batch_end(self, estimator, *args, **kwargs):
loss = kwargs['loss']
batch_size = 0
if not isinstance(loss, list):
loss = [loss]
        for l in loss:
            batch_size += l.shape[0]
estimator.trainer.step(batch_size)
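# --- Editor's usage sketch (not part of the upstream module; names outside this file are
# assumptions). EarlyStoppingHandler requires an EvalMetric instance; the Accuracy import
# below assumes the 1.x-style `mxnet.metric` namespace, and passing handlers via
# Estimator.fit(..., event_handlers=[...]) is how the estimator is typically driven.
if __name__ == '__main__':
    from mxnet.metric import Accuracy  # an EvalMetric subclass, as the handler requires
    acc = Accuracy()
    early_stop = EarlyStoppingHandler(monitor=acc, min_delta=0.001, patience=3, mode='max')
    grad_update = GradientUpdateHandler()
    # These would normally be passed to Estimator.fit(..., event_handlers=[early_stop, grad_update]).
    print(early_stop.monitor_op is np.greater)  # True: 'max' mode compares with np.greater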
|
leezu/mxnet
|
python/mxnet/gluon/contrib/estimator/event_handler.py
|
Python
|
apache-2.0
| 33,227
|
import random
import threading
from collections import defaultdict
import logging
import time
from typing import Any, Dict, List, Optional
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler.tags import TAG_RAY_CLUSTER_NAME, TAG_RAY_NODE_NAME, \
TAG_RAY_LAUNCH_CONFIG, TAG_RAY_NODE_KIND, \
TAG_RAY_USER_NODE_TYPE, TAG_RAY_NODE_STATUS
from ray.autoscaler._private.constants import BOTO_MAX_RETRIES
from ray.autoscaler._private.log_timer import LogTimer
from ray.autoscaler._private.cli_logger import cli_logger
from ray.autoscaler._private.aliyun.utils import AcsClient
from ray.autoscaler._private.aliyun.config import PENDING, STOPPED, \
STOPPING, RUNNING, bootstrap_aliyun
logger = logging.getLogger(__name__)
TAG_BATCH_DELAY = 1
STOPPING_NODE_DELAY = 1
class AliyunNodeProvider(NodeProvider):
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes",
True)
self.acs = AcsClient(
access_key=provider_config["access_key"],
access_key_secret=provider_config["access_key_secret"],
region_id=provider_config["region"],
max_retries=BOTO_MAX_RETRIES,
)
# Try availability zones round-robin, starting from random offset
self.subnet_idx = random.randint(0, 100)
# Tags that we believe to actually be on the node.
self.tag_cache = {}
# Tags that we will soon upload.
self.tag_cache_pending = defaultdict(dict)
# Number of threads waiting for a batched tag update.
self.batch_thread_count = 0
self.batch_update_done = threading.Event()
self.batch_update_done.set()
self.ready_for_new_batch = threading.Event()
self.ready_for_new_batch.set()
self.tag_cache_lock = threading.Lock()
self.count_lock = threading.Lock()
# Cache of node objects from the last nodes() call. This avoids
# excessive DescribeInstances requests.
self.cached_nodes = {}
def non_terminated_nodes(self, tag_filters: Dict[str, str]) -> List[str]:
tags = [
{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
},
]
for k, v in tag_filters.items():
tags.append({
"Key": k,
"Value": v,
})
instances = self.acs.describe_instances(tags=tags)
non_terminated_instance = []
for instance in instances:
if instance.get("Status") == RUNNING or instance.get(
"Status") == PENDING:
non_terminated_instance.append(instance.get("InstanceId"))
self.cached_nodes[instance.get("InstanceId")] = instance
return non_terminated_instance
def is_running(self, node_id: str) -> bool:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
instance = instances[0]
return instance.get("Status") == "Running"
cli_logger.error("Invalid node id: %s", node_id)
return False
def is_terminated(self, node_id: str) -> bool:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
return instance.get("Status") == "Stopped"
cli_logger.error("Invalid node id: %s", node_id)
return False
def node_tags(self, node_id: str) -> Dict[str, str]:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
if instance.get("Tags") is not None:
node_tags = dict()
for tag in instance.get("Tags").get("Tag"):
node_tags[tag.get("TagKey")] = tag.get("TagValue")
return node_tags
return dict()
def external_ip(self, node_id: str) -> str:
while True:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances)
instance = instances[0]
if instance.get("PublicIpAddress") is not None \
and instance.get(
"PublicIpAddress").get("IpAddress") is not None:
if len(instance.get("PublicIpAddress").get(
"IpAddress")) > 0:
return instance.get("PublicIpAddress").get(
"IpAddress")[0]
            cli_logger.error(
                "PublicIpAddress attribute does not exist. %s" % instance)
time.sleep(STOPPING_NODE_DELAY)
def internal_ip(self, node_id: str) -> str:
while True:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
if instance.get("VpcAttributes") is not None and instance.get(
"VpcAttributes").get(
"PrivateIpAddress") is not None and len(
instance.get("VpcAttributes").get(
"PrivateIpAddress").get("IpAddress")) > 0:
return instance.get("VpcAttributes").get(
"PrivateIpAddress").get("IpAddress")[0]
            cli_logger.error(
                "InnerIpAddress attribute does not exist. %s" % instance)
time.sleep(STOPPING_NODE_DELAY)
def set_node_tags(self, node_id: str, tags: Dict[str, str]) -> None:
is_batching_thread = False
with self.tag_cache_lock:
if not self.tag_cache_pending:
is_batching_thread = True
# Wait for threads in the last batch to exit
self.ready_for_new_batch.wait()
self.ready_for_new_batch.clear()
self.batch_update_done.clear()
self.tag_cache_pending[node_id].update(tags)
if is_batching_thread:
time.sleep(TAG_BATCH_DELAY)
with self.tag_cache_lock:
self._update_node_tags()
self.batch_update_done.set()
with self.count_lock:
self.batch_thread_count += 1
self.batch_update_done.wait()
with self.count_lock:
self.batch_thread_count -= 1
if self.batch_thread_count == 0:
self.ready_for_new_batch.set()
def _update_node_tags(self):
batch_updates = defaultdict(list)
for node_id, tags in self.tag_cache_pending.items():
for x in tags.items():
batch_updates[x].append(node_id)
self.tag_cache[node_id] = tags
self.tag_cache_pending = defaultdict(dict)
self._create_tags(batch_updates)
def _create_tags(self, batch_updates):
for (k, v), node_ids in batch_updates.items():
m = "Set tag {}={} on {}".format(k, v, node_ids)
with LogTimer("AliyunNodeProvider: {}".format(m)):
if k == TAG_RAY_NODE_NAME:
k = "Name"
self.acs.tag_resource(node_ids, [{"Key": k, "Value": v}])
def create_node(self, node_config: Dict[str, Any], tags: Dict[str, str],
count: int) -> Optional[Dict[str, Any]]:
filter_tags = [{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
}, {
"Key": TAG_RAY_NODE_KIND,
"Value": tags[TAG_RAY_NODE_KIND]
}, {
"Key": TAG_RAY_USER_NODE_TYPE,
"Value": tags[TAG_RAY_USER_NODE_TYPE]
}, {
"Key": TAG_RAY_LAUNCH_CONFIG,
"Value": tags[TAG_RAY_LAUNCH_CONFIG]
}, {
"Key": TAG_RAY_NODE_NAME,
"Value": tags[TAG_RAY_NODE_NAME]
}]
reused_nodes_dict = {}
if self.cache_stopped_nodes:
reuse_nodes_candidate = self.acs.describe_instances(
tags=filter_tags)
if reuse_nodes_candidate:
with cli_logger.group("Stopping instances to reuse"):
reuse_node_ids = []
for node in reuse_nodes_candidate:
node_id = node.get("InstanceId")
status = node.get("Status")
if status != STOPPING and status != STOPPED:
continue
if status == STOPPING:
# wait for node stopped
while self.acs.describe_instances(
instance_ids=[node_id])[0].get(
"Status") == STOPPING:
                                logger.info("waiting for %s to stop" % node_id)
time.sleep(STOPPING_NODE_DELAY)
# logger.info("reuse %s" % node_id)
reuse_node_ids.append(node_id)
reused_nodes_dict[node.get("InstanceId")] = node
self.acs.start_instance(node_id)
self.tag_cache[node_id] = node.get("Tags")
self.set_node_tags(node_id, tags)
if len(reuse_node_ids) == count:
break
count -= len(reuse_node_ids)
created_nodes_dict = {}
if count > 0:
filter_tags.append({
"Key": TAG_RAY_NODE_STATUS,
"Value": tags[TAG_RAY_NODE_STATUS]
})
instance_id_sets = self.acs.run_instances(
instance_type=node_config["InstanceType"],
image_id=node_config["ImageId"],
tags=filter_tags,
amount=count,
vswitch_id=self.provider_config["v_switch_id"],
security_group_id=self.provider_config["security_group_id"],
key_pair_name=self.provider_config["key_name"])
instances = self.acs.describe_instances(
instance_ids=instance_id_sets)
if instances is not None:
for instance in instances:
created_nodes_dict[instance.get("InstanceId")] = instance
all_created_nodes = reused_nodes_dict
all_created_nodes.update(created_nodes_dict)
return all_created_nodes
def terminate_node(self, node_id: str) -> None:
logger.info("terminate node: %s" % node_id)
if self.cache_stopped_nodes:
            logger.info(
                "Stopping instance {} (to terminate instead, "
                "set `cache_stopped_nodes: False` "
                "under `provider` in the cluster configuration)".format(
                    node_id))
self.acs.stop_instance(node_id)
else:
self.acs.delete_instance(node_id)
def terminate_nodes(self, node_ids: List[str]) -> None:
if not node_ids:
return
if self.cache_stopped_nodes:
logger.info(
"Stopping instances {} (to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)".format(
node_ids))
self.acs.stop_instances(node_ids)
else:
self.acs.delete_instances(node_ids)
def _get_node(self, node_id):
"""Refresh and get info for this node, updating the cache."""
self.non_terminated_nodes({}) # Side effect: updates cache
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
# Node not in {pending, running} -- retry with a point query. This
# usually means the node was recently preempted or terminated.
matches = self.acs.describe_instances(instance_ids=[node_id])
assert len(matches) == 1, "Invalid instance id {}".format(node_id)
return matches[0]
def _get_cached_node(self, node_id):
"""Return node info from cache if possible, otherwise fetches it."""
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id)
@staticmethod
def bootstrap_config(cluster_config):
return bootstrap_aliyun(cluster_config)
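# --- Editor's configuration sketch (values are placeholders; not part of the upstream
# file). It lists every provider_config key this class reads: __init__ uses access_key,
# access_key_secret, region and cache_stopped_nodes, while create_node additionally
# reads v_switch_id, security_group_id and key_name.
if __name__ == "__main__":
    example_provider_config = {
        "access_key": "<ALIYUN_ACCESS_KEY>",
        "access_key_secret": "<ALIYUN_ACCESS_KEY_SECRET>",
        "region": "cn-hangzhou",
        "cache_stopped_nodes": True,
        "v_switch_id": "<VSWITCH_ID>",
        "security_group_id": "<SECURITY_GROUP_ID>",
        "key_name": "<KEY_PAIR_NAME>",
    }
    # AliyunNodeProvider(example_provider_config, "demo") would normally be constructed by
    # the autoscaler from the cluster YAML; it is not instantiated here to avoid creating
    # a real AcsClient.
    print(sorted(example_provider_config))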
|
pcmoritz/ray-1
|
python/ray/autoscaler/_private/aliyun/node_provider.py
|
Python
|
apache-2.0
| 12,663
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the views for GSoC Organization Application.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from django.conf.urls.defaults import url
from soc.logic.models.org_app_survey import logic as org_app_logic
from soc.views import forms
from soc.views.models.org_app_survey import OrgAppSurveyForm
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.helper import url_patterns
class OrgAppForm(OrgAppSurveyForm, forms.ModelForm):
"""Form for Organization Applications inherited from Surveys.
"""
#TODO: Rewrite this class while refactoring surveys
def __init__(self, *args, **kwargs):
"""Act as a bridge between the new Forms APIs and the existing Survey
Form classes.
"""
kwargs.update({
'survey': kwargs.get('instance', None),
'survey_logic': org_app_logic,
})
super(OrgAppForm, self).__init__(*args, **kwargs)
class OrgApp(RequestHandler):
  """View methods for GSoC Organization Applications.
"""
def templatePath(self):
return 'v2/modules/gsoc/org_app/apply.html'
def djangoURLPatterns(self):
"""Returns the list of tuples for containing URL to view method mapping.
"""
return [
url(r'^gsoc/org_app/apply/%s$' % url_patterns.SURVEY, self,
name='gsoc_org_app_apply')
]
def checkAccess(self):
"""Access checks for GSoC Organization Application.
"""
pass
  def context(self):
    """Handler for the GSoC Organization Application HTTP GET request.
"""
org_app_keyfields = {
'prefix': self.kwargs.get('prefix'),
'scope_path': '%s/%s' % (self.kwargs.get('sponsor'),
self.kwargs.get('program')),
'link_id': self.kwargs.get('survey'),
}
org_app_entity = org_app_logic.getFromKeyFieldsOr404(org_app_keyfields)
if self.data.request.method == 'POST':
org_app_form = OrgAppForm(self.data.POST, instance=org_app_entity)
else:
org_app_form = OrgAppForm(instance=org_app_entity)
return {
'page_name': 'Organization Application',
'org_app_form': org_app_form,
}
|
SRabbelier/Melange
|
app/soc/modules/gsoc/views/org_app.py
|
Python
|
apache-2.0
| 2,789
|
import unittest
from mopidy_tunein import Extension
class ExtensionTest(unittest.TestCase):
def test_get_default_config(self):
ext = Extension()
config = ext.get_default_config()
self.assertIn("[tunein]", config)
self.assertIn("enabled = true", config)
def test_get_config_schema(self):
ext = Extension()
schema = ext.get_config_schema()
self.assertIn("timeout", schema)
self.assertIn("filter", schema)
|
kingosticks/mopidy-tunein
|
tests/test_extension.py
|
Python
|
apache-2.0
| 483
|
#
# UpdateLogger.py
#
"""
package oompa.tracking
TODO: still waffling about whether to log two-columns - "datetime {json}" or "{json-with-datetime-field}"
"""
import json
import os
from datetime import datetime
class UpdateLogger:
"""
records updates for later replay
"""
def __init__(self, config):
self.config = config
# XXX really get from config
config_base = os.environ["HOME"]
oompa_base = os.path.join(config_base, "oompa")
self._updates_folder = os.path.join(oompa_base, "updates")
# cache
self._streams = {}
return
# ###
#
# file-based subclass
# TODO: factor out to a subclass
#
def _getUpdatePath(self, datetime = None):
if isinstance(datetime, str):
yyyymmdd = datetime
else:
# in file subclass, we assume datetime is not none
yyyymmdd = datetime.strftime("%Y%m%d")
return os.path.join(self._updates_folder, "%s.updates.log" % yyyymmdd)
def _getUpdateStream(self, datetime = None):
path = self._getUpdatePath(datetime)
if path not in self._streams:
self._streams[path] = open(path, "a")
# assumes that oompa_base exists
if not os.path.exists(self._updates_folder):
os.mkdir(self._updates_folder)
return self._streams[path]
def _logUpdate(self, info_d):
now = datetime.now()
info_d["datetime"] = now.strftime("%Y%m%d-%H:%M:%S")
updateStream = self._getUpdateStream(now)
updateStream.write("%s\n" % json.dumps(info_d))
# print("# wrote update: %s" % json.dumps(info_d))
# updateStream.flush()
return
#
# ###
def logListUpdate(self, entityMetadata, fieldName, action, **extra):
# i think this may end up being the only kind of update
info_d = {
"kind": "list3",
"subject_kind": entityMetadata.kind,
"subject": entityMetadata.name,
"field": fieldName,
"action": action,
}
info_d.update(extra)
self._logUpdate(info_d)
return
def logUpdates(self, entityMetadata, fieldName, newItems):
"""
XXX i don't think this is generic across any kind of update
"""
if not newItems:
return
info_d = {
"kind": "list1", # stupid - merge list1 and list2 handling
"field": fieldName,
"subject_kind": entityMetadata.kind,
"subject": entityMetadata.name,
}
for item in newItems:
# XXX non-generic
if fieldName == "repoNames":
# note that created_at and updated_at could be fetched later. updated will certainly change
info_d["full_name"] = item.full_name
# info_d["created_at"] = item.created_at
# these assume that someone has called refresh
# info_d["parent"] = item.parent
# info_d["source"] = item.source
# note: *not* including blurb
else:
print(" logUpdates() - *not* a repoName: %s" % fieldName)
pass
self._logUpdate(info_d)
pass
return
def logListUpdates(self, entityMetadata, fieldName, action, values):
"""
XXX i don't think this is generic across any kind of update
TODO: use self.logListUpdate, to get more normalized
"""
info_d = {
"kind": "list2",
"field": fieldName,
"action": action,
"subject_kind": entityMetadata.kind,
"subject": entityMetadata.name,
}
# TODO: probably just write out full list in one record
for value in values:
info_d["full_name"] = value
# TODO: if action is added, refresh the metadata to add parent and source
# TODO:
#
# info_d["created_at"] = item.created_at
# these assume that someone has called refresh
# info_d["parent"] = repo.parent
# info_d["source"] = item.source
#
# note: *not* including the blurb - renderer can look it up
self._logUpdate(info_d)
pass
return
def getUpdates(self, start_date = None, end_date = None):
"""
generate stream of updates
TODO: support various filters
"""
# print("UpdateLogger.getUpdates(): %s - %s" % ( start_date, end_date ))
# TODO: assuming today is dumb. maybe discover most recent
# update?
if end_date is None:
end_date = datetime.now()
if start_date is None:
start_date = end_date
if start_date != end_date:
            raise NotImplementedError("date ranges spanning multiple days are not supported yet")
# XXX need date_utils.date_range
date_range = [ start_date, ]
for date in date_range:
# yyyymmdd = date.strftime("%Y%m%d")
yyyymmdd = date
update_path = self._getUpdatePath(yyyymmdd)
if not os.path.exists(update_path):
print("# update_path does not exist: %s" % update_path)
continue
for line in open(update_path):
yield json.loads(line)
return
def organizeUpdatesByEntity(self, updates):
"""
organize updates by ( subject_kind, subject ) from the update
"""
byEntity = {}
for update in updates:
entity = ( update["subject_kind"], update["subject"] )
byEntity.setdefault(entity, []).append(update)
pass
return byEntity
def organizeUpdatesByKind(self, updates):
"""
organize updates by ( kind, ) from each update
"""
by_kind = {}
for update in updates:
by_kind.setdefault(update["kind"], []).append(update)
pass
return by_kind
def organizeUpdatesByField(self, updates):
"""
organize updates by ( field, ) from each update
"""
by_field = {}
for update in updates:
by_field.setdefault(update["field"], []).append(update)
pass
return by_field
def close(self):
        for stream in self._streams.values():
            stream.close()
return
pass
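# --- Editor's sketch of the on-disk record format (not part of the upstream file).
# _logUpdate appends one JSON object per line to $HOME/oompa/updates/YYYYMMDD.updates.log;
# the entity below ("github_user"/"octocat") is hypothetical, standing in for
# entityMetadata.kind and entityMetadata.name as used by logListUpdate.
if __name__ == "__main__":
    example_record = {
        "kind": "list3",
        "subject_kind": "github_user",
        "subject": "octocat",
        "field": "repoNames",
        "action": "added",
        "datetime": "20240101-12:00:00",
    }
    print(json.dumps(example_record))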
|
sjtsp2008/oompa
|
oompa/tracking/UpdateLogger.py
|
Python
|
apache-2.0
| 6,922
|
from helper import unittest, PillowTestCase, py3
from PIL import Image
class TestLibPack(PillowTestCase):
def pack(self):
pass # not yet
def test_pack(self):
def pack(mode, rawmode):
if len(mode) == 1:
im = Image.new(mode, (1, 1), 1)
else:
im = Image.new(mode, (1, 1), (1, 2, 3, 4)[:len(mode)])
if py3:
return list(im.tobytes("raw", rawmode))
else:
return [ord(c) for c in im.tobytes("raw", rawmode)]
order = 1 if Image._ENDIAN == '<' else -1
self.assertEqual(pack("1", "1"), [128])
self.assertEqual(pack("1", "1;I"), [0])
self.assertEqual(pack("1", "1;R"), [1])
self.assertEqual(pack("1", "1;IR"), [0])
self.assertEqual(pack("L", "L"), [1])
self.assertEqual(pack("I", "I"), [1, 0, 0, 0][::order])
self.assertEqual(pack("F", "F"), [0, 0, 128, 63][::order])
self.assertEqual(pack("LA", "LA"), [1, 2])
self.assertEqual(pack("RGB", "RGB"), [1, 2, 3])
self.assertEqual(pack("RGB", "RGB;L"), [1, 2, 3])
self.assertEqual(pack("RGB", "BGR"), [3, 2, 1])
self.assertEqual(pack("RGB", "RGBX"), [1, 2, 3, 255]) # 255?
self.assertEqual(pack("RGB", "BGRX"), [3, 2, 1, 0])
self.assertEqual(pack("RGB", "XRGB"), [0, 1, 2, 3])
self.assertEqual(pack("RGB", "XBGR"), [0, 3, 2, 1])
self.assertEqual(pack("RGBX", "RGBX"), [1, 2, 3, 4]) # 4->255?
self.assertEqual(pack("RGBA", "RGBA"), [1, 2, 3, 4])
self.assertEqual(pack("CMYK", "CMYK"), [1, 2, 3, 4])
self.assertEqual(pack("YCbCr", "YCbCr"), [1, 2, 3])
def test_unpack(self):
def unpack(mode, rawmode, bytes_):
im = None
if py3:
data = bytes(range(1, bytes_+1))
else:
data = ''.join(chr(i) for i in range(1, bytes_+1))
im = Image.frombytes(mode, (1, 1), data, "raw", rawmode, 0, 1)
return im.getpixel((0, 0))
def unpack_1(mode, rawmode, value):
assert mode == "1"
im = None
if py3:
im = Image.frombytes(
mode, (8, 1), bytes([value]), "raw", rawmode, 0, 1)
else:
im = Image.frombytes(
mode, (8, 1), chr(value), "raw", rawmode, 0, 1)
return tuple(im.getdata())
X = 255
self.assertEqual(unpack_1("1", "1", 1), (0, 0, 0, 0, 0, 0, 0, X))
self.assertEqual(unpack_1("1", "1;I", 1), (X, X, X, X, X, X, X, 0))
self.assertEqual(unpack_1("1", "1;R", 1), (X, 0, 0, 0, 0, 0, 0, 0))
self.assertEqual(unpack_1("1", "1;IR", 1), (0, X, X, X, X, X, X, X))
self.assertEqual(unpack_1("1", "1", 170), (X, 0, X, 0, X, 0, X, 0))
self.assertEqual(unpack_1("1", "1;I", 170), (0, X, 0, X, 0, X, 0, X))
self.assertEqual(unpack_1("1", "1;R", 170), (0, X, 0, X, 0, X, 0, X))
self.assertEqual(unpack_1("1", "1;IR", 170), (X, 0, X, 0, X, 0, X, 0))
self.assertEqual(unpack("L", "L;2", 1), 0)
self.assertEqual(unpack("L", "L;4", 1), 0)
self.assertEqual(unpack("L", "L", 1), 1)
self.assertEqual(unpack("L", "L;I", 1), 254)
self.assertEqual(unpack("L", "L;R", 1), 128)
self.assertEqual(unpack("L", "L;16", 2), 2) # little endian
self.assertEqual(unpack("L", "L;16B", 2), 1) # big endian
self.assertEqual(unpack("LA", "LA", 2), (1, 2))
self.assertEqual(unpack("LA", "LA;L", 2), (1, 2))
self.assertEqual(unpack("RGB", "RGB", 3), (1, 2, 3))
self.assertEqual(unpack("RGB", "RGB;L", 3), (1, 2, 3))
self.assertEqual(unpack("RGB", "RGB;R", 3), (128, 64, 192))
self.assertEqual(unpack("RGB", "RGB;16B", 6), (1, 3, 5)) # ?
self.assertEqual(unpack("RGB", "BGR", 3), (3, 2, 1))
self.assertEqual(unpack("RGB", "RGB;15", 2), (8, 131, 0))
self.assertEqual(unpack("RGB", "BGR;15", 2), (0, 131, 8))
self.assertEqual(unpack("RGB", "RGB;16", 2), (8, 64, 0))
self.assertEqual(unpack("RGB", "BGR;16", 2), (0, 64, 8))
self.assertEqual(unpack("RGB", "RGB;4B", 2), (17, 0, 34))
self.assertEqual(unpack("RGB", "RGBX", 4), (1, 2, 3))
self.assertEqual(unpack("RGB", "BGRX", 4), (3, 2, 1))
self.assertEqual(unpack("RGB", "XRGB", 4), (2, 3, 4))
self.assertEqual(unpack("RGB", "XBGR", 4), (4, 3, 2))
self.assertEqual(unpack("RGBA", "RGBA", 4), (1, 2, 3, 4))
self.assertEqual(unpack("RGBA", "BGRA", 4), (3, 2, 1, 4))
self.assertEqual(unpack("RGBA", "ARGB", 4), (2, 3, 4, 1))
self.assertEqual(unpack("RGBA", "ABGR", 4), (4, 3, 2, 1))
self.assertEqual(unpack("RGBA", "RGBA;15", 2), (8, 131, 0, 0))
self.assertEqual(unpack("RGBA", "BGRA;15", 2), (0, 131, 8, 0))
self.assertEqual(unpack("RGBA", "RGBA;4B", 2), (17, 0, 34, 0))
self.assertEqual(unpack("RGBX", "RGBX", 4), (1, 2, 3, 4)) # 4->255?
self.assertEqual(unpack("RGBX", "BGRX", 4), (3, 2, 1, 255))
self.assertEqual(unpack("RGBX", "XRGB", 4), (2, 3, 4, 255))
self.assertEqual(unpack("RGBX", "XBGR", 4), (4, 3, 2, 255))
self.assertEqual(unpack("RGBX", "RGB;15", 2), (8, 131, 0, 255))
self.assertEqual(unpack("RGBX", "BGR;15", 2), (0, 131, 8, 255))
self.assertEqual(unpack("RGBX", "RGB;4B", 2), (17, 0, 34, 255))
self.assertEqual(unpack("CMYK", "CMYK", 4), (1, 2, 3, 4))
self.assertEqual(unpack("CMYK", "CMYK;I", 4), (254, 253, 252, 251))
self.assertRaises(ValueError, lambda: unpack("L", "L", 0))
self.assertRaises(ValueError, lambda: unpack("RGB", "RGB", 2))
self.assertRaises(ValueError, lambda: unpack("CMYK", "CMYK", 2))
if __name__ == '__main__':
unittest.main()
# End of file
|
1upon0/rfid-auth-system
|
GUI/printer/Pillow-2.7.0/Tests/test_lib_pack.py
|
Python
|
apache-2.0
| 5,896
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
__all__ = [
'Initializer', 'Zeros', 'Ones', 'Constant', 'RandomUniform', 'RandomNormal', 'TruncatedNormal',
'deconv2d_bilinear_upsampling_initializer'
]
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Parameters
----------
shape : tuple of int.
The shape of the tensor.
dtype : Optional dtype of the tensor.
If not provided will return tensor of `tf.float32`.
Returns
        -------
        A tensor of the given shape and dtype, initialized as specified.
        """
raise NotImplementedError
def get_config(self):
"""Returns the configuration of the initializer as a JSON-serializable dict.
Returns
-------
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Parameters
----------
config : A python dictionary.
It will typically be the output of `get_config`.
Returns
-------
An Initializer instance.
"""
if 'dtype' in config:
config.pop('dtype')
return cls(**config)
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0.
"""
def __call__(self, shape, dtype=tf.float32):
return tf.zeros(shape, dtype=dtype)
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1.
"""
def __call__(self, shape, dtype=tf.float32):
return tf.ones(shape, dtype=dtype)
class Constant(Initializer):
"""Initializer that generates tensors initialized to a constant value.
Parameters
----------
value : A python scalar or a numpy array.
The assigned value.
"""
def __init__(self, value=0):
self.value = value
def __call__(self, shape, dtype=None):
return tf.constant(self.value, shape=shape, dtype=dtype)
def get_config(self):
return {"value": self.value}
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Parameters
----------
minval : A python scalar or a scalar tensor.
Lower bound of the range of random values to generate.
maxval : A python scalar or a scalar tensor.
Upper bound of the range of random values to generate.
seed : A Python integer.
Used to seed the random generator.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
def __call__(self, shape, dtype=tf.float32):
return tf.random.uniform(shape, self.minval, self.maxval, dtype=dtype, seed=self.seed)
def get_config(self):
return {"minval": self.minval, "maxval": self.maxval, "seed": self.seed}
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
Parameters
----------
mean : A python scalar or a scalar tensor.
Mean of the random values to generate.
stddev : A python scalar or a scalar tensor.
Standard deviation of the random values to generate.
seed : A Python integer.
Used to seed the random generator.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
def __call__(self, shape, dtype=tf.float32):
return tf.random.normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed)
def get_config(self):
return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
These values are similar to values from a `RandomNormal`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
Parameters
----------
mean : A python scalar or a scalar tensor.
Mean of the random values to generate.
stddev : A python scalar or a scalar tensor.
        Standard deviation of the random values to generate.
seed : A Python integer.
Used to seed the random generator.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
def __call__(self, shape, dtype=tf.float32):
return tf.random.truncated_normal(shape, self.mean, self.stddev, dtype=dtype, seed=self.seed)
def get_config(self):
return {"mean": self.mean, "stddev": self.stddev, "seed": self.seed}
def deconv2d_bilinear_upsampling_initializer(shape):
"""Returns the initializer that can be passed to DeConv2dLayer for initializing the
weights in correspondence to channel-wise bilinear up-sampling.
Used in segmentation approaches such as [FCN](https://arxiv.org/abs/1605.06211)
Parameters
----------
shape : tuple of int
The shape of the filters, [height, width, output_channels, in_channels].
It must match the shape passed to DeConv2dLayer.
Returns
-------
``tf.constant_initializer``
A constant initializer with weights set to correspond to per channel bilinear upsampling
when passed as W_int in DeConv2dLayer
"""
if shape[0] != shape[1]:
raise Exception('deconv2d_bilinear_upsampling_initializer only supports symmetrical filter sizes')
if shape[3] < shape[2]:
raise Exception(
'deconv2d_bilinear_upsampling_initializer behaviour is not defined for num_in_channels < num_out_channels '
)
filter_size = shape[0]
num_out_channels = shape[2]
num_in_channels = shape[3]
# Create bilinear filter kernel as numpy array
bilinear_kernel = np.zeros([filter_size, filter_size], dtype=np.float32)
scale_factor = (filter_size + 1) // 2
if filter_size % 2 == 1:
center = scale_factor - 1
else:
center = scale_factor - 0.5
for x in range(filter_size):
for y in range(filter_size):
bilinear_kernel[x, y] = (1 - abs(x - center) / scale_factor) * (1 - abs(y - center) / scale_factor)
weights = np.zeros((filter_size, filter_size, num_out_channels, num_in_channels), dtype=np.float32)
for i in range(num_out_channels):
weights[:, :, i, i] = bilinear_kernel
    # assign the numpy array to a constant_initializer and pass it to get_variable
return tf.constant_initializer(value=weights)
# Alias
zeros = Zeros
ones = Ones
constant = Constant
random_uniform = RandomUniform
random_normal = RandomNormal
truncated_normal = TruncatedNormal
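# --- Editor's usage sketch (not part of the upstream file). The shapes are arbitrary;
# [3, 3, 8, 16] merely satisfies the documented [height, width, output_channels,
# in_channels] layout with symmetric filters and num_in_channels >= num_out_channels.
if __name__ == "__main__":
    w_init = TruncatedNormal(mean=0.0, stddev=0.02)
    w = w_init(shape=(4, 4), dtype=tf.float32)
    print(w.shape)  # (4, 4)
    bilinear_init = deconv2d_bilinear_upsampling_initializer([3, 3, 8, 16])
    print(bilinear_init)  # a tf.constant_initializer holding the per-channel bilinear kernels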
|
zsdonghao/tensorlayer
|
tensorlayer/initializers.py
|
Python
|
apache-2.0
| 7,005
|
input = """
% Atom bug shouldn't be derived, as the body of the rule
% should be false. Auxiliary atoms shouldn't be printed
% out, as they are censored.
d(1).
d(2).
d(3).
bug :- 1 < #count{V : d(V)} <= 2.
"""
output = """
% Atom bug shouldn't be derived, as the body of the rule
% should be false. Auxiliary atoms shouldn't be printed
% out, as they are censored.
d(1).
d(2).
d(3).
bug :- 1 < #count{V : d(V)} <= 2.
"""
|
veltri/DLV2
|
tests/parser/aggregates.count.assignment.1.test.py
|
Python
|
apache-2.0
| 429
|
"""Component to make instant statistics about your history."""
import datetime
import logging
import math
import voluptuous as vol
from homeassistant.components import history
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_ENTITY_ID,
CONF_NAME,
CONF_STATE,
CONF_TYPE,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "history_stats"
CONF_START = "start"
CONF_END = "end"
CONF_DURATION = "duration"
CONF_PERIOD_KEYS = [CONF_START, CONF_END, CONF_DURATION]
CONF_TYPE_TIME = "time"
CONF_TYPE_RATIO = "ratio"
CONF_TYPE_COUNT = "count"
CONF_TYPE_KEYS = [CONF_TYPE_TIME, CONF_TYPE_RATIO, CONF_TYPE_COUNT]
DEFAULT_NAME = "unnamed statistics"
UNITS = {CONF_TYPE_TIME: "h", CONF_TYPE_RATIO: "%", CONF_TYPE_COUNT: ""}
ICON = "mdi:chart-line"
ATTR_VALUE = "value"
def exactly_two_period_keys(conf):
"""Ensure exactly 2 of CONF_PERIOD_KEYS are provided."""
if sum(param in conf for param in CONF_PERIOD_KEYS) != 2:
raise vol.Invalid(
"You must provide exactly 2 of the following:" " start, end, duration"
)
return conf
PLATFORM_SCHEMA = vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_STATE): cv.string,
vol.Optional(CONF_START): cv.template,
vol.Optional(CONF_END): cv.template,
vol.Optional(CONF_DURATION): cv.time_period,
vol.Optional(CONF_TYPE, default=CONF_TYPE_TIME): vol.In(CONF_TYPE_KEYS),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
),
exactly_two_period_keys,
)
# noinspection PyUnusedLocal
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the History Stats sensor."""
entity_id = config.get(CONF_ENTITY_ID)
entity_state = config.get(CONF_STATE)
start = config.get(CONF_START)
end = config.get(CONF_END)
duration = config.get(CONF_DURATION)
sensor_type = config.get(CONF_TYPE)
name = config.get(CONF_NAME)
for template in [start, end]:
if template is not None:
template.hass = hass
add_entities(
[
HistoryStatsSensor(
hass, entity_id, entity_state, start, end, duration, sensor_type, name
)
]
)
return True
class HistoryStatsSensor(Entity):
"""Representation of a HistoryStats sensor."""
def __init__(
self, hass, entity_id, entity_state, start, end, duration, sensor_type, name
):
"""Initialize the HistoryStats sensor."""
self._entity_id = entity_id
self._entity_state = entity_state
self._duration = duration
self._start = start
self._end = end
self._type = sensor_type
self._name = name
self._unit_of_measurement = UNITS[sensor_type]
self._period = (datetime.datetime.now(), datetime.datetime.now())
self.value = None
self.count = None
@callback
def start_refresh(*args):
"""Register state tracking."""
@callback
def force_refresh(*args):
"""Force the component to refresh."""
self.async_schedule_update_ha_state(True)
force_refresh()
async_track_state_change(self.hass, self._entity_id, force_refresh)
# Delay first refresh to keep startup fast
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_refresh)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self.value is None or self.count is None:
return None
if self._type == CONF_TYPE_TIME:
return round(self.value, 2)
if self._type == CONF_TYPE_RATIO:
return HistoryStatsHelper.pretty_ratio(self.value, self._period)
if self._type == CONF_TYPE_COUNT:
return self.count
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
if self.value is None:
return {}
hsh = HistoryStatsHelper
return {ATTR_VALUE: hsh.pretty_duration(self.value)}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and updates the states."""
# Get previous values of start and end
p_start, p_end = self._period
# Parse templates
self.update_period()
start, end = self._period
# Convert times to UTC
start = dt_util.as_utc(start)
end = dt_util.as_utc(end)
p_start = dt_util.as_utc(p_start)
p_end = dt_util.as_utc(p_end)
now = datetime.datetime.now()
# Compute integer timestamps
start_timestamp = math.floor(dt_util.as_timestamp(start))
end_timestamp = math.floor(dt_util.as_timestamp(end))
p_start_timestamp = math.floor(dt_util.as_timestamp(p_start))
p_end_timestamp = math.floor(dt_util.as_timestamp(p_end))
now_timestamp = math.floor(dt_util.as_timestamp(now))
# If period has not changed and current time after the period end...
if (
start_timestamp == p_start_timestamp
and end_timestamp == p_end_timestamp
and end_timestamp <= now_timestamp
):
# Don't compute anything as the value cannot have changed
return
# Get history between start and end
history_list = history.state_changes_during_period(
self.hass, start, end, str(self._entity_id)
)
if self._entity_id not in history_list.keys():
return
# Get the first state
last_state = history.get_state(self.hass, start, self._entity_id)
last_state = last_state is not None and last_state == self._entity_state
last_time = start_timestamp
elapsed = 0
count = 0
# Make calculations
for item in history_list.get(self._entity_id):
current_state = item.state == self._entity_state
current_time = item.last_changed.timestamp()
if last_state:
elapsed += current_time - last_time
if current_state and not last_state:
count += 1
last_state = current_state
last_time = current_time
# Count time elapsed between last history state and end of measure
if last_state:
measure_end = min(end_timestamp, now_timestamp)
elapsed += measure_end - last_time
# Save value in hours
self.value = elapsed / 3600
# Save counter
self.count = count
def update_period(self):
"""Parse the templates and store a datetime tuple in _period."""
start = None
end = None
# Parse start
if self._start is not None:
try:
start_rendered = self._start.render()
except (TemplateError, TypeError) as ex:
HistoryStatsHelper.handle_template_exception(ex, "start")
return
start = dt_util.parse_datetime(start_rendered)
if start is None:
try:
start = dt_util.as_local(
dt_util.utc_from_timestamp(math.floor(float(start_rendered)))
)
except ValueError:
                    _LOGGER.error(
                        "Parsing error: start must be a datetime or a timestamp"
                    )
return
# Parse end
if self._end is not None:
try:
end_rendered = self._end.render()
except (TemplateError, TypeError) as ex:
HistoryStatsHelper.handle_template_exception(ex, "end")
return
end = dt_util.parse_datetime(end_rendered)
if end is None:
try:
end = dt_util.as_local(
dt_util.utc_from_timestamp(math.floor(float(end_rendered)))
)
except ValueError:
_LOGGER.error(
"Parsing error: end must be a datetime " "or a timestamp"
)
return
# Calculate start or end using the duration
if start is None:
start = end - self._duration
if end is None:
end = start + self._duration
if start > dt_util.now():
# History hasn't been written yet for this period
return
if dt_util.now() < end:
# No point in making stats of the future
end = dt_util.now()
self._period = start, end
class HistoryStatsHelper:
"""Static methods to make the HistoryStatsSensor code lighter."""
@staticmethod
def pretty_duration(hours):
"""Format a duration in days, hours, minutes, seconds."""
seconds = int(3600 * hours)
days, seconds = divmod(seconds, 86400)
hours, seconds = divmod(seconds, 3600)
minutes, seconds = divmod(seconds, 60)
if days > 0:
return "%dd %dh %dm" % (days, hours, minutes)
if hours > 0:
return "%dh %dm" % (hours, minutes)
return "%dm" % minutes
@staticmethod
def pretty_ratio(value, period):
"""Format the ratio of value / period duration."""
if len(period) != 2 or period[0] == period[1]:
return 0.0
ratio = 100 * 3600 * value / (period[1] - period[0]).total_seconds()
return round(ratio, 1)
@staticmethod
def handle_template_exception(ex, field):
"""Log an error nicely if the template cannot be interpreted."""
if ex.args and ex.args[0].startswith("UndefinedError: 'None' has no attribute"):
# Common during HA startup - so just a warning
_LOGGER.warning(ex)
return
_LOGGER.error("Error parsing template for field %s", field)
_LOGGER.error(ex)
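# --- Editor's sketch (not part of the upstream component). It exercises the two pure
# helpers above and the "exactly two of start/end/duration" rule; the one-key config
# dict is hypothetical and intentionally invalid.
if __name__ == "__main__":
    print(HistoryStatsHelper.pretty_duration(26.5))  # "1d 2h 30m"
    sample_period = (datetime.datetime(2024, 1, 1), datetime.datetime(2024, 1, 2))
    print(HistoryStatsHelper.pretty_ratio(6.0, sample_period))  # 6 h of 24 h -> 25.0
    try:
        exactly_two_period_keys({CONF_START: None})
    except vol.Invalid as err:
        print(err)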
|
leppa/home-assistant
|
homeassistant/components/history_stats/sensor.py
|
Python
|
apache-2.0
| 10,823
|
#!/usr/bin/env python
"""This utility installs an engage extension into a deployment home.
"""
import os
import os.path
import sys
from optparse import OptionParser
import shutil
import re
import logging
logger = logging.getLogger(__name__)
# enable importing from the python_pkg sub-directory
base_src_dir=os.path.abspath(os.path.dirname(__file__))
python_pkg_dir = os.path.join(base_src_dir, "python_pkg")
assert os.path.exists(python_pkg_dir), "Python package directory %s does not exist" % python_pkg_dir
sys.path.append(python_pkg_dir)
from engage.extensions import installed_extensions, extension_versions
dist_root = os.path.abspath(os.path.dirname(__file__))
dist_root_parent = os.path.abspath(os.path.join(dist_root, ".."))
class EngageExtension(object):
def __init__(self, path, name, version, update):
self.path = path
self.name = name
self.version = version
self.update = update
def _copy_dir(self, src_dirname, target, dry_run=False):
src_dir = os.path.join(self.path, src_dirname)
dest_dir = os.path.join(os.path.join(target, src_dirname),
self.name)
if os.path.exists(src_dir):
logger.info("Copying %s to %s" % (src_dirname, dest_dir))
if os.path.exists(dest_dir):
if self.update:
logger.warn("removing old version of %s" % dest_dir)
if not dry_run:
shutil.rmtree(dest_dir)
else:
raise Exception("Target directory %s already exists" % dest_dir)
if not dry_run:
shutil.copytree(src_dir, dest_dir)
elif self.update and os.path.exists(dest_dir):
logger.warn("removing old version of %s" % dest_dir)
if not dry_run:
shutil.rmtree(dest_dir)
def install(self, dist_root, dry_run=False):
if not dry_run:
logger.info("Running install of %s to %s" % (self.name, dist_root))
else:
logger.info("Dry run install of %s to %s" % (self.name, dist_root))
self._copy_dir("metadata", dist_root, dry_run=dry_run)
dest_engage_pkg_dir = os.path.join(os.path.join(dist_root, "python_pkg"),
"engage")
self._copy_dir("drivers", dest_engage_pkg_dir, dry_run=dry_run)
self._copy_dir("tests", dest_engage_pkg_dir, dry_run=dry_run)
self._copy_dir("mgt_backends", dest_engage_pkg_dir, dry_run=dry_run)
# For the software packages we copy the individual files to the main package
# cache.
src_cache_dir = os.path.join(self.path, "sw_packages")
dest_cache_dir = os.path.join(dist_root, "sw_packages")
if os.path.exists(src_cache_dir):
logger.info("Copying software packages from %s to %s" %
(src_cache_dir, dest_cache_dir))
for fname in os.listdir(src_cache_dir):
src_file = os.path.join(src_cache_dir, fname)
dest_file = os.path.join(dest_cache_dir, fname)
logger.debug("Copying %s to %s" % (fname, dest_file))
shutil.copyfile(src_file, dest_file)
# update the extension file
if self.name not in installed_extensions:
installed_extensions.append(self.name)
extension_versions[self.name] = self.version
extns_file = os.path.join(dest_engage_pkg_dir, "extensions.py")
logger.info("Updating extensions file %s" % extns_file)
with open(extns_file, "rb") as ef:
lines = ef.read().split("\n")
updated_list = False
updated_versions = False
if not dry_run:
with open(extns_file, "wb") as ef:
for line in lines:
if re.match("^installed_extensions = ", line):
ef.write("installed_extensions = %s\n" %
installed_extensions.__repr__())
updated_list = True
elif re.match("^extension_versions = ", line):
ef.write("extension_versions = %s\n" %
extension_versions.__repr__())
updated_versions = True
else:
ef.write(line + "\n")
else:
for line in lines:
if re.match("^installed_extensions = ", line):
sys.stdout.write("installed_extensions = %s\n" %
installed_extensions.__repr__())
updated_list = True
elif re.match("^extension_versions = ", line):
sys.stdout.write("extension_versions = %s\n" %
extension_versions.__repr__())
updated_versions = True
else:
sys.stdout.write(line + "\n")
if ((not updated_list) or (not updated_versions)):
raise Exception("Extension registration file %s did not have correct format, unable to complete update" % extns_file)
logger.info("Successfully installed extension %s" % self.name)
def process_args(argv):
usage = "usage: %prog [options] path_to_extension"
parser = OptionParser(usage=usage)
parser.add_option("--dry-run", action="store_true",
help="If specified, don't make changes, just log what would be done",
default=False)
parser.add_option("--update", "-u", action="store_true",
help="If specified, override any existing version of the extension",
default=False)
(options, args) = parser.parse_args(args=argv)
if len(args)==0:
parser.print_help()
sys.exit(0)
elif len(args) > 1:
parser.error("Expecting exactly one argument, path to extension directory")
extension_path = os.path.abspath(args[0])
if not os.path.exists(extension_path):
parser.error("Extension directory %s does not exist" % extension_path)
extension_name = os.path.basename(extension_path)
if os.path.basename(dist_root_parent)=="src":
parser.error("Cannot install extension into source tree %s, run from distribution tree" % dist_root)
if extension_name in installed_extensions and not options.update:
parser.error("Extension %s already installed" % extension_name)
version_file = os.path.join(extension_path, "version.txt")
if not os.path.exists(version_file):
parser.error("Missing version file %s" % version_file)
with open(version_file, "rb") as vf:
extension_version = vf.read().rstrip()
ext = EngageExtension(extension_path, extension_name,
extension_version, options.update)
return (ext, options)
def main(argv=sys.argv[1:]):
(ext, opts) = process_args(argv)
ext.install(dist_root, dry_run=opts.dry_run)
return 0
if __name__ == "__main__":
#formatter = logging.Formatter("[%(levelname)s][%(name)s] %(message)s")
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
#console_handler.setFormatter(formatter)
root_logger.addHandler(console_handler)
logger.setLevel(logging.INFO)
sys.exit(main())
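# --- Editor's invocation sketch (paths are placeholders; not part of the upstream file).
# Matching the OptionParser defined in process_args:
#   python install_extension.py /path/to/my_extension
#   python install_extension.py --dry-run /path/to/my_extension
#   python install_extension.py --update /path/to/my_extension
# The extension directory must contain version.txt and may provide metadata/, drivers/,
# tests/, mgt_backends/ and sw_packages/ sub-directories, which install() copies into
# the distribution tree.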
|
quaddra/engage
|
install_extension.py
|
Python
|
apache-2.0
| 7,602
|
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup, UnicodeDammit
import time
import os
import re
import log
import tools
class Get(object):
# timeout, retry_interval -> seconds
def __init__(self, url='', timeout=5, retry=5, retry_interval=2, proxies={}, headers={}, download_file=None, savepath='.'):
self.log = log.Log()
self.url = url
self.timeout = timeout
self.retry = retry
self.retry_interval = retry_interval
self.proxies = proxies
self.headers = headers
if download_file is None:
self.download_file = False
else:
self.download_file = download_file
self.savepath = savepath
self.download_result = None
self.__run()
def config(self, url='', timeout=5, retry=5, retry_interval=2, proxies={}, headers={}, download_file=None, savepath=''):
self.url = url
self.timeout = timeout
self.retry = retry
self.retry_interval = retry_interval
if len(proxies) > 0:
self.proxies = proxies
if not download_file is None:
self.download_file = download_file
if len(headers) > 0:
self.headers = headers
if savepath != '':
self.savepath = savepath
self.__run()
def __run(self):
if self.url != '':
self.s = requests.Session()
self.__get()
if self.download_file:
self.__download()
else:
self.__soup()
def __get(self):
if self.url == '':
return False
self.log.info('start get [%s]'%self.url)
self.r = None
for i in range(self.retry):
try:
self.r = self.s.get(self.url, timeout=self.timeout, proxies=self.proxies, headers=self.headers)
break
except Exception as e:
self.log.error( '[retry %d] get [%s] fail. except [%s]'%(i+1, self.url, str(e)) )
time.sleep(self.retry_interval)
if self.r is None:
self.log.error('get [%s] fail' % self.url)
return False
self.log.info('end, get [%s]' % self.url)
return True
def __soup(self):
if not self.r:
self.log.error('self.r is None, cannot get soup. url [%s]' % self.url)
return False
if self.download_file:
self.log.info('to download url [%s], should not get soup' % self.url)
return False
self.soup = None
try:
self.soup = BeautifulSoup(self.r.content, 'html.parser')
return True
except Exception as e:
            self.log.error('construct BeautifulSoup fail, url [%s], except [%s]' % (self.url, str(e)))
return False
def __download(self):
self.log.info('start download [%s]' % self.url)
if self.r is None:
self.log.error('self.r is None. download fail. url [%s]' % self.url)
return False
filepath = self.savepath
tools.mkdir(filepath)
r = self.r
url = self.url
        # get content-length from the response headers
tot_size = 0
try:
tot_size = int( r.headers['content-length'] )
except Exception as e:
self.log.error('cannot get content-length, url [%s], headers [%s]' % (url, str(r.headers)) )
# get file name
filename = self.__get_file_name()
chunk_size = 4096
flag = 3
# retry if size is not right.
for i in range(3):
now_size = 0
try:
#print filename, type(filename)
with open( os.path.join(self.savepath, filename), 'wb' ) as f:
for chunk in r.iter_content(chunk_size):
now_size = now_size + len(chunk)
f.write(chunk)
except Exception as e:
                self.log.error(u'something went wrong. url [%s], exception [%s], filename [%s], retry [%d]' % (url, unicode(e), filename, i+1) )
flag = 3
if tot_size == 0:
                self.log.info(u'failed to get file size, cannot verify. downloaded size [%d], filename [%s], url [%s]' % (now_size, filename, url) )
flag = 0
break
if now_size != tot_size:
                self.log.error('file size mismatch. downloaded size [%d], expected size [%d], filename [%s], url [%s], retry [%d]' % (now_size, tot_size, filename.encode('utf8'), url, i+1))
flag = 4
else:
flag = 0
break
time.sleep(1)
self.log.info('end download [%s]' % self.url)
self.download_result = {'errno': flag, 'filename': filename}
return self.download_result
def __get_file_name(self):
        # derive the file suffix from the content-type header
r = self.r
url = self.url
        suf = ''
        filename = None
try:
ct = r.headers['content-type']
ctl = ct.split(';')
for i in ctl:
try:
suf = constant.CONTENT_TYPE_REVERSE[i.strip()]
except Exception as e:
pass
except Exception as e:
self.log.error('cannot get suffix, url[%s], headers [%s]' % (url, str(r.headers)))
        # try Content-Disposition first; a filename/suffix found there takes priority
try:
content_disposition = r.headers['Content-Disposition']
fntmp = re.findall(r'filename=[\"\'](.*?)[\"\']', content_disposition)[0]
pos = fntmp.rfind('.')
if pos > -1:
fn = fntmp[:pos]
suf = fntmp[pos:]
else:
fn = fntmp
if filename is None:
filename = fn
dammit = UnicodeDammit(filename, ['utf-8', 'gb2312', 'gbk'])
filename = dammit.unicode_markup
except Exception as e:
pass
        # filename and suffix parsed from the url
pos = url.rfind("/") + 1
if pos >= len(url) or pos == -1:
fn = str(time.time()).replace(".", "")
else:
fn = url[pos:]
pos = fn.rfind('.')
if pos >= len(fn) or pos == -1:
pass
else:
if suf == '':
suf = fn[pos:]
try:
fn = fn[:pos]
except Exception as e:
pass
        # fall back to the url-derived name when Content-Disposition did not provide one
        if filename is None:
            filename = fn
            dammit = UnicodeDammit(filename, ['utf-8', 'gb2312', 'gbk'])
            filename = dammit.unicode_markup
        # check whether a file with the same name already exists and de-duplicate if so
i = 0
while True:
if i == 0:
if not os.path.exists( os.path.join(self.savepath, filename+suf) ):
break
else:
if not os.path.exists( os.path.join(self.savepath, filename+("(%d)"%i)+suf ) ):
filename = filename + ("(%d)"%i)
break
i = i + 1
filename = filename + suf
        # make sure the filename is legal on Windows
filename = tools.replaceBanCharacter(filename)
return filename
def download(self, url, savepath=''):
self.url = url
self.download_file = True
if savepath != '':
self.savepath = savepath
return self.__download()
def get(self):
return self.r
def soup(self):
return self.soup
def getsoup(self):
return (self.r, self.soup)
def clear_headers(self):
self.headers = {}
def clear_proxies(self):
self.proxies = {}
def stop(self):
self.log.stop()
def __del__(self):
self.stop()
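# --- Editor's usage sketch (not part of the upstream file; the URLs and save path are
# placeholders). The constructor issues the request immediately: without download_file it
# fetches and parses the page, with download_file=True it saves the response body.
if __name__ == '__main__':
    page = Get(url='http://example.com/', timeout=5, retry=2, retry_interval=1)
    r, soup = page.getsoup()  # requests.Response and the parsed BeautifulSoup
    print(soup.title)
    dl = Get(url='http://example.com/some_file.bin', download_file=True, savepath='./downloads')
    print(dl.download_result)  # {'errno': 0, 'filename': ...} on success, see __download()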
|
BD777/WindPythonToy
|
comm/network.py
|
Python
|
apache-2.0
| 7,827
|
# -*- coding: utf-8 -*-
'''
Created on 17/2/16.
@author: love
'''
import paho.mqtt.client as mqtt
import json
import ssl
def on_connect(client, userdata, flags, rc):
print("Connected with result code %d"%rc)
client.publish("Login/HD_Login/1", json.dumps({"userName": user, "passWord": "Hello,anyone!"}),qos=0,retain=False)
def on_message(client, userdata, msg):
print ('---------------')
print ("topic :"+msg.topic)
print ("payload :"+msg.payload)
client.subscribe([("chat",2),("aaa",2)])
client.unsubscribe(["chat"])
#client.publish("login/addUser", json.dumps({"user": user, "say": "Hello,anyone!"}),qos=2,retain=False)
#print(msg.topic+":"+str(msg.payload.decode()))
#print(msg.topic+":"+msg.payload.decode())
#payload = json.loads(msg.payload.decode())
#print(payload.get("user")+":"+payload.get("say"))
def mylog(self,userdata,level, buf):
print buf
if __name__ == '__main__':
client = mqtt.Client(protocol=mqtt.MQTTv31)
    client.username_pw_set("admin", "password")  # must be set, otherwise the broker returns "Connected with result code 4"
client.on_connect = on_connect
client.on_message = on_message
    # Connecting to the test server requires TLS; Python's TLS support is relatively weak.
    # A CA certificate is needed; the one used here comes from https://curl.haxx.se/docs/caextract.html
HOST = "mqant.com"
# client.tls_set(ca_certs="caextract.pem", certfile=None, keyfile=None, cert_reqs=ssl.CERT_REQUIRED,
# tls_version=ssl.PROTOCOL_TLSv1, ciphers=None)
client.connect(HOST, 3563, 60)
#client.loop_forever()
    user = raw_input("Enter username: ")
client.user_data_set(user)
client.loop_start()
while True:
        s = raw_input("Type 'join' to join the room first, then type any chat message:\n")
if s:
if s=="join":
client.publish("Chat/HD_JoinChat/2", json.dumps({"roomName": "mqant"}),qos=0,retain=False)
elif s=="start":
client.publish("Master/HD_Start_Process/2", json.dumps({"ProcessID": "001"}),qos=0,retain=False)
elif s=="stop":
client.publish("Master/HD_Stop_Process/2", json.dumps({"ProcessID": "001"}),qos=0,retain=False)
else:
client.publish("Chat/HD_Say/2", json.dumps({"roomName": "mqant","from":user,"target":"*","content": s}),qos=0,retain=False)
|
liangdas/mqantserver
|
client/mqtt_chat_client.py
|
Python
|
apache-2.0
| 2,416
|
def joke():
return 'Knock Knock. Who is there?'
|
cloudfoundry/python-buildpack
|
fixtures/setup_py/funniest/__init__.py
|
Python
|
apache-2.0
| 52
|
# Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
minemeld.collectd
Provides a client to collectd for storing metrics.
"""
import socket
import logging
LOG = logging.getLogger(__name__)
class CollectdClient(object):
"""Collectd client.
Args:
path (str): path to the collectd unix socket
"""
def __init__(self, path):
self.path = path
self.socket = None
def _open_socket(self):
if self.socket is not None:
return
_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
_socket.connect(self.path)
self.socket = _socket
def _readline(self):
result = ''
data = None
while data != '\n':
data = self.socket.recv(1)
            # recv() returns an empty string (not None) when the peer closes the
            # connection, so treat an empty read as end-of-stream as well.
            if data == '\n' or not data:
return result
result += data
def _send_cmd(self, command):
self._open_socket()
self.socket.send(command+'\n')
ans = self._readline()
status, message = ans.split(None, 1)
status = int(status)
if status < 0:
raise RuntimeError('Error communicating with collectd %s' %
message)
message = [message]
for _ in range(status):
message.append(self._readline())
return status, '\n'.join(message)
def flush(self, identifier=None, timeout=None):
cmd = 'FLUSH'
if timeout is not None:
cmd += ' timeout=%d' % timeout
if identifier is not None:
cmd += ' identifier=%s' % identifier
self._send_cmd(
cmd
)
def putval(self, identifier, value, timestamp='N',
type_='minemeld_counter', hostname='minemeld', interval=None):
if isinstance(timestamp, int):
timestamp = '%d' % timestamp
identifier = '/'.join([hostname, identifier, type_])
command = 'PUTVAL %s' % identifier
if interval is not None:
command += ' interval=%d' % interval
command += ' %s:%d' % (timestamp, value)
self._send_cmd(command)
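# Usage sketch (illustrative only; the socket path and metric identifier below
# are assumptions, not values defined by this module):
#
#   client = CollectdClient('/var/run/collectd-unixsock')
#   client.putval('feeds/example_metric', 42, interval=60)
#   client.flush(timeout=10)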
|
PaloAltoNetworks/minemeld-core
|
minemeld/collectd.py
|
Python
|
apache-2.0
| 2,672
|
from osrf_pycommon.process_utils import asyncio
from osrf_pycommon.process_utils.async_execute_process import async_execute_process
from osrf_pycommon.process_utils import get_loop
from .impl_aep_protocol import create_protocol
loop = get_loop()
@asyncio.coroutine
def run(cmd, **kwargs):
transport, protocol = yield from async_execute_process(
create_protocol(), cmd, **kwargs)
retcode = yield from protocol.complete
return protocol.stdout_buffer, protocol.stderr_buffer, retcode
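# Usage sketch (illustrative; assumes a POSIX environment where 'echo' exists):
#
#   stdout, stderr, retcode = loop.run_until_complete(run(['echo', 'hello']))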
|
ros2/ci
|
ros2_batch_job/vendor/osrf_pycommon/tests/unit/test_process_utils/impl_aep_asyncio.py
|
Python
|
apache-2.0
| 505
|
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several older modules and functions:
os.system
os.spawn*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False, pass_fds=()):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On POSIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On POSIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize will be supplied as the corresponding argument to the io.open()
function when creating the stdin/stdout/stderr pipe file objects:
0 means unbuffered (read & write are one system call and can return short),
1 means line buffered, any other positive value means use a buffer of
approximately that size. A negative bufsize, the default, means the system
default of io.DEFAULT_BUFFER_SIZE will be used.
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
On POSIX, if preexec_fn is set to a callable object, this object will be
called in the child process just before the child is executed. The use
of preexec_fn is not thread safe, using it in the presence of threads
could lead to a deadlock in the child process before the new executable
is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed. The default for close_fds
varies by platform: Always true on POSIX. True when stdin/stdout/stderr
are None on Windows, false otherwise.
pass_fds is an optional sequence of file descriptors to keep open between the
parent and child. Providing any pass_fds implicitly sets close_fds to true.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
On POSIX, if restore_signals is True all signals that Python sets to
SIG_IGN are restored to SIG_DFL in the child process before the exec.
Currently this includes the SIGPIPE, SIGXFZ and SIGXFSZ signals. This
parameter does nothing on Windows.
On POSIX, if start_new_session is True, the setsid() system call will be made
in the child process prior to executing the command.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is false, the file objects stdin, stdout and stderr
are opened as binary files, and no line ending conversion is done.
If universal_newlines is true, the file objects stdout and stderr are
opened as text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the old Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Also, the newlines attribute
of the file objects stdout, stdin and stderr are not updated by the
communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> retcode = subprocess.call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
>>> subprocess.check_call(["ls", "-l"])
0
getstatusoutput(cmd):
Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). Universal newlines mode is used,
meaning that the result will be decoded to a string.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
getoutput(cmd):
Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
check_output(*popenargs, **kwargs):
Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> output = subprocess.check_output(["ls", "-l", "/dev/null"])
There is an additional optional argument, "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument.
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
Exceptions defined within this module inherit from SubprocessError.
check_call() and check_output() will raise CalledProcessError if the
called process returns a non-zero return code. TimeoutExpired will
be raised if a timeout was specified and expired.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (POSIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All the older functions in this section fail (more or less) silently if
the executed program cannot be found; the subprocess replacements raise an
OSError exception instead.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print("Child was terminated by signal", -retcode, file=sys.stderr)
else:
print("Child returned", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
"""
import os
import sys
mswindows = (sys.platform == "win32") or (sys.platform == "cli" and os.name == "nt")
import io
import time
import signal
import builtins
import warnings
import errno
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
# Exception classes used by this module.
class SubprocessError(Exception): pass
class CalledProcessError(SubprocessError):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
class TimeoutExpired(SubprocessError):
"""This exception is raised when the timeout expires while waiting for a
child process.
"""
def __init__(self, cmd, timeout, output=None):
self.cmd = cmd
self.timeout = timeout
self.output = output
def __str__(self):
return ("Command '%s' timed out after %s seconds" %
(self.cmd, self.timeout))
if mswindows:
import threading
import msvcrt
import _winapi
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
else:
import _posixsubprocess
import select
import selectors
try:
import threading
except ImportError:
import dummy_threading as threading
# When select or poll has indicated that the file is writable,
# we can write up to _PIPE_BUF bytes without risk of blocking.
# POSIX defines PIPE_BUF as >= 512.
_PIPE_BUF = getattr(select, 'PIPE_BUF', 512)
# poll/select have the advantage of not requiring any extra file
# descriptor, contrarily to epoll/kqueue (also, they require a single
# syscall).
if hasattr(selectors, 'PollSelector'):
_PopenSelector = selectors.PollSelector
else:
_PopenSelector = selectors.SelectSelector
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "getstatusoutput",
"getoutput", "check_output", "CalledProcessError", "DEVNULL"]
if mswindows:
from _winapi import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
STD_ERROR_HANDLE, SW_HIDE,
STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)
__all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
"STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
"STD_ERROR_HANDLE", "SW_HIDE",
"STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])
class Handle(int):
closed = False
def Close(self, CloseHandle=_winapi.CloseHandle):
if not self.closed:
self.closed = True
CloseHandle(self)
def Detach(self):
if not self.closed:
self.closed = True
return int(self)
raise ValueError("already closed")
def __repr__(self):
return "Handle(%d)" % int(self)
__del__ = Close
__str__ = __repr__
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# This list holds Popen instances for which the underlying process had not
# exited at the time its __del__ method got called: those processes are wait()ed
# for synchronously from _cleanup() when a new Popen object is created, to avoid
# zombie processes.
_active = []
def _cleanup():
for inst in _active[:]:
res = inst._internal_poll(_deadstate=sys.maxsize)
if res is not None:
try:
_active.remove(inst)
except ValueError:
# This can happen if two threads create a new Popen instance.
# It's harmless that it was already removed, so ignore.
pass
PIPE = -1
STDOUT = -2
DEVNULL = -3
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except InterruptedError:
continue
# XXX This function is only used by multiprocessing and the test suite,
# but it's here so that it can be imported when Python is compiled without
# threads.
def _args_from_interpreter_flags():
"""Return a list of command-line arguments reproducing the current
settings in sys.flags and sys.warnoptions."""
flag_opt_map = {
'debug': 'd',
# 'inspect': 'i',
# 'interactive': 'i',
'optimize': 'O',
'dont_write_bytecode': 'B',
'no_user_site': 's',
'no_site': 'S',
'ignore_environment': 'E',
'verbose': 'v',
'bytes_warning': 'b',
'quiet': 'q',
}
args = []
for flag, opt in flag_opt_map.items():
v = getattr(sys.flags, flag)
if v > 0:
args.append('-' + opt * v)
for opt in sys.warnoptions:
args.append('-W' + opt)
return args
def call(*popenargs, timeout=None, **kwargs):
"""Run command with arguments. Wait for command to complete or
timeout, then return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
with Popen(*popenargs, **kwargs) as p:
try:
return p.wait(timeout=timeout)
except:
p.kill()
p.wait()
raise
def check_call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. If
the exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the call function. Example:
check_call(["ls", "-l"])
"""
retcode = call(*popenargs, **kwargs)
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
return 0
def check_output(*popenargs, timeout=None, **kwargs):
r"""Run command with arguments and return its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
b'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
b'ls: non_existent_file: No such file or directory\n'
There is an additional optional argument, "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it too will be used internally. Example:
>>> check_output(["sed", "-e", "s/foo/bar/"],
... input=b"when in the course of fooman events\n")
b'when in the course of barman events\n'
If universal_newlines=True is passed, the return value will be a
string rather than bytes.
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if 'input' in kwargs:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
inputdata = kwargs['input']
del kwargs['input']
kwargs['stdin'] = PIPE
else:
inputdata = None
with Popen(*popenargs, stdout=PIPE, **kwargs) as process:
try:
output, unused_err = process.communicate(inputdata, timeout=timeout)
except TimeoutExpired:
process.kill()
output, unused_err = process.communicate()
raise TimeoutExpired(process.args, timeout, output=output)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if retcode:
raise CalledProcessError(retcode, process.args, output=output)
return output
def list2cmdline(seq):
"""
Translate a sequence of arguments into a command line
string, using the same rules as the MS C runtime:
1) Arguments are delimited by white space, which is either a
space or a tab.
2) A string surrounded by double quotation marks is
interpreted as a single argument, regardless of white space
contained within. A quoted string can be embedded in an
argument.
3) A double quotation mark preceded by a backslash is
interpreted as a literal double quotation mark.
4) Backslashes are interpreted literally, unless they
immediately precede a double quotation mark.
5) If backslashes immediately precede a double quotation mark,
every pair of backslashes is interpreted as a literal
backslash. If the number of backslashes is odd, the last
backslash escapes the next double quotation mark as
described in rule 3.
"""
# See
# http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
# or search http://msdn.microsoft.com for
# "Parsing C++ Command-Line Arguments"
result = []
needquote = False
for arg in seq:
bs_buf = []
# Add a space to separate this argument from the others
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or not arg
if needquote:
result.append('"')
for c in arg:
if c == '\\':
# Don't know if we need to double yet.
bs_buf.append(c)
elif c == '"':
# Double backslashes.
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
# Normal char
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
# Add remaining backslashes, if any.
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
# Various tools for executing commands and looking at their output and status.
#
def getstatusoutput(cmd):
""" Return (status, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). Universal newlines mode is used,
        meaning that the result will be decoded to a string.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> import subprocess
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(256, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(256, 'sh: /bin/junk: not found')
"""
try:
data = check_output(cmd, shell=True, universal_newlines=True, stderr=STDOUT)
status = 0
except CalledProcessError as ex:
data = ex.output
status = ex.returncode
if data[-1:] == '\n':
data = data[:-1]
return status, data
def getoutput(cmd):
"""Return output (stdout or stderr) of executing cmd in a shell.
Like getstatusoutput(), except the exit status is ignored and the return
value is a string containing the command's output. Example:
>>> import subprocess
>>> subprocess.getoutput('ls /bin/ls')
'/bin/ls'
"""
return getstatusoutput(cmd)[1]
_PLATFORM_DEFAULT_CLOSE_FDS = object()
class Popen(object):
_child_created = False # Set here since __del__ checks it
def __init__(self, args, bufsize=-1, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=_PLATFORM_DEFAULT_CLOSE_FDS,
shell=False, cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0,
restore_signals=True, start_new_session=False,
pass_fds=()):
"""Create new Popen instance."""
_cleanup()
# Held while anything is calling waitpid before returncode has been
# updated to prevent clobbering returncode if wait() or poll() are
# called from multiple threads at once. After acquiring the lock,
# code must re-check self.returncode to see if another thread just
# finished a waitpid() call.
self._waitpid_lock = threading.Lock()
self._input = None
self._communication_started = False
if bufsize is None:
bufsize = -1 # Restore default
if not isinstance(bufsize, int):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
any_stdio_set = (stdin is not None or stdout is not None or
stderr is not None)
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
if any_stdio_set:
close_fds = False
else:
close_fds = True
elif close_fds and any_stdio_set:
raise ValueError(
"close_fds is not supported on Windows platforms"
" if you redirect stdin/stdout/stderr")
else:
# POSIX
if close_fds is _PLATFORM_DEFAULT_CLOSE_FDS:
close_fds = True
if pass_fds and not close_fds:
warnings.warn("pass_fds overriding close_fds.", RuntimeWarning)
close_fds = True
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.args = args
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are -1 when not using PIPEs. The child objects are -1
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
# We wrap OS handles *before* launching the child, otherwise a
# quickly terminating child could make our fds unwrappable
# (see #8458).
if mswindows:
if p2cwrite != -1:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread != -1:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread != -1:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite != -1:
self.stdin = io.open(p2cwrite, 'wb', bufsize)
if universal_newlines:
self.stdin = io.TextIOWrapper(self.stdin, write_through=True,
line_buffering=(bufsize == 1))
if c2pread != -1:
self.stdout = io.open(c2pread, 'rb', bufsize)
if universal_newlines:
self.stdout = io.TextIOWrapper(self.stdout)
if errread != -1:
self.stderr = io.open(errread, 'rb', bufsize)
if universal_newlines:
self.stderr = io.TextIOWrapper(self.stderr)
self._closed_child_pipe_fds = False
try:
self._execute_child(args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session)
except:
# Cleanup if the child failed starting.
for f in filter(None, (self.stdin, self.stdout, self.stderr)):
try:
f.close()
except OSError:
pass # Ignore EBADF or other errors.
if not self._closed_child_pipe_fds:
to_close = []
if stdin == PIPE:
to_close.append(p2cread)
if stdout == PIPE:
to_close.append(c2pwrite)
if stderr == PIPE:
to_close.append(errwrite)
if hasattr(self, '_devnull'):
to_close.append(self._devnull)
for fd in to_close:
try:
os.close(fd)
except OSError:
pass
raise
def _translate_newlines(self, data, encoding):
data = data.decode(encoding)
return data.replace("\r\n", "\n").replace("\r", "\n")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
try: # Flushing a BufferedWriter may raise an error
if self.stdin:
self.stdin.close()
finally:
# Wait for the process to terminate, to avoid zombies.
self.wait()
def __del__(self, _maxsize=sys.maxsize):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxsize)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def _get_devnull(self):
if not hasattr(self, '_devnull'):
self._devnull = os.open(os.devnull, os.O_RDWR)
return self._devnull
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be
bytes to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
if self._communication_started and input:
raise ValueError("Cannot send input after starting communication")
# Optimization: If we are not worried about timeouts, we haven't
# started communicating, and we have one or zero pipes, using select()
# or threads is unnecessary.
if (timeout is None and not self._communication_started and
[self.stdin, self.stdout, self.stderr].count(None) >= 2):
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except OSError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
else:
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
try:
stdout, stderr = self._communicate(input, endtime, timeout)
finally:
self._communication_started = True
sts = self.wait(timeout=self._remaining_time(endtime))
return (stdout, stderr)
def poll(self):
return self._internal_poll()
def _remaining_time(self, endtime):
"""Convenience for _communicate when computing timeouts."""
if endtime is None:
return None
else:
return endtime - _time()
def _check_timeout(self, endtime, orig_timeout):
"""Convenience for checking if a timeout has expired."""
if endtime is None:
return
if _time() > endtime:
raise TimeoutExpired(self.args, orig_timeout)
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (-1, -1, -1, -1, -1, -1)
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
p2cread = _winapi.GetStdHandle(_winapi.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _winapi.CreatePipe(None, 0)
p2cread = Handle(p2cread)
_winapi.CloseHandle(_)
elif stdin == PIPE:
p2cread, p2cwrite = _winapi.CreatePipe(None, 0)
p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite)
elif stdin == DEVNULL:
p2cread = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _winapi.GetStdHandle(_winapi.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _winapi.CreatePipe(None, 0)
c2pwrite = Handle(c2pwrite)
_winapi.CloseHandle(_)
elif stdout == PIPE:
c2pread, c2pwrite = _winapi.CreatePipe(None, 0)
c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite)
elif stdout == DEVNULL:
c2pwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _winapi.GetStdHandle(_winapi.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _winapi.CreatePipe(None, 0)
errwrite = Handle(errwrite)
_winapi.CloseHandle(_)
elif stderr == PIPE:
errread, errwrite = _winapi.CreatePipe(None, 0)
errread, errwrite = Handle(errread), Handle(errwrite)
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = msvcrt.get_osfhandle(self._get_devnull())
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
h = _winapi.DuplicateHandle(
_winapi.GetCurrentProcess(), handle,
_winapi.GetCurrentProcess(), 0, 1,
_winapi.DUPLICATE_SAME_ACCESS)
return Handle(h)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
unused_restore_signals, unused_start_new_session):
"""Execute program (MS Windows version)"""
assert not pass_fds, "pass_fds not supported on Windows."
if not isinstance(args, str):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if -1 not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _winapi.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _winapi.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
# Start the process
try:
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread != -1:
p2cread.Close()
if c2pwrite != -1:
c2pwrite.Close()
if errwrite != -1:
errwrite.Close()
if hasattr(self, '_devnull'):
os.close(self._devnull)
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = Handle(hp)
self.pid = pid
_winapi.CloseHandle(ht)
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_winapi.WaitForSingleObject,
_WAIT_OBJECT_0=_winapi.WAIT_OBJECT_0,
_GetExitCodeProcess=_winapi.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if endtime is not None:
timeout = self._remaining_time(endtime)
if timeout is None:
timeout_millis = _winapi.INFINITE
else:
timeout_millis = int(timeout * 1000)
if self.returncode is None:
result = _winapi.WaitForSingleObject(self._handle,
timeout_millis)
if result == _winapi.WAIT_TIMEOUT:
raise TimeoutExpired(self.args, timeout)
self.returncode = _winapi.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
fh.close()
def _communicate(self, input, endtime, orig_timeout):
# Start reader threads feeding into a list hanging off of this
# object, unless they've already been started.
if self.stdout and not hasattr(self, "_stdout_buff"):
self._stdout_buff = []
self.stdout_thread = \
threading.Thread(target=self._readerthread,
args=(self.stdout, self._stdout_buff))
self.stdout_thread.daemon = True
self.stdout_thread.start()
if self.stderr and not hasattr(self, "_stderr_buff"):
self._stderr_buff = []
self.stderr_thread = \
threading.Thread(target=self._readerthread,
args=(self.stderr, self._stderr_buff))
self.stderr_thread.daemon = True
self.stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except OSError as e:
if e.errno == errno.EPIPE:
# communicate() should ignore pipe full error
pass
elif (e.errno == errno.EINVAL
and self.poll() is not None):
# Issue #19612: stdin.write() fails with EINVAL
# if the process already exited before the write
pass
else:
raise
self.stdin.close()
# Wait for the reader threads, or time out. If we time out, the
# threads remain reading and the fds left open in case the user
# calls communicate again.
if self.stdout is not None:
self.stdout_thread.join(self._remaining_time(endtime))
if self.stdout_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
if self.stderr is not None:
self.stderr_thread.join(self._remaining_time(endtime))
if self.stderr_thread.is_alive():
raise TimeoutExpired(self.args, orig_timeout)
# Collect the output from and close both pipes, now that we know
# both have been read successfully.
stdout = None
stderr = None
if self.stdout:
stdout = self._stdout_buff
self.stdout.close()
if self.stderr:
stderr = self._stderr_buff
self.stderr.close()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process."""
# Don't signal a process that we know has already died.
if self.returncode is not None:
return
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process."""
# Don't terminate a process that we know has already died.
if self.returncode is not None:
return
try:
_winapi.TerminateProcess(self._handle, 1)
except PermissionError:
# ERROR_ACCESS_DENIED (winerror 5) is received when the
# process already died.
rc = _winapi.GetExitCodeProcess(self._handle)
if rc == _winapi.STILL_ACTIVE:
raise
self.returncode = rc
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = -1, -1
c2pread, c2pwrite = -1, -1
errread, errwrite = -1, -1
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif stdin == DEVNULL:
p2cread = self._get_devnull()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif stdout == DEVNULL:
c2pwrite = self._get_devnull()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif stderr == DEVNULL:
errwrite = self._get_devnull()
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _close_fds(self, fds_to_keep):
start_fd = 3
for fd in sorted(fds_to_keep):
if fd >= start_fd:
os.closerange(start_fd, fd)
start_fd = fd + 1
if start_fd <= MAXFD:
os.closerange(start_fd, MAXFD)
def _execute_child(self, args, executable, preexec_fn, close_fds,
pass_fds, cwd, env,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite,
restore_signals, start_new_session):
"""Execute program (POSIX version)"""
if isinstance(args, (str, bytes)):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
orig_executable = executable
# For transferring possible exec failure from child to parent.
# Data format: "exception name:hex errno:description"
# Pickle is not used; it is complex and involves memory allocation.
errpipe_read, errpipe_write = os.pipe()
# errpipe_write must not be in the standard io 0, 1, or 2 fd range.
low_fds_to_close = []
while errpipe_write < 3:
low_fds_to_close.append(errpipe_write)
errpipe_write = os.dup(errpipe_write)
for low_fd in low_fds_to_close:
os.close(low_fd)
try:
try:
# We must avoid complex work that could involve
# malloc or free in the child process to avoid
# potential deadlocks, thus we do all this here.
# and pass it to fork_exec()
if env is not None:
env_list = [os.fsencode(k) + b'=' + os.fsencode(v)
for k, v in env.items()]
else:
env_list = None # Use execv instead of execve.
executable = os.fsencode(executable)
if os.path.dirname(executable):
executable_list = (executable,)
else:
# This matches the behavior of os._execvpe().
executable_list = tuple(
os.path.join(os.fsencode(dir), executable)
for dir in os.get_exec_path(env))
fds_to_keep = set(pass_fds)
fds_to_keep.add(errpipe_write)
self.pid = _posixsubprocess.fork_exec(
args, executable_list,
close_fds, sorted(fds_to_keep), cwd, env_list,
p2cread, p2cwrite, c2pread, c2pwrite,
errread, errwrite,
errpipe_read, errpipe_write,
restore_signals, start_new_session, preexec_fn)
self._child_created = True
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
# self._devnull is not always defined.
devnull_fd = getattr(self, '_devnull', None)
if p2cread != -1 and p2cwrite != -1 and p2cread != devnull_fd:
os.close(p2cread)
if c2pwrite != -1 and c2pread != -1 and c2pwrite != devnull_fd:
os.close(c2pwrite)
if errwrite != -1 and errread != -1 and errwrite != devnull_fd:
os.close(errwrite)
if devnull_fd is not None:
os.close(devnull_fd)
# Prevent a double close of these fds from __init__ on error.
self._closed_child_pipe_fds = True
# Wait for exec to fail or succeed; possibly raising an
# exception (limited in size)
errpipe_data = bytearray()
while True:
part = _eintr_retry_call(os.read, errpipe_read, 50000)
errpipe_data += part
if not part or len(errpipe_data) > 50000:
break
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if errpipe_data:
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
try:
exception_name, hex_errno, err_msg = (
errpipe_data.split(b':', 2))
except ValueError:
exception_name = b'SubprocessError'
hex_errno = b'0'
err_msg = (b'Bad exception data from child: ' +
repr(errpipe_data))
child_exception_type = getattr(
builtins, exception_name.decode('ascii'),
SubprocessError)
err_msg = err_msg.decode(errors="surrogatepass")
if issubclass(child_exception_type, OSError) and hex_errno:
errno_num = int(hex_errno, 16)
child_exec_never_called = (err_msg == "noexec")
if child_exec_never_called:
err_msg = ""
if errno_num != 0:
err_msg = os.strerror(errno_num)
if errno_num == errno.ENOENT:
if child_exec_never_called:
# The error must be from chdir(cwd).
err_msg += ': ' + repr(cwd)
else:
err_msg += ': ' + repr(orig_executable)
raise child_exception_type(errno_num, err_msg)
raise child_exception_type(err_msg)
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
"""All callers to this function MUST hold self._waitpid_lock."""
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope.
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise SubprocessError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _ECHILD=errno.ECHILD):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
if not self._waitpid_lock.acquire(False):
# Something else is busy calling waitpid. Don't allow two
# at once. We know nothing yet.
return None
try:
if self.returncode is not None:
return self.returncode # Another thread waited.
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except OSError as e:
if _deadstate is not None:
self.returncode = _deadstate
elif e.errno == _ECHILD:
# This happens if SIGCLD is set to be ignored or
# waiting for child processes has otherwise been
# disabled for our process. This child is dead, we
# can't get the status.
# http://bugs.python.org/issue15756
self.returncode = 0
finally:
self._waitpid_lock.release()
return self.returncode
def _try_wait(self, wait_flags):
"""All callers to this function MUST hold self._waitpid_lock."""
try:
(pid, sts) = _eintr_retry_call(os.waitpid, self.pid, wait_flags)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
pid = self.pid
sts = 0
return (pid, sts)
def wait(self, timeout=None, endtime=None):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is not None:
return self.returncode
# endtime is preferred to timeout. timeout is only used for
# printing.
if endtime is not None or timeout is not None:
if endtime is None:
endtime = _time() + timeout
elif timeout is None:
timeout = self._remaining_time(endtime)
if endtime is not None:
# Enter a busy loop if we have a timeout. This busy loop was
# cribbed from Lib/threading.py in Thread.wait() at r71065.
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
if self._waitpid_lock.acquire(False):
try:
if self.returncode is not None:
break # Another thread waited.
(pid, sts) = self._try_wait(os.WNOHANG)
assert pid == self.pid or pid == 0
if pid == self.pid:
self._handle_exitstatus(sts)
break
finally:
self._waitpid_lock.release()
remaining = self._remaining_time(endtime)
if remaining <= 0:
raise TimeoutExpired(self.args, timeout)
delay = min(delay * 2, remaining, .05)
time.sleep(delay)
else:
while self.returncode is None:
with self._waitpid_lock:
if self.returncode is not None:
break # Another thread waited.
(pid, sts) = self._try_wait(0)
# Check the pid and loop as waitpid has been known to
# return 0 even without WNOHANG in odd situations.
# http://bugs.python.org/issue14396.
if pid == self.pid:
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input, endtime, orig_timeout):
if self.stdin and not self._communication_started:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
stdout = None
stderr = None
# Only create this mapping if we haven't already.
if not self._communication_started:
self._fileobj2output = {}
if self.stdout:
self._fileobj2output[self.stdout] = []
if self.stderr:
self._fileobj2output[self.stderr] = []
if self.stdout:
stdout = self._fileobj2output[self.stdout]
if self.stderr:
stderr = self._fileobj2output[self.stderr]
self._save_input(input)
if self._input:
input_view = memoryview(self._input)
with _PopenSelector() as selector:
if self.stdin and input:
selector.register(self.stdin, selectors.EVENT_WRITE)
if self.stdout:
selector.register(self.stdout, selectors.EVENT_READ)
if self.stderr:
selector.register(self.stderr, selectors.EVENT_READ)
while selector.get_map():
timeout = self._remaining_time(endtime)
if timeout is not None and timeout < 0:
raise TimeoutExpired(self.args, orig_timeout)
ready = selector.select(timeout)
self._check_timeout(endtime, orig_timeout)
# XXX Rewrite these to use non-blocking I/O on the file
# objects; they are no longer using C stdio!
for key, events in ready:
if key.fileobj is self.stdin:
chunk = input_view[self._input_offset :
self._input_offset + _PIPE_BUF]
try:
self._input_offset += os.write(key.fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
selector.unregister(key.fileobj)
key.fileobj.close()
else:
raise
else:
if self._input_offset >= len(self._input):
selector.unregister(key.fileobj)
key.fileobj.close()
elif key.fileobj in (self.stdout, self.stderr):
data = os.read(key.fd, 32768)
if not data:
selector.unregister(key.fileobj)
key.fileobj.close()
self._fileobj2output[key.fileobj].append(data)
self.wait(timeout=self._remaining_time(endtime))
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = b''.join(stdout)
if stderr is not None:
stderr = b''.join(stderr)
# Translate newlines, if requested.
# This also turns bytes into strings.
if self.universal_newlines:
if stdout is not None:
stdout = self._translate_newlines(stdout,
self.stdout.encoding)
if stderr is not None:
stderr = self._translate_newlines(stderr,
self.stderr.encoding)
return (stdout, stderr)
def _save_input(self, input):
# This method is called from the _communicate_with_*() methods
# so that if we time out while communicating, we can continue
# sending input if we retry.
if self.stdin and self._input is None:
self._input_offset = 0
self._input = input
if self.universal_newlines and input is not None:
self._input = self._input.encode(self.stdin.encoding)
def send_signal(self, sig):
"""Send a signal to the process."""
# Skip signalling a process that we know has already died.
if self.returncode is None:
os.kill(self.pid, sig)
def terminate(self):
"""Terminate the process with SIGTERM
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the process with SIGKILL
"""
self.send_signal(signal.SIGKILL)
|
moto-timo/ironpython3
|
Src/StdLib/Lib/subprocess.py
|
Python
|
apache-2.0
| 64,399
|
import json
import httplib
import requests
import six
import pyaml
from six.moves.urllib.parse import urljoin
from st2actions.runners.pythonrunner import Action
__all__ = [
'PostResultAction'
]
def _serialize(data):
return pyaml.dump(data)
def format_possible_failure_result(result):
'''
    Error result as generated by the runner container is of the form
    {'message': x, 'traceback': traceback}
    Try and pull out these values upfront. Some other runners that could publish
these properties would get them for free.
'''
output = {}
message = result.get('message', None)
if message:
output['message'] = message
traceback = result.get('traceback', None)
if traceback:
output['traceback'] = traceback
return output
def format_default_result(result):
try:
output = json.loads(result)
return _serialize(output)
except ValueError:
return result
def format_localrunner_result(result, do_serialize=True):
output = format_possible_failure_result(result)
# Add in various properties if they have values
stdout = result.get('stdout', None)
if stdout:
try:
output['stdout'] = stdout.strip()
except AttributeError:
output['stdout'] = stdout
stderr = result.get('stderr', None)
if stderr:
output['stderr'] = stderr.strip()
return_code = result.get('return_code', 0)
if return_code != 0:
output['return_code'] = return_code
error = result.get('error', None)
if error:
output['error'] = error
return _serialize(output) if do_serialize else output
def format_remoterunner_result(result):
output = format_possible_failure_result(result)
output.update({k: format_localrunner_result(v, do_serialize=False)
for k, v in six.iteritems(result)})
return _serialize(output)
def format_actionchain_result(result):
output = format_possible_failure_result(result)
return '' if not output else _serialize(output)
def format_mistral_result(result):
return format_default_result(result)
def format_pythonrunner_result(result):
output = format_possible_failure_result(result)
# Add in various properties if they have values
result_ = result.get('result', None)
if result_ is not None:
output['result'] = result_
stdout = result.get('stdout', None)
if stdout:
try:
output['stdout'] = stdout.strip()
except AttributeError:
output['stdout'] = stdout
stderr = result.get('stderr', None)
if stderr:
output['stderr'] = stderr.strip()
exit_code = result.get('exit_code', 0)
if exit_code != 0:
output['exit_code'] = exit_code
return _serialize(output)
def format_httprunner_result(result):
return format_default_result(result)
def format_windowsrunner_result(result):
# same format as pythonrunner
return format_pythonrunner_result(result)
FORMATTERS = {
# localrunner
'local-shell-cmd': format_localrunner_result,
'run-local': format_localrunner_result,
'local-shell-script': format_localrunner_result,
'run-local-script': format_localrunner_result,
# remoterunner
'remote-shell-cmd': format_remoterunner_result,
'run-remote': format_remoterunner_result,
'remote-shell-script': format_remoterunner_result,
'run-remote-script': format_remoterunner_result,
# httprunner
'http-request': format_httprunner_result,
'http-runner': format_httprunner_result,
# mistralrunner
'mistral-v1': format_mistral_result,
'mistral-v2': format_mistral_result,
# actionchainrunner
'action-chain': format_actionchain_result,
# pythonrunner
'run-python': format_pythonrunner_result,
'python-script': format_pythonrunner_result,
# windowsrunner
'windows-cmd': format_windowsrunner_result,
'windows-script': format_windowsrunner_result
}
class PostResultAction(Action):
def run(self, result, channel, user=None, whisper=False):
endpoint = self.config['endpoint']
if not endpoint:
raise ValueError('Missing "endpoint" config option')
url = urljoin(endpoint, "/hubot/st2")
headers = {}
headers['Content-Type'] = 'application/json'
body = {
'channel': channel,
'message': self._get_message(result)
}
if user:
body['user'] = user
if whisper is True:
body['whisper'] = whisper
data = json.dumps(body)
self.logger.info(data)
response = requests.post(url=url, headers=headers, data=data)
if response.status_code == httplib.OK:
self.logger.info('Message successfully posted')
else:
self.logger.exception('Failed to post message: %s' % (response.text))
return True
def _get_message(self, data):
envelope = '{message}\nstatus : {status}\nexecution: {execution_id}'.format(**data)
result = self._get_result(data)
if result:
message = '%s\n\nresult :\n--------\n%s' % (envelope, self._get_result(data))
else:
message = envelope
return message
def _get_result(self, data):
result = data.get('data', {'result': {}}).get('result', '{}')
try:
result = json.loads(result)
except ValueError:
            # if json.loads fails then just return result as-is. Should not happen.
return result
return FORMATTERS.get(data['runner_ref'], format_default_result)(result)
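# Illustrative example of the formatter dispatch above (the input values are
# made up, not real execution output):
#
#   FORMATTERS['local-shell-cmd']({'stdout': 'ok\n', 'return_code': 0})
#   # -> pyaml-serialized string "stdout: ok\n" (return_code is omitted when 0)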
|
meirwah/st2contrib
|
packs/hubot/actions/post_result.py
|
Python
|
apache-2.0
| 5,619
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
interactiveshell = pytest.importorskip("IPython.terminal.interactiveshell")
tools = pytest.importorskip("IPython.testing.tools")
@pytest.fixture(scope="session")
def ipython():
config = tools.default_config()
config.TerminalInteractiveShell.simple_prompt = True
shell = interactiveshell.TerminalInteractiveShell.instance(config=config)
return shell
@pytest.fixture(autouse=True)
def ipython_interactive(ipython):
"""Activate IPython's builtin hooks
for the duration of the test scope.
"""
with ipython.builtin_trap:
yield ipython
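# Example test using these fixtures (hypothetical; not part of this conftest):
#
#   def test_simple_cell(ipython):
#       ipython.run_cell("x = 1 + 1")
#       assert ipython.user_ns["x"] == 2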
|
googleapis/python-bigquery
|
samples/magics/conftest.py
|
Python
|
apache-2.0
| 1,166
|
# pylint: disable=redefined-outer-name, comparison-with-callable
"""Test helper functions."""
import gzip
import importlib
import logging
import os
import sys
from typing import Any, Dict, List, Optional, Tuple, Union
import cloudpickle
import numpy as np
import pytest
from _pytest.outcomes import Skipped
from packaging.version import Version
from ..data import InferenceData, from_dict
_log = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def eight_schools_params():
"""Share setup for eight schools."""
return {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
@pytest.fixture(scope="module")
def draws():
"""Share default draw count."""
return 500
@pytest.fixture(scope="module")
def chains():
"""Share default chain count."""
return 2
def create_model(seed=10):
"""Create model with fake data."""
np.random.seed(seed)
nchains = 4
ndraws = 500
data = {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
posterior = {
"mu": np.random.randn(nchains, ndraws),
"tau": abs(np.random.randn(nchains, ndraws)),
"eta": np.random.randn(nchains, ndraws, data["J"]),
"theta": np.random.randn(nchains, ndraws, data["J"]),
}
posterior_predictive = {"y": np.random.randn(nchains, ndraws, len(data["y"]))}
sample_stats = {
"energy": np.random.randn(nchains, ndraws),
"diverging": np.random.randn(nchains, ndraws) > 0.90,
"max_depth": np.random.randn(nchains, ndraws) > 0.90,
}
log_likelihood = {
"y": np.random.randn(nchains, ndraws, data["J"]),
}
prior = {
"mu": np.random.randn(nchains, ndraws) / 2,
"tau": abs(np.random.randn(nchains, ndraws)) / 2,
"eta": np.random.randn(nchains, ndraws, data["J"]) / 2,
"theta": np.random.randn(nchains, ndraws, data["J"]) / 2,
}
prior_predictive = {"y": np.random.randn(nchains, ndraws, len(data["y"])) / 2}
sample_stats_prior = {
"energy": np.random.randn(nchains, ndraws),
"diverging": (np.random.randn(nchains, ndraws) > 0.95).astype(int),
}
model = from_dict(
posterior=posterior,
posterior_predictive=posterior_predictive,
sample_stats=sample_stats,
log_likelihood=log_likelihood,
prior=prior,
prior_predictive=prior_predictive,
sample_stats_prior=sample_stats_prior,
observed_data={"y": data["y"]},
dims={
"y": ["obs_dim"],
"log_likelihood": ["obs_dim"],
"theta": ["school"],
"eta": ["school"],
},
coords={"obs_dim": range(data["J"])},
)
return model
def create_multidimensional_model(seed=10):
"""Create model with fake data."""
np.random.seed(seed)
nchains = 4
ndraws = 500
ndim1 = 5
ndim2 = 7
data = {
"y": np.random.normal(size=(ndim1, ndim2)),
"sigma": np.random.normal(size=(ndim1, ndim2)),
}
posterior = {
"mu": np.random.randn(nchains, ndraws),
"tau": abs(np.random.randn(nchains, ndraws)),
"eta": np.random.randn(nchains, ndraws, ndim1, ndim2),
"theta": np.random.randn(nchains, ndraws, ndim1, ndim2),
}
posterior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2)}
sample_stats = {
"energy": np.random.randn(nchains, ndraws),
"diverging": np.random.randn(nchains, ndraws) > 0.90,
}
log_likelihood = {
"y": np.random.randn(nchains, ndraws, ndim1, ndim2),
}
prior = {
"mu": np.random.randn(nchains, ndraws) / 2,
"tau": abs(np.random.randn(nchains, ndraws)) / 2,
"eta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
"theta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
}
prior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2}
sample_stats_prior = {
"energy": np.random.randn(nchains, ndraws),
"diverging": (np.random.randn(nchains, ndraws) > 0.95).astype(int),
}
model = from_dict(
posterior=posterior,
posterior_predictive=posterior_predictive,
sample_stats=sample_stats,
log_likelihood=log_likelihood,
prior=prior,
prior_predictive=prior_predictive,
sample_stats_prior=sample_stats_prior,
observed_data={"y": data["y"]},
dims={"y": ["dim1", "dim2"], "log_likelihood": ["dim1", "dim2"]},
coords={"dim1": range(ndim1), "dim2": range(ndim2)},
)
return model
def create_data_random(groups=None, seed=10):
"""Create InferenceData object using random data."""
if groups is None:
groups = ["posterior", "sample_stats", "observed_data", "posterior_predictive"]
rng = np.random.default_rng(seed)
data = rng.normal(size=(4, 500, 8))
idata_dict = dict(
posterior={"a": data[..., 0], "b": data},
sample_stats={"a": data[..., 0], "b": data},
observed_data={"b": data[0, 0, :]},
posterior_predictive={"a": data[..., 0], "b": data},
prior={"a": data[..., 0], "b": data},
prior_predictive={"a": data[..., 0], "b": data},
warmup_posterior={"a": data[..., 0], "b": data},
warmup_posterior_predictive={"a": data[..., 0], "b": data},
warmup_prior={"a": data[..., 0], "b": data},
)
idata = from_dict(
**{group: ary for group, ary in idata_dict.items() if group in groups}, save_warmup=True
)
return idata
@pytest.fixture()
def data_random():
"""Fixture containing InferenceData object using random data."""
idata = create_data_random()
return idata
@pytest.fixture(scope="module")
def models():
"""Fixture containing 2 mock inference data instances for testing."""
# blank line to keep black and pydocstyle happy
class Models:
model_1 = create_model(seed=10)
model_2 = create_model(seed=11)
return Models()
@pytest.fixture(scope="module")
def multidim_models():
"""Fixture containing 2 mock inference data instances with multidimensional data for testing."""
# blank line to keep black and pydocstyle happy
class Models:
model_1 = create_multidimensional_model(seed=10)
model_2 = create_multidimensional_model(seed=11)
return Models()
def check_multiple_attrs(
test_dict: Dict[str, List[str]], parent: InferenceData
) -> List[Union[str, Tuple[str, str]]]:
"""Perform multiple hasattr checks on InferenceData objects.
    It first checks whether the parent object contains a given dataset,
    and then (if present) checks the attributes of that dataset.
Given the output of the function, all mismatches between expectation and reality can
be retrieved: a single string indicates a group mismatch and a tuple of strings
``(group, var)`` indicates a mismatch in the variable ``var`` of ``group``.
Parameters
----------
test_dict: dict of {str : list of str}
Its structure should be `{dataset1_name: [var1, var2], dataset2_name: [var]}`.
        A ``~`` at the beginning of a dataset or variable name indicates that the
        name must be asserted to NOT be present.
parent: InferenceData
InferenceData object on which to check the attributes.
Returns
-------
list
List containing the failed checks. It will contain either the dataset_name or a
tuple (dataset_name, var) for all non present attributes.
Examples
--------
    The output below indicates that the ``posterior`` group was expected but not found,
    and that variables ``a`` and ``b`` were expected in the ``prior`` group but not found:
["posterior", ("prior", "a"), ("prior", "b")]
Another example could be the following:
[("posterior", "a"), "~observed_data", ("sample_stats", "~log_likelihood")]
In this case, the output indicates that variable ``a`` was not found in ``posterior``
as it was expected, however, in the other two cases, the preceding ``~`` (kept from the
input negation notation) indicates that ``observed_data`` group should not be present
but was found in the InferenceData and that ``log_likelihood`` variable was found
in ``sample_stats``, also against what was expected.
"""
failed_attrs: List[Union[str, Tuple[str, str]]] = []
for dataset_name, attributes in test_dict.items():
if dataset_name.startswith("~"):
if hasattr(parent, dataset_name[1:]):
failed_attrs.append(dataset_name)
elif hasattr(parent, dataset_name):
dataset = getattr(parent, dataset_name)
for attribute in attributes:
if attribute.startswith("~"):
if hasattr(dataset, attribute[1:]):
failed_attrs.append((dataset_name, attribute))
elif not hasattr(dataset, attribute):
failed_attrs.append((dataset_name, attribute))
else:
failed_attrs.append(dataset_name)
return failed_attrs
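# Hedged usage sketch (illustrative only; the helper name and the group/variable
# names in ``spec`` are hypothetical): an empty return value means every
# expectation in the spec was met by ``idata``.
def _example_check_multiple_attrs(idata):
    spec = {"posterior": ["mu", "tau"], "~warmup_posterior": []}
    return check_multiple_attrs(spec, idata)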
def emcee_version():
"""Check emcee version.
Returns
-------
int
Major version number
"""
import emcee
return int(emcee.__version__[0])
def needs_emcee3_func():
"""Check if emcee3 is required."""
# pylint: disable=invalid-name
needs_emcee3 = pytest.mark.skipif(emcee_version() < 3, reason="emcee3 required")
return needs_emcee3
def _emcee_lnprior(theta):
"""Proper function to allow pickling."""
mu, tau, eta = theta[0], theta[1], theta[2:]
# Half-cauchy prior, hwhm=25
if tau < 0:
return -np.inf
prior_tau = -np.log(tau ** 2 + 25 ** 2)
prior_mu = -((mu / 10) ** 2) # normal prior, loc=0, scale=10
prior_eta = -np.sum(eta ** 2) # normal prior, loc=0, scale=1
return prior_mu + prior_tau + prior_eta
def _emcee_lnprob(theta, y, sigma):
"""Proper function to allow pickling."""
mu, tau, eta = theta[0], theta[1], theta[2:]
prior = _emcee_lnprior(theta)
like_vect = -(((mu + tau * eta - y) / sigma) ** 2)
like = np.sum(like_vect)
return like + prior, (like_vect, np.random.normal((mu + tau * eta), sigma))
def emcee_schools_model(data, draws, chains):
"""Schools model in emcee."""
import emcee
chains = 10 * chains # emcee is sad with too few walkers
y = data["y"]
sigma = data["sigma"]
J = data["J"] # pylint: disable=invalid-name
ndim = J + 2
pos = np.random.normal(size=(chains, ndim))
pos[:, 1] = np.absolute(pos[:, 1]) # pylint: disable=unsupported-assignment-operation
if emcee_version() < 3:
sampler = emcee.EnsembleSampler(chains, ndim, _emcee_lnprob, args=(y, sigma))
# pylint: enable=unexpected-keyword-arg
sampler.run_mcmc(pos, draws)
else:
here = os.path.dirname(os.path.abspath(__file__))
data_directory = os.path.join(here, "saved_models")
filepath = os.path.join(data_directory, "reader_testfile.h5")
backend = emcee.backends.HDFBackend(filepath) # pylint: disable=no-member
backend.reset(chains, ndim)
# pylint: disable=unexpected-keyword-arg
sampler = emcee.EnsembleSampler(
chains, ndim, _emcee_lnprob, args=(y, sigma), backend=backend
)
# pylint: enable=unexpected-keyword-arg
sampler.run_mcmc(pos, draws, store=True)
return sampler
# pylint:disable=no-member,no-value-for-parameter,invalid-name
def _pyro_noncentered_model(J, sigma, y=None):
import pyro
import pyro.distributions as dist
mu = pyro.sample("mu", dist.Normal(0, 5))
tau = pyro.sample("tau", dist.HalfCauchy(5))
with pyro.plate("J", J):
eta = pyro.sample("eta", dist.Normal(0, 1))
theta = mu + tau * eta
return pyro.sample("obs", dist.Normal(theta, sigma), obs=y)
def pyro_noncentered_schools(data, draws, chains):
"""Non-centered eight schools implementation in Pyro."""
import torch
from pyro.infer import MCMC, NUTS
y = torch.from_numpy(data["y"]).float()
sigma = torch.from_numpy(data["sigma"]).float()
nuts_kernel = NUTS(_pyro_noncentered_model, jit_compile=True, ignore_jit_warnings=True)
posterior = MCMC(nuts_kernel, num_samples=draws, warmup_steps=draws, num_chains=chains)
posterior.run(data["J"], sigma, y)
# This block lets the posterior be pickled
posterior.sampler = None
posterior.kernel.potential_fn = None
return posterior
# pylint:disable=no-member,no-value-for-parameter,invalid-name
def _numpyro_noncentered_model(J, sigma, y=None):
import numpyro
import numpyro.distributions as dist
mu = numpyro.sample("mu", dist.Normal(0, 5))
tau = numpyro.sample("tau", dist.HalfCauchy(5))
with numpyro.plate("J", J):
eta = numpyro.sample("eta", dist.Normal(0, 1))
theta = mu + tau * eta
return numpyro.sample("obs", dist.Normal(theta, sigma), obs=y)
def numpyro_schools_model(data, draws, chains):
"""Centered eight schools implementation in NumPyro."""
from jax.random import PRNGKey
from numpyro.infer import MCMC, NUTS
mcmc = MCMC(
NUTS(_numpyro_noncentered_model),
num_warmup=draws,
num_samples=draws,
num_chains=chains,
chain_method="sequential",
)
mcmc.run(PRNGKey(0), extra_fields=("num_steps", "energy"), **data)
# This block lets the posterior be pickled
mcmc.sampler._sample_fn = None # pylint: disable=protected-access
mcmc.sampler._init_fn = None # pylint: disable=protected-access
mcmc.sampler._postprocess_fn = None # pylint: disable=protected-access
mcmc.sampler._potential_fn = None # pylint: disable=protected-access
mcmc.sampler._potential_fn_gen = None # pylint: disable=protected-access
mcmc._cache = {} # pylint: disable=protected-access
return mcmc
def pystan_noncentered_schools(data, draws, chains):
"""Non-centered eight schools implementation for pystan."""
schools_code = """
data {
int<lower=0> J;
real y[J];
real<lower=0> sigma[J];
}
parameters {
real mu;
real<lower=0> tau;
real eta[J];
}
transformed parameters {
real theta[J];
for (j in 1:J)
theta[j] = mu + tau * eta[j];
}
model {
mu ~ normal(0, 5);
tau ~ cauchy(0, 5);
eta ~ normal(0, 1);
y ~ normal(theta, sigma);
}
generated quantities {
vector[J] log_lik;
vector[J] y_hat;
for (j in 1:J) {
log_lik[j] = normal_lpdf(y[j] | theta[j], sigma[j]);
y_hat[j] = normal_rng(theta[j], sigma[j]);
}
}
"""
if pystan_version() == 2:
import pystan # pylint: disable=import-error
stan_model = pystan.StanModel(model_code=schools_code)
fit = stan_model.sampling(
data=data,
iter=draws + 500,
warmup=500,
chains=chains,
check_hmc_diagnostics=False,
control=dict(adapt_engaged=False),
)
else:
import stan # pylint: disable=import-error
stan_model = stan.build(schools_code, data=data)
fit = stan_model.sample(
num_chains=chains, num_samples=draws, num_warmup=500, save_warmup=False
)
return stan_model, fit
def pymc3_noncentered_schools(data, draws, chains):
"""Non-centered eight schools implementation for pymc3."""
import pymc3 as pm
with pm.Model() as model:
mu = pm.Normal("mu", mu=0, sd=5)
tau = pm.HalfCauchy("tau", beta=5)
eta = pm.Normal("eta", mu=0, sd=1, shape=data["J"])
theta = pm.Deterministic("theta", mu + tau * eta)
pm.Normal("obs", mu=theta, sd=data["sigma"], observed=data["y"])
trace = pm.sample(draws, chains=chains)
return model, trace
def library_handle(library):
"""Import a library and return the handle."""
if library == "pystan":
try:
module = importlib.import_module("pystan")
except ImportError:
module = importlib.import_module("stan")
else:
module = importlib.import_module(library)
return module
def load_cached_models(eight_schools_data, draws, chains, libs=None):
"""Load pymc3, pystan, emcee, and pyro models from pickle."""
here = os.path.dirname(os.path.abspath(__file__))
supported = (
("pystan", pystan_noncentered_schools),
("pymc3", pymc3_noncentered_schools),
("emcee", emcee_schools_model),
("pyro", pyro_noncentered_schools),
("numpyro", numpyro_schools_model),
)
data_directory = os.path.join(here, "saved_models")
models = {}
if isinstance(libs, str):
libs = [libs]
for library_name, func in supported:
if libs is not None and library_name not in libs:
continue
library = library_handle(library_name)
if library.__name__ == "stan":
# PyStan3 does not support pickling
# httpstan caches models automatically
_log.info("Generating and loading stan model")
models["pystan"] = func(eight_schools_data, draws, chains)
continue
py_version = sys.version_info
fname = "{0.major}.{0.minor}_{1.__name__}_{1.__version__}_{2}_{3}_{4}.pkl.gzip".format(
py_version, library, sys.platform, draws, chains
)
path = os.path.join(data_directory, fname)
if not os.path.exists(path):
with gzip.open(path, "wb") as buff:
try:
_log.info("Generating and caching %s", fname)
cloudpickle.dump(func(eight_schools_data, draws, chains), buff)
except AttributeError as err:
raise AttributeError(f"Failed caching {library_name}") from err
with gzip.open(path, "rb") as buff:
_log.info("Loading %s from cache", fname)
models[library.__name__] = cloudpickle.load(buff)
return models
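# Hedged usage sketch (not part of the original helpers; the function name is
# made up): ``libs`` may also be a single library name, and the returned dict is
# keyed by the imported module's ``__name__``, e.g. ``"emcee"``.
def _example_load_emcee_only(eight_schools_data):
    return load_cached_models(eight_schools_data, draws=500, chains=2, libs="emcee")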
def pystan_version():
"""Check PyStan version.
Returns
-------
int
Major version number
"""
try:
import pystan # pylint: disable=import-error
version = int(pystan.__version__[0])
except ImportError:
try:
import stan # pylint: disable=import-error
version = int(stan.__version__[0])
except ImportError:
version = None
return version
def test_precompile_models(eight_schools_params, draws, chains):
"""Precompile model files."""
load_cached_models(eight_schools_params, draws, chains)
def running_on_ci() -> bool:
"""Return True if running on CI machine."""
return os.environ.get("ARVIZ_CI_MACHINE") is not None
def importorskip(
modname: str, minversion: Optional[str] = None, reason: Optional[str] = None
) -> Any:
"""Import and return the requested module ``modname``.
Doesn't allow skips on CI machine.
Borrowed and modified from ``pytest.importorskip``.
:param str modname: the name of the module to import
:param str minversion: if given, the imported module's ``__version__``
attribute must be at least this minimal version, otherwise the test is
still skipped.
:param str reason: if given, this reason is shown as the message when the
module cannot be imported.
:returns: The imported module. This should be assigned to its canonical
name.
Example::
docutils = pytest.importorskip("docutils")
"""
# ARVIZ_CI_MACHINE is True if tests run on CI, where ARVIZ_CI_MACHINE env variable exists
ARVIZ_CI_MACHINE = running_on_ci()
if ARVIZ_CI_MACHINE:
import warnings
        compile(modname, "", "eval")  # to catch syntax errors
with warnings.catch_warnings():
# make sure to ignore ImportWarnings that might happen because
# of existing directories with the same name we're trying to
# import but without a __init__.py file
warnings.simplefilter("ignore")
__import__(modname)
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, "__version__", None)
if minversion is not None:
if verattr is None or Version(verattr) < Version(minversion):
raise Skipped(
"module %r has __version__ %r, required is: %r"
% (modname, verattr, minversion),
allow_module_level=True,
)
return mod
else:
return pytest.importorskip(modname=modname, minversion=minversion, reason=reason)
|
arviz-devs/arviz
|
arviz/tests/helpers.py
|
Python
|
apache-2.0
| 21,624
|
##
# Copyright (c) 2008-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks, succeed
from twisted.internet import reactor
from twisted.python.modules import getModule
from twisted.trial import unittest
from twistedcaldav.config import ConfigDict
from twistedcaldav.ical import Component
from txdav.caldav.datastore.scheduling.imip.inbound import IMIPReplyWork
from txdav.caldav.datastore.scheduling.imip.inbound import MailReceiver
from txdav.caldav.datastore.scheduling.imip.inbound import MailRetriever
from txdav.caldav.datastore.scheduling.imip.inbound import injectMessage
from txdav.caldav.datastore.scheduling.imip.inbound import shouldDeleteAllMail
from txdav.caldav.datastore.scheduling.imip.inbound import IMAP4DownloadProtocol
from txdav.common.datastore.test.util import CommonCommonTests
from twext.enterprise.jobqueue import JobItem
import email
class InboundTests(CommonCommonTests, unittest.TestCase):
@inlineCallbacks
def setUp(self):
super(InboundTests, self).setUp()
yield self.buildStoreAndDirectory()
self.receiver = MailReceiver(self.store, self.directory)
self.retriever = MailRetriever(
self.store, self.directory,
ConfigDict({
"Type" : "pop",
"UseSSL" : False,
"Server" : "example.com",
"Port" : 123,
"Username" : "xyzzy",
})
)
def decorateTransaction(txn):
txn._mailRetriever = self.retriever
self.store.callWithNewTransactions(decorateTransaction)
module = getModule(__name__)
self.dataPath = module.filePath.sibling("data")
def dataFile(self, name):
"""
Get the contents of a given data file from the 'data/mail' test
fixtures directory.
"""
return self.dataPath.child(name).getContent()
def test_checkDSNFailure(self):
data = {
'good_reply' : (False, None, None),
'dsn_failure_no_original' : (True, 'failed', None),
'dsn_failure_no_ics' : (True, 'failed', None),
'dsn_failure_with_ics' : (True, 'failed', '''BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:REQUEST
PRODID:-//example Inc.//iCal 3.0//EN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:STANDARD
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
BEGIN:DAYLIGHT
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
END:VTIMEZONE
BEGIN:VEVENT
UID:1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C
DTSTART;TZID=US/Pacific:20080812T094500
DTEND;TZID=US/Pacific:20080812T104500
ATTENDEE;CUTYPE=INDIVIDUAL;CN=User 01;PARTSTAT=ACCEPTED:mailto:user01@exam
ple.com
ATTENDEE;CUTYPE=INDIVIDUAL;RSVP=TRUE;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-A
CTION;CN=nonexistant@example.com:mailto:nonexistant@example.com
CREATED:20080812T191857Z
DTSTAMP:20080812T191932Z
ORGANIZER;CN=User 01:mailto:xyzzy+8e16b897-d544-4217-88e9-a363d08
46f6c@example.com
SEQUENCE:2
SUMMARY:New Event
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
'''),
}
for filename, expected in data.iteritems():
msg = email.message_from_string(self.dataFile(filename))
self.assertEquals(self.receiver.checkDSN(msg), expected)
@inlineCallbacks
def test_processDSN(self):
template = """BEGIN:VCALENDAR
VERSION:2.0
CALSCALE:GREGORIAN
METHOD:REQUEST
PRODID:-//example Inc.//iCal 3.0//EN
BEGIN:VTIMEZONE
TZID:US/Pacific
BEGIN:DAYLIGHT
DTSTART:20070311T020000
RRULE:FREQ=YEARLY;BYMONTH=3;BYDAY=2SU
TZNAME:PDT
TZOFFSETFROM:-0800
TZOFFSETTO:-0700
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20071104T020000
RRULE:FREQ=YEARLY;BYMONTH=11;BYDAY=1SU
TZNAME:PST
TZOFFSETFROM:-0700
TZOFFSETTO:-0800
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
UID:1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C
DTSTART;TZID=US/Pacific:20080812T094500
DTEND;TZID=US/Pacific:20080812T104500
ATTENDEE;CUTYPE=INDIVIDUAL;CN=User 01;PARTSTAT=ACCEPTED:mailto:user01@exam
ple.com
ATTENDEE;CUTYPE=INDIVIDUAL;RSVP=TRUE;ROLE=REQ-PARTICIPANT;PARTSTAT=NEEDS-A
CTION;CN=nonexistant@example.com:mailto:nonexistant@example.com
CREATED:20080812T191857Z
DTSTAMP:20080812T191932Z
ORGANIZER;CN=User 01:mailto:xyzzy+%s@example.com
SEQUENCE:2
SUMMARY:New Event
TRANSP:OPAQUE
END:VEVENT
END:VCALENDAR
"""
# Make sure an unknown token is not processed
calBody = template % "bogus_token"
self.assertEquals(
(yield self.receiver.processDSN(calBody, "xyzzy")),
MailReceiver.UNKNOWN_TOKEN
)
# Make sure a known token *is* processed
txn = self.store.newTransaction()
record = (yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:user02@example.com",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C"
))
yield txn.commit()
calBody = template % record.token
result = (yield self.receiver.processDSN(calBody, "xyzzy"))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReply(self):
# Make sure an unknown token in an older email is deleted
msg = email.message_from_string(self.dataFile('good_reply_past'))
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.UNKNOWN_TOKEN_OLD)
# Make sure an unknown token is not processed
msg = email.message_from_string(self.dataFile('good_reply_future'))
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.UNKNOWN_TOKEN)
# Make sure a known token *is* processed
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:xyzzy@example.com",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReplyMissingOrganizer(self):
msg = email.message_from_string(self.dataFile('reply_missing_organizer'))
# stick the token in the database first
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:xyzzy@example.com",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReplyMissingAttendee(self):
msg = email.message_from_string(self.dataFile('reply_missing_attendee'))
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:xyzzy@example.com",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.INJECTION_SUBMITTED)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_processReplyMissingAttachment(self):
msg = email.message_from_string(
self.dataFile('reply_missing_attachment')
)
# stick the token in the database first
txn = self.store.newTransaction()
yield txn.imipCreateToken(
"urn:x-uid:5A985493-EE2C-4665-94CF-4DFEA3A89500",
"mailto:xyzzy@example.com",
"1E71F9C8-AEDA-48EB-98D0-76E898F6BB5C",
token="d7cdf68d-8b73-4df1-ad3b-f08002fb285f"
)
yield txn.commit()
result = (yield self.receiver.processReply(msg))
self.assertEquals(result, MailReceiver.REPLY_FORWARDED_TO_ORGANIZER)
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
@inlineCallbacks
def test_injectMessage(self):
calendar = Component.fromString("""BEGIN:VCALENDAR
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
VERSION:2.0
METHOD:REPLY
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20130208T120000Z
DTSTART:20180601T120000Z
DTEND:20180601T130000Z
ORGANIZER:urn:x-uid:user01
ATTENDEE:mailto:xyzzy@example.com;PARTSTAT=ACCEPTED
END:VEVENT
END:VCALENDAR
""")
txn = self.store.newTransaction()
result = (yield injectMessage(
txn,
"urn:x-uid:user01",
"mailto:xyzzy@example.com",
calendar
))
yield txn.commit()
self.assertEquals(
"1.2;Scheduling message has been delivered",
result.responses[0].reqstatus.toString()
)
@inlineCallbacks
def test_injectMessageWithError(self):
calendar = Component.fromString("""BEGIN:VCALENDAR
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
VERSION:2.0
METHOD:REPLY
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20130208T120000Z
DTSTART:20180601T120000Z
DTEND:20180601T130000Z
ORGANIZER:urn:x-uid:unknown_user
ATTENDEE:mailto:xyzzy@example.com;PARTSTAT=ACCEPTED
END:VEVENT
END:VCALENDAR
""")
txn = self.store.newTransaction()
result = (yield injectMessage(
txn,
"urn:x-uid:unknown_user",
"mailto:xyzzy@example.com",
calendar
))
yield txn.commit()
self.assertEquals(
"3.7;Invalid Calendar User",
result.responses[0].reqstatus.toString()
)
@inlineCallbacks
def test_work(self):
calendar = """BEGIN:VCALENDAR
PRODID:-//CALENDARSERVER.ORG//NONSGML Version 1//EN
VERSION:2.0
METHOD:REPLY
BEGIN:VEVENT
UID:12345-67890
DTSTAMP:20130208T120000Z
DTSTART:20180601T120000Z
DTEND:20180601T130000Z
ORGANIZER:urn:x-uid:user01
ATTENDEE:mailto:xyzzy@example.com;PARTSTAT=ACCEPTED
END:VEVENT
END:VCALENDAR
"""
txn = self.store.newTransaction()
yield txn.enqueue(
IMIPReplyWork,
organizer="urn:x-uid:user01",
attendee="mailto:xyzzy@example.com",
icalendarText=calendar
)
yield txn.commit()
yield JobItem.waitEmpty(self.store.newTransaction, reactor, 60)
def test_shouldDeleteAllMail(self):
# Delete if the mail server is on the same host and using our
# dedicated account:
self.assertTrue(shouldDeleteAllMail(
"calendar.example.com",
"calendar.example.com",
"com.apple.calendarserver"
))
self.assertTrue(shouldDeleteAllMail(
"calendar.example.com",
"localhost",
"com.apple.calendarserver"
))
# Don't delete all otherwise:
self.assertFalse(shouldDeleteAllMail(
"calendar.example.com",
"calendar.example.com",
"not_ours"
))
self.assertFalse(shouldDeleteAllMail(
"calendar.example.com",
"localhost",
"not_ours"
))
self.assertFalse(shouldDeleteAllMail(
"calendar.example.com",
"mail.example.com",
"com.apple.calendarserver"
))
@inlineCallbacks
def test_deletion(self):
"""
Verify the IMAP protocol will delete messages only when the right
conditions are met. Either:
A) We've been told to delete all mail
B) We've not been told to delete all mail, but it was a message
we processed
"""
def stubFetchNextMessage():
pass
def stubCbFlagDeleted(result):
self.flagDeletedResult = result
return succeed(None)
proto = IMAP4DownloadProtocol()
self.patch(proto, "fetchNextMessage", stubFetchNextMessage)
self.patch(proto, "cbFlagDeleted", stubCbFlagDeleted)
results = {
"ignored" : (
{
"RFC822" : "a message"
}
)
}
# Delete all mail = False; action taken = submitted; result = deletion
proto.factory = StubFactory(MailReceiver.INJECTION_SUBMITTED, False)
self.flagDeletedResult = None
yield proto.cbGotMessage(results, "xyzzy")
self.assertEquals(self.flagDeletedResult, "xyzzy")
# Delete all mail = False; action taken = not submitted; result = no deletion
proto.factory = StubFactory(MailReceiver.NO_TOKEN, False)
self.flagDeletedResult = None
yield proto.cbGotMessage(results, "xyzzy")
self.assertEquals(self.flagDeletedResult, None)
# Delete all mail = True; action taken = submitted; result = deletion
proto.factory = StubFactory(MailReceiver.INJECTION_SUBMITTED, True)
self.flagDeletedResult = None
yield proto.cbGotMessage(results, "xyzzy")
self.assertEquals(self.flagDeletedResult, "xyzzy")
# Delete all mail = True; action taken = not submitted; result = deletion
proto.factory = StubFactory(MailReceiver.NO_TOKEN, True)
self.flagDeletedResult = None
yield proto.cbGotMessage(results, "xyzzy")
self.assertEquals(self.flagDeletedResult, "xyzzy")
@inlineCallbacks
def test_missingIMAPMessages(self):
"""
Make sure L{IMAP4DownloadProtocol.cbGotMessage} can deal with missing messages.
"""
class DummyResult(object):
def __init__(self):
self._values = []
def values(self):
return self._values
noResult = DummyResult()
missingKey = DummyResult()
missingKey.values().append({})
imap4 = IMAP4DownloadProtocol()
imap4.messageUIDs = []
imap4.fetchNextMessage = lambda : None
result = yield imap4.cbGotMessage(noResult, [])
self.assertTrue(result is None)
result = yield imap4.cbGotMessage(missingKey, [])
self.assertTrue(result is None)
class StubFactory(object):
def __init__(self, actionTaken, deleteAllMail):
self.actionTaken = actionTaken
self.deleteAllMail = deleteAllMail
def handleMessage(self, messageData):
return succeed(self.actionTaken)
|
red-hood/calendarserver
|
txdav/caldav/datastore/scheduling/imip/test/test_inbound.py
|
Python
|
apache-2.0
| 15,369
|
import io
from molotov.api import get_fixture
_UNREADABLE = "***WARNING: Molotov can't display this body***"
_BINARY = "**** Binary content ****"
_FILE = "**** File content ****"
_COMPRESSED = ('gzip', 'compress', 'deflate', 'identity', 'br')
class BaseListener(object):
async def __call__(self, event, **options):
attr = getattr(self, 'on_' + event, None)
if attr is not None:
await attr(**options)
class StdoutListener(BaseListener):
def __init__(self, **options):
self.verbose = options.get('verbose', 0)
self.console = options['console']
def _body2str(self, body):
try:
from aiohttp.payload import Payload
except ImportError:
Payload = None
if Payload is not None and isinstance(body, Payload):
body = body._value
if isinstance(body, io.IOBase):
return _FILE
if not isinstance(body, str):
try:
body = str(body, 'utf8')
except UnicodeDecodeError:
return _UNREADABLE
return body
async def on_sending_request(self, session, request):
if self.verbose < 2:
return
raw = '>' * 45
raw += '\n' + request.method + ' ' + str(request.url)
if len(request.headers) > 0:
headers = '\n'.join('%s: %s' % (k, v) for k, v in
request.headers.items())
raw += '\n' + headers
if request.headers.get('Content-Encoding') in _COMPRESSED:
raw += '\n\n' + _BINARY + '\n'
elif request.body:
raw += '\n\n' + self._body2str(request.body) + '\n'
self.console.print(raw)
async def on_response_received(self, session, response, request):
if self.verbose < 2:
return
raw = '\n' + '=' * 45 + '\n'
raw += 'HTTP/1.1 %d %s\n' % (response.status, response.reason)
items = response.headers.items()
headers = '\n'.join('{}: {}'.format(k, v) for k, v in items)
raw += headers
if response.headers.get('Content-Encoding') in _COMPRESSED:
raw += '\n\n' + _BINARY
elif response.content:
content = await response.content.read()
if len(content) > 0:
# put back the data in the content
response.content.unread_data(content)
try:
raw += '\n\n' + content.decode()
except UnicodeDecodeError:
raw += '\n\n' + _UNREADABLE
else:
raw += '\n\n'
raw += '\n' + '<' * 45 + '\n'
self.console.print(raw)
class CustomListener(object):
def __init__(self, fixture):
self.fixture = fixture
async def __call__(self, event, **options):
await self.fixture(event, **options)
class EventSender(object):
def __init__(self, console, listeners=None):
self.console = console
if listeners is None:
listeners = []
self._listeners = listeners
self._stopped = False
fixture_listeners = get_fixture('events')
if fixture_listeners is not None:
for listener in fixture_listeners:
self.add_listener(CustomListener(listener))
def add_listener(self, listener):
self._listeners.append(listener)
async def stop(self):
self._stopped = True
def stopped(self):
return self._stopped
async def send_event(self, event, **options):
for listener in self._listeners:
try:
await listener(event, **options)
except Exception as e:
self.console.print_error(e)
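# Hedged usage sketch (the listener class below is hypothetical): listeners are
# awaitable callables; subclassing BaseListener lets them react only to the
# events they define ``on_<event>`` handlers for. A sender created with
# ``EventSender(console)`` would pick this one up via
# ``sender.add_listener(_ExampleCountingListener())``.
class _ExampleCountingListener(BaseListener):
    def __init__(self):
        self.requests_sent = 0
    async def on_sending_request(self, session, request):
        self.requests_sent += 1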
|
loads/ailoads
|
molotov/listeners.py
|
Python
|
apache-2.0
| 3,733
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for running and managing PostgreSQL with Flocker.
"""
from unittest import skipUnless
from uuid import uuid4
from pyrsistent import pmap, freeze, thaw
from twisted.python.filepath import FilePath
from twisted.trial.unittest import TestCase
from flocker.control import (
Application, DockerImage, AttachedVolume, Port, Dataset, Manifestation,
)
from flocker.testtools import loop_until
from .testtools import (assert_expected_deployment, flocker_deploy, get_nodes,
require_flocker_cli, require_moving_backend)
from ..testtools import REALISTIC_BLOCKDEVICE_SIZE
try:
from pg8000 import connect, InterfaceError, ProgrammingError
PG8000_INSTALLED = True
except ImportError:
PG8000_INSTALLED = False
POSTGRES_INTERNAL_PORT = 5432
POSTGRES_EXTERNAL_PORT = 5432
POSTGRES_APPLICATION_NAME = u"postgres-volume-example"
POSTGRES_IMAGE = u"postgres"
POSTGRES_VOLUME_MOUNTPOINT = u'/var/lib/postgresql/data'
POSTGRES_APPLICATION = Application(
name=POSTGRES_APPLICATION_NAME,
image=DockerImage.from_string(POSTGRES_IMAGE + u':latest'),
ports=frozenset([
Port(internal_port=POSTGRES_INTERNAL_PORT,
external_port=POSTGRES_EXTERNAL_PORT),
]),
volume=AttachedVolume(
manifestation=Manifestation(
dataset=Dataset(
dataset_id=unicode(uuid4()),
metadata=pmap({"name": POSTGRES_APPLICATION_NAME}),
maximum_size=REALISTIC_BLOCKDEVICE_SIZE),
primary=True),
mountpoint=FilePath(POSTGRES_VOLUME_MOUNTPOINT),
),
)
class PostgresTests(TestCase):
"""
Tests for running and managing PostgreSQL with Flocker.
"""
@require_flocker_cli
def setUp(self):
"""
Deploy PostgreSQL to a node.
"""
getting_nodes = get_nodes(self, num_nodes=2)
def deploy_postgres(node_ips):
self.node_1, self.node_2 = node_ips
postgres_deployment = {
u"version": 1,
u"nodes": {
self.node_1: [POSTGRES_APPLICATION_NAME],
self.node_2: [],
},
}
self.postgres_deployment_moved = {
u"version": 1,
u"nodes": {
self.node_1: [],
self.node_2: [POSTGRES_APPLICATION_NAME],
},
}
self.postgres_application = {
u"version": 1,
u"applications": {
POSTGRES_APPLICATION_NAME: {
u"image": POSTGRES_IMAGE,
u"ports": [{
u"internal": POSTGRES_INTERNAL_PORT,
u"external": POSTGRES_EXTERNAL_PORT,
}],
u"volume": {
u"dataset_id":
POSTGRES_APPLICATION.volume.dataset.dataset_id,
# The location within the container where the data
# volume will be mounted; see:
# https://github.com/docker-library/postgres/blob/
# docker/Dockerfile.template
u"mountpoint": POSTGRES_VOLUME_MOUNTPOINT,
u"maximum_size":
"%d" % (REALISTIC_BLOCKDEVICE_SIZE,),
},
},
},
}
self.postgres_application_different_port = thaw(freeze(
self.postgres_application).transform(
[u"applications", POSTGRES_APPLICATION_NAME, u"ports", 0,
u"external"], POSTGRES_EXTERNAL_PORT + 1))
flocker_deploy(self, postgres_deployment,
self.postgres_application)
getting_nodes.addCallback(deploy_postgres)
return getting_nodes
def test_deploy(self):
"""
Verify that Docker reports that PostgreSQL is running on one node and
not another.
"""
return assert_expected_deployment(self, {
self.node_1: set([POSTGRES_APPLICATION]),
self.node_2: set([]),
})
@require_moving_backend
def test_moving_postgres(self):
"""
It is possible to move PostgreSQL to a new node.
"""
flocker_deploy(self, self.postgres_deployment_moved,
self.postgres_application)
return assert_expected_deployment(self, {
self.node_1: set([]),
self.node_2: set([POSTGRES_APPLICATION]),
})
def _get_postgres_connection(self, host, user, port, database=None):
"""
        Returns a ``Deferred`` which fires with a pg8000 connection when one
has been created.
See http://pythonhosted.org//pg8000/dbapi.html#pg8000.connect for
parameter information.
"""
def connect_to_postgres():
try:
return connect(host=host, user=user, port=port,
database=database)
except (InterfaceError, ProgrammingError):
return False
d = loop_until(connect_to_postgres)
return d
@skipUnless(PG8000_INSTALLED, "pg8000 not installed")
@require_moving_backend
def test_moving_postgres_data(self):
"""
PostgreSQL and its data can be deployed and moved with Flocker. In
particular, if PostgreSQL is deployed to a node, and data added to it,
and then the application is moved to another node, the data remains
available.
"""
database = b'flockertest'
user = b'postgres'
connecting_to_application = self._get_postgres_connection(
host=self.node_1,
user=user,
port=POSTGRES_EXTERNAL_PORT,
)
def create_database(connection_to_application):
connection_to_application.autocommit = True
application_cursor = connection_to_application.cursor()
application_cursor.execute("CREATE DATABASE flockertest;")
application_cursor.close()
connection_to_application.close()
connecting_to_application.addCallback(create_database)
def connect_to_database(ignored):
return self._get_postgres_connection(
host=self.node_1,
user=user,
port=POSTGRES_EXTERNAL_PORT,
database=database,
)
connecting_to_database = connecting_to_application.addCallback(
connect_to_database)
def add_data_node_1(db_connection_node_1):
db_node_1_cursor = db_connection_node_1.cursor()
db_node_1_cursor.execute(
"CREATE TABLE testtable (testcolumn int);")
db_node_1_cursor.execute(
"INSERT INTO testtable (testcolumn) VALUES (3);")
db_node_1_cursor.execute("SELECT * FROM testtable;")
db_connection_node_1.commit()
fetched_data = db_node_1_cursor.fetchone()[0]
db_node_1_cursor.close()
db_connection_node_1.close()
self.assertEqual(fetched_data, 3)
connecting_to_database.addCallback(add_data_node_1)
def get_postgres_node_2(ignored):
"""
Move PostgreSQL to ``node_2`` and return a ``Deferred`` which fires
with a connection to the previously created database on ``node_2``.
"""
flocker_deploy(self, self.postgres_deployment_moved,
self.postgres_application_different_port)
return self._get_postgres_connection(
host=self.node_2,
user=user,
port=POSTGRES_EXTERNAL_PORT + 1,
database=database,
)
getting_postgres_2 = connecting_to_database.addCallback(
get_postgres_node_2)
def verify_data_moves(db_connection_node_2):
db_node_2_cursor = db_connection_node_2.cursor()
db_node_2_cursor.execute("SELECT * FROM testtable;")
fetched_data = db_node_2_cursor.fetchone()[0]
db_node_2_cursor.close()
db_connection_node_2.close()
self.assertEqual(fetched_data, 3)
verifying_data_moves = getting_postgres_2.addCallback(
verify_data_moves)
return verifying_data_moves
|
runcom/flocker
|
flocker/acceptance/test_postgres.py
|
Python
|
apache-2.0
| 8,587
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Provides generic deployment steps for machines post boot.
"""
import os
class Deployment(object):
"""
Base class for deployment tasks.
"""
def run(self, node, client):
"""
Runs this deployment task on C{node} using the C{client} provided.
@type node: L{Node}
        @keyword node: Node to operate on
@type client: L{BaseSSHClient}
@keyword client: Connected SSH client to use.
@return: L{Node}
"""
raise NotImplementedError, \
'run not implemented for this deployment'
class SSHKeyDeployment(Deployment):
"""
Installs a public SSH Key onto a host.
"""
def __init__(self, key):
"""
@type key: C{str}
@keyword key: Contents of the public key write
"""
self.key = key
def run(self, node, client):
"""
Installs SSH key into C{.ssh/authorized_keys}
See also L{Deployment.run}
"""
client.put(".ssh/authorized_keys", contents=self.key)
return node
class ScriptDeployment(Deployment):
"""
Runs an arbitrary Shell Script task.
"""
def __init__(self, script, name=None, delete=False):
"""
@type script: C{str}
@keyword script: Contents of the script to run
@type name: C{str}
        @keyword name: Name of the script to upload it as; if not specified, a random name will be chosen.
@type delete: C{bool}
@keyword delete: Whether to delete the script on completion.
"""
self.script = script
self.stdout = None
self.stderr = None
self.exit_status = None
self.delete = delete
self.name = name
if self.name is None:
self.name = "/root/deployment_%s.sh" % (os.urandom(4).encode('hex'))
def run(self, node, client):
"""
Uploads the shell script and then executes it.
See also L{Deployment.run}
"""
client.put(path=self.name, chmod=755, contents=self.script)
self.stdout, self.stderr, self.exit_status = client.run(self.name)
if self.delete:
client.delete(self.name)
return node
class MultiStepDeployment(Deployment):
"""
Runs a chain of Deployment steps.
"""
def __init__(self, add = None):
"""
@type add: C{list}
@keyword add: Deployment steps to add.
"""
self.steps = []
self.add(add)
def add(self, add):
"""Add a deployment to this chain.
@type add: Single L{Deployment} or a C{list} of L{Deployment}
@keyword add: Adds this deployment to the others already in this object.
"""
if add is not None:
add = add if isinstance(add, (list, tuple)) else [add]
self.steps.extend(add)
def run(self, node, client):
"""
Run each deployment that has been added.
See also L{Deployment.run}
"""
for s in self.steps:
node = s.run(node, client)
return node
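# Hedged usage sketch (the key and script contents, and the helper name, are
# hypothetical): deployment steps are chained with MultiStepDeployment and
# executed in order by ``run``.
def _example_chain():
    return MultiStepDeployment([
        SSHKeyDeployment(key="ssh-rsa AAAA... user@example"),
        ScriptDeployment(script="#!/bin/bash\napt-get -y update", delete=True),
    ])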
|
cloudkick/libcloud
|
libcloud/compute/deployment.py
|
Python
|
apache-2.0
| 3,855
|
from __future__ import absolute_import
from hamcrest.library import *
from hamcrest.core import *
|
eve-basil/common
|
tests/__init__.py
|
Python
|
apache-2.0
| 98
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests /freebase/object_hints/best_hrid resolution.
/freebase/object_hints/best_hrid specifies a persistent HRID
for an entity. This should be favored over the earlier MQL
algorithm for choosing an HRID based on namespace traversal
and various heuristics.
"""
__author__ = 'nix@google.com (Nick Thompson)'
import json
import random
import string
import google3
from pymql.mql import error
from pymql.test import mql_fixture
class HRIDTest(mql_fixture.MQLTest):
"""Tests HRID queries using mqlread."""
def setUp(self):
# NOTE: the mock graphd support is broken, so there is no best_hrid.yaml
#self.SetMockPath('data/best_hrid.yaml')
super(HRIDTest, self).setUp()
self.env = {'user': '/user/mw_brendan'}
def newNodeWithHRID(self, best_hrid):
query = """
{
"create":"unless_exists",
"/freebase/object_hints/best_hrid": "%s",
"guid":null
}
""" % best_hrid
self.DoQuery(query, mqlwrite=True)
self.assertEquals(self.mql_result.result["create"],
"created")
return self.mql_result.result["guid"]
def query_assert(self, q, r, exc_response=None, type="mqlread", asof=None):
self.env = {}
if asof is not None:
self.env["as_of_time"] = asof
self.DoQuery(q, exp_response=r, exc_response=exc_response)
def test_missing_hrid(self):
"""Test that MQL still finds an id even if best_hrid is not present"""
q= '{"id":null, "guid":"#9202a8c04000641f8000000000092a01", "mid":null}'
r= ('{"guid": "#9202a8c04000641f8000000000092a01",'
'"id": "/en/sting","mid":"/m/0lbj1"}')
self.query_assert(q,r)
def test_good_hrid(self):
"""Test /type/type, a best_hrid that agrees with the MQL heuristics"""
# /m/0j == /type/type
q= '{"id":null, "mid":"/m/0j", "/freebase/object_hints/best_hrid":null}'
r= ('{"id": "/type/type","mid":"/m/0j",'
'"/freebase/object_hints/best_hrid":"/type/type"}')
self.query_assert(q, r)
def test_hrid_override(self):
"""Create a new node with a bogus best_hrid.
The old MQL heuristics will fail; check that best_hrid works.
"""
best_hrid = ('/user/nix/random_test_hrid/' +
''.join(random.choice(string.ascii_lowercase)
for x in range(16)))
guid = self.newNodeWithHRID(best_hrid)
q= (('{"id":null, "guid":"%(guid)s",'
'"/freebase/object_hints/best_hrid":null}' %
{"guid":guid}))
r= (('{"id": "%(best_hrid)s","guid":"%(guid)s",'
'"/freebase/object_hints/best_hrid":"%(best_hrid)s"}') %
{"guid":guid,"best_hrid":best_hrid})
self.query_assert(q, r)
if __name__ == '__main__':
mql_fixture.main()
|
google/pymql
|
test/best_hrid_test.py
|
Python
|
apache-2.0
| 3,260
|
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for multistep_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
from trfl import sequence_ops
def get_n_step_backup(rewards, pcontinues, state_values, start, n):
"""Evaluates a single n-step backup (return) starting at position start.
http://incompleteideas.net/sutton/book/ebook/node73.html (Eq. 7.1)
Args:
rewards: a list containing a sequence of rewards.
pcontinues: a list containing a sequence of discounts.
state_values: a list containing a sequence of state-values.
start: position at which the n-Step return has to be evaluated.
n: number of steps over which rewards are summed before adding the
respective bootstrapped state-value.
Returns:
Sum of discounted rewards plus discounted bootstrapped value.
"""
accumulator = 0.0
k = 1.0
for i in xrange(start, start + n):
accumulator += k * rewards[i]
k *= pcontinues[i]
accumulator += k * state_values[start + n - 1]
return accumulator
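# Hedged worked example (values chosen only for illustration): with
# rewards=[1, 2], pcontinues=[0.5, 0.5] and state_values=[0, 10], the 2-step
# backup from position 0 is 1 + 0.5*2 + (0.5*0.5)*10 = 4.5.
def _example_two_step_backup():
  return get_n_step_backup([1, 2], [0.5, 0.5], [0, 10], start=0, n=2)  # == 4.5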
def get_complex_n_step_backup(rewards, pcontinues, state_values, start, n,
lambda_):
"""Evaluates a complex n=step backup (sum of lambda-weighted n-step backups).
http://incompleteideas.net/sutton/book/ebook/node74.html (Eq. 7.3)
Args:
rewards: a list containing rewards.
pcontinues: a list containing discounts.
state_values: a list containing boostrapped state values.
start: position at which the n-Step return has to be evaluated.
n: number of steps over which rewards are summed before adding respective
boostrapped state values.
lambda_: mixing parameter lambda.
Returns:
A single complex backup.
"""
accumulator = 0.0
for t in xrange(1, n):
value = get_n_step_backup(rewards, pcontinues, state_values, start, t)
weight = (1 - lambda_) * (lambda_ ** (t - 1))
    accumulator += value * weight
value = get_n_step_backup(rewards, pcontinues, state_values, start, n)
weight = lambda_ ** (n - 1)
accumulator += value * weight
return accumulator
def get_complex_n_step_backup_at_all_times(rewards, pcontinues, state_values,
lambda_):
"""Evaluates complex n-step backups at all time-points.
Args:
rewards: a list containing rewards.
pcontinues: a list containing discounts.
state_values: a list containing bootstrapped state values.
lambda_: mixing parameter lambda.
Returns:
A list containing complex backups at all times.
"""
res = []
length = len(rewards)
for i in xrange(0, length):
res.append(get_complex_n_step_backup(rewards, pcontinues, state_values, i,
length - i, lambda_))
return res
class ScanDiscountedSumTest(tf.test.TestCase):
def testScanSumShapeInference(self):
"""scan_discounted_sum should support static shape inference."""
# No session needed since we're not evaluating any ops.
sequence_in = tf.placeholder(tf.float32, shape=[1647, 2001])
decays_in = tf.placeholder(tf.float32, shape=[1647, 2001])
bootstrap = tf.placeholder(tf.float32, shape=[2001])
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=False)
self.assertAllEqual(result.get_shape(), [1647, 2001])
# Let's do it again with higher-dimensional inputs.
sequence_in = tf.placeholder(tf.float32, shape=[4, 8, 15, 16, 23, 42])
decays_in = tf.placeholder(tf.float32, shape=[4, 8, 15, 16, 23, 42])
bootstrap = tf.placeholder(tf.float32, shape=[8, 15, 16, 23, 42])
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=False)
self.assertAllEqual(result.get_shape(), [4, 8, 15, 16, 23, 42])
def testScanSumShapeInferenceWithSeqLen(self):
"""scan_discounted_sum should support static shape inference."""
# No session needed since we're not evaluating any ops.
sequence_in = tf.placeholder(tf.float32, shape=[1647, 2001])
decays_in = tf.placeholder(tf.float32, shape=[1647, 2001])
bootstrap = tf.placeholder(tf.float32, shape=[2001])
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=False)
self.assertAllEqual(result.get_shape(), [1647, 2001])
# Let's do it again with higher-dimensional inputs.
sequence_in = tf.placeholder(tf.float32, shape=[4, 8, 15, 16, 23, 42])
decays_in = tf.placeholder(tf.float32, shape=[4, 8, 15, 16, 23, 42])
bootstrap = tf.placeholder(tf.float32, shape=[8, 15, 16, 23, 42])
sequence_lengths = tf.placeholder(tf.float32, shape=[8])
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=False,
sequence_lengths=sequence_lengths)
self.assertAllEqual(result.get_shape(), [4, 8, 15, 16, 23, 42])
def testScanSumWithDecays(self):
with self.test_session() as sess:
sequence = [[3, 1, 5, 2, 1], [-1.7, 1.2, 2.3, 0, 1]]
decays = [[0.5, 0.9, 1.0, 0.1, 0.5], [0.9, 0.5, 0.0, 2, 0.8]]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
bootstrap = tf.constant([0, 1.5], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=False)
expected_result = tf.constant(
[[3,
3 * 0.9 + 1,
(3 * 0.9 + 1) * 1.0 + 5,
((3 * 0.9 + 1) * 1.0 + 5) * 0.1 + 2,
(((3 * 0.9 + 1) * 1.0 + 5) * 0.1 + 2) * 0.5 + 1],
[-1.7 + 1.5 * 0.9,
(-1.7 + 1.5 * 0.9) * 0.5 + 1.2,
((-1.7 + 1.5 * 0.9) * 0.5 + 1.2) * 0.0 + 2.3,
(((-1.7 + 1.5 * 0.9) * 0.5 + 1.2) * 0.0 + 2.3) * 2 + 0,
((((-1.7 + 1.5 * 0.9) * 0.5 + 1.2) * 0.0 + 2.3) * 2 + 0) * 0.8 + 1,
]], dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result)))
def testScanSumWithDecaysWithSeqLen(self):
with self.test_session() as sess:
sequence = [[3, 1, 5, 2, 1], [-1.7, 1.2, 2.3, 0, 1]]
decays = [[0.5, 0.9, 1.0, 0.1, 0.5], [0.9, 0.5, 0.0, 2, 0.8]]
sequence_lengths = [0, 2]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
bootstrap = tf.constant([0, 1.5], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=False,
sequence_lengths=sequence_lengths)
expected_result = tf.constant(
[[0, 0, 0, 0, 0],
[-1.7 + 1.5 * 0.9, (-1.7 + 1.5 * 0.9) * 0.5 + 1.2, 0, 0, 0]],
dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result)))
def testScanSumEquivalenceWithSeqLen(self):
with self.test_session() as sess:
sequence_lengths = [0, 2]
bootstrap = tf.constant([0.5, 1.5], dtype=tf.float32)
sequence = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
decays = [[.1, .2, .3, .4, .5], [.6, .7, .8, .9, .10]]
eq_sequence = [[0, 0, 0, 0, 0], [6, 7, 0, 0, 0]]
eq_decays = [[0, 0, 0, 0, 0], [.6, .7, 0, 0, 0]]
eq_reverse_sequence = [[0, 0, 0, 0, 0], [7, 6, 0, 0, 0]]
eq_reverse_decays = [[0, 0, 0, 0, 0], [.7, .6, 0, 0, 0]]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
eq_sequence_in = tf.transpose(tf.constant(eq_sequence, dtype=tf.float32))
eq_decays_in = tf.transpose(tf.constant(eq_decays, dtype=tf.float32))
eq_reverse_sequence_in = tf.transpose(
tf.constant(eq_reverse_sequence, dtype=tf.float32))
eq_reverse_decays_in = tf.transpose(
tf.constant(eq_reverse_decays, dtype=tf.float32))
eq_result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=False,
sequence_lengths=sequence_lengths)
exp_eq_result = sequence_ops.scan_discounted_sum(
eq_sequence_in, eq_decays_in, bootstrap)
eq_reverse_result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=True,
sequence_lengths=sequence_lengths)
exp_eq_reverse_result = sequence_ops.scan_discounted_sum(
eq_reverse_sequence_in, eq_reverse_decays_in, bootstrap)
exp_eq_reverse_result = tf.reverse_sequence(
exp_eq_reverse_result, sequence_lengths, seq_axis=0, batch_axis=1)
self.assertAllClose(sess.run(eq_result),
sess.run(exp_eq_result))
self.assertAllClose(sess.run(eq_reverse_result),
sess.run(exp_eq_reverse_result))
def testScanSumWithDecaysReverse(self):
with self.test_session() as sess:
sequence = [[3, 1, 5], [-1.7, 1.2, 2.3]]
decays = [[0.5, 0.9, 1.0], [0.9, 0.5, 0.3]]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
bootstrap = tf.constant([0, 1.5], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=True)
expected_result = tf.constant(
[[(5 * 0.9 + 1) * 0.5 + 3,
5 * 0.9 + 1,
5],
[((2.3 + 0.3 * 1.5) * 0.5 + 1.2) * 0.9 - 1.7,
(2.3 + 0.3 * 1.5) * 0.5 + 1.2,
2.3 + 0.3 * 1.5,
]], dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result)))
def testScanSumWithDecaysReverseWithSeqLen(self):
with self.test_session() as sess:
sequence = [[3, 1, 5], [-1.7, 1.2, 2.3]]
decays = [[0.5, 0.9, 1.0], [0.9, 0.5, 0.3]]
sequence_lengths = [2, 0]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32))
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32))
bootstrap = tf.constant([2.5, 1.5], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=True,
sequence_lengths=sequence_lengths)
expected_result = tf.constant(
[[(0.9 * 2.5 + 1) * 0.5 + 3, (0.9 * 2.5 + 1), 0], [0, 0, 0]],
dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result)))
def testScanSumWithDecaysReverse3D(self):
"""scan_discounted_sum vs. higher-dimensional arguments."""
with self.test_session() as sess:
sequence = [[[3, 33], [1, 11], [5, 55]],
[[-1.7, -17], [1.2, 12], [2.3, 23]]]
decays = [[[0.5, 5], [0.9, 9], [1.0, 10]],
[[0.9, 9], [0.5, 5], [0.3, 3]]]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32),
perm=[1, 0, 2])
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32),
perm=[1, 0, 2])
bootstrap = tf.constant([[0, 0], [1.5, 15]], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(sequence_in, decays_in,
bootstrap,
reverse=True)
expected_result = tf.constant(
[[[(5 * 0.9 + 1) * 0.5 + 3,
(55 * 9 + 11) * 5 + 33],
[5 * 0.9 + 1,
55 * 9 + 11],
[5,
55]],
[[((2.3 + 0.3 * 1.5) * 0.5 + 1.2) * 0.9 - 1.7,
((23 + 3 * 15) * 5 + 12) * 9 - 17],
[(2.3 + 0.3 * 1.5) * 0.5 + 1.2,
(23 + 3 * 15) * 5 + 12],
[2.3 + 0.3 * 1.5,
23 + 3 * 15]]],
dtype=tf.float32)
self.assertAllClose(sess.run(result),
sess.run(tf.transpose(expected_result,
perm=[1, 0, 2])))
def testScanSumWithDecaysReverse3DWithSeqLen(self):
"""scan_discounted_sum vs. higher-dimensional arguments."""
with self.test_session() as sess:
sequence = [[[3, 33], [1, 11], [5, 55]],
[[-1.7, -17], [1.2, 12], [2.3, 23]]]
decays = [[[0.5, 5], [0.9, 9], [1.0, 10]],
[[0.9, 9], [0.5, 5], [0.3, 3]]]
sequence_lengths = [2, 0]
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
sequence_in = tf.transpose(tf.constant(sequence, dtype=tf.float32),
perm=[1, 0, 2])
decays_in = tf.transpose(tf.constant(decays, dtype=tf.float32),
perm=[1, 0, 2])
bootstrap = tf.constant([[0, 0], [1.5, 15]], dtype=tf.float32)
result = sequence_ops.scan_discounted_sum(
sequence_in, decays_in, bootstrap, reverse=True,
sequence_lengths=sequence_lengths)
expected_result = np.asarray(
[[[1 * 0.5 + 3, 11 * 5 + 33], [1, 11], [0, 0]],
[[0, 0], [0, 0], [0, 0]]], dtype=np.float32)
self.assertAllClose(sess.run(result),
np.transpose(expected_result, axes=[1, 0, 2]))
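# A minimal NumPy sketch (illustrative only; the helper name below is made up and
# is not used by the tests) of the recurrence the reverse scans above verify:
#   result[t] = sequence[t] + decay[t] * result[t + 1], seeded with `bootstrap`.
def _reference_reverse_discounted_sum(sequence, decays, bootstrap):
  """Reverse discounted sum for a single [T]-shaped sequence, in plain NumPy."""
  out = np.zeros(len(sequence), dtype=np.float64)
  acc = bootstrap
  for t in reversed(range(len(sequence))):
    acc = sequence[t] + decays[t] * acc
    out[t] = acc
  return out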
class MultistepForwardViewTest(tf.test.TestCase):
def testMultistepForwardView(self):
with self.test_session() as sess:
# Define input data.
rewards = [[1, 0, -1, 0, 1], [0.5, 0.8, -0.7, 0.0, 2.1]]
pcontinues = [[0.5, 0.9, 1.0, 0.5, 0.8], [0.9, 0.5, 0.3, 0.8, 0.7]]
state_values = [[3, 1, 5, -5, 3], [-1.7, 1.2, 2.3, 2.2, 2.7]]
lambda_ = 0.75
# Evaluate expected complex backups at all time-steps for both batches.
expected_result = []
for b in xrange(0, 2):
expected_result.append(
get_complex_n_step_backup_at_all_times(rewards[b], pcontinues[b],
state_values[b], lambda_))
# Only partially-specify the input shapes - verifies that the
# dynamically sized Tensors are handled correctly.
state_values_pl = tf.placeholder(tf.float32, shape=[None, None])
rewards_pl = tf.placeholder(tf.float32, shape=[None, None])
pcontinues_pl = tf.placeholder(tf.float32, shape=[None, None])
# We use transpose because it is easier to define the input data in
# BxT (batch x time) form, while scan_discounted_sum assumes TxB form.
state_values_in = tf.transpose(state_values_pl)
rewards_in = tf.transpose(rewards_pl)
pcontinues_in = tf.transpose(pcontinues_pl)
expected = tf.transpose(tf.constant(expected_result, dtype=tf.float32))
# Evaluate complex backups.
result = sequence_ops.multistep_forward_view(rewards_in, pcontinues_in,
state_values_in, lambda_)
feed_dict = {state_values_pl: state_values,
rewards_pl: rewards,
pcontinues_pl: pcontinues}
self.assertAllClose(sess.run(result, feed_dict=feed_dict),
sess.run(expected))
if __name__ == "__main__":
tf.test.main()
|
deepmind/trfl
|
trfl/sequence_ops_test.py
|
Python
|
apache-2.0
| 17,471
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Backend compiler related feature registration"""
# pylint: disable=invalid-name,unused-argument, len-as-condition, too-many-nested-blocks, too-many-local-variables, too-many-arguments
from __future__ import absolute_import
import tvm
from tvm import te
from tvm.te.hybrid import script
from tvm.runtime import convert
from tvm import topi
from tvm.topi.util import get_const_int, get_const_tuple
from . import op as _reg
from . import strategy
from .op import OpPattern
from ._tensor import elemwise_shape_func
_reg.register_broadcast_schedule("broadcast_to")
_reg.register_broadcast_schedule("broadcast_to_like")
_reg.register_broadcast_schedule("expand_dims")
_reg.register_broadcast_schedule("repeat")
_reg.register_broadcast_schedule("tile")
_reg.register_broadcast_schedule("where")
_reg.register_injective_schedule("squeeze")
_reg.register_injective_schedule("reshape")
_reg.register_injective_schedule("reshape_like")
_reg.register_injective_schedule("full")
_reg.register_injective_schedule("full_like")
_reg.register_injective_schedule("arange")
_reg.register_injective_schedule("meshgrid")
_reg.register_injective_schedule("reverse")
_reg.register_injective_schedule("reverse_sequence")
_reg.register_injective_schedule("cast")
_reg.register_injective_schedule("cast_like")
_reg.register_injective_schedule("reinterpret")
_reg.register_injective_schedule("strided_slice")
_reg.register_injective_schedule("slice_like")
_reg.register_injective_schedule("split")
_reg.register_injective_schedule("take")
_reg.register_injective_schedule("transpose")
_reg.register_injective_schedule("stack")
_reg.register_injective_schedule("contrib_reverse_reshape")
_reg.register_injective_schedule("gather")
_reg.register_injective_schedule("gather_nd")
_reg.register_injective_schedule("sequence_mask")
_reg.register_injective_schedule("one_hot")
_reg.register_reduce_schedule("collapse_sum_like")
_reg.register_reduce_schedule("collapse_sum_to")
_reg.register_injective_schedule("unravel_index")
_reg.register_injective_schedule("sparse_to_dense")
_reg.register_injective_schedule("matrix_set_diag")
_reg.register_injective_schedule("adv_index")
# concatenate
_reg.register_schedule("concatenate", strategy.schedule_concatenate)
# strided_set
@_reg.register_compute("strided_set")
def compute_strided_set(attrs, inputs, output_type):
"""Compute definition of strided_set"""
return [topi.strided_set(inputs[0], inputs[1], inputs[2], inputs[3], inputs[4])]
_reg.register_injective_schedule("strided_set")
# layout_transform
_reg.register_injective_schedule("layout_transform")
_reg.register_pattern("layout_transform", OpPattern.INJECTIVE)
# argwhere
@_reg.register_compute("argwhere")
def compute_argwhere(attrs, inputs, output_type):
"""Compute definition of argwhere"""
output_shape = []
for s in output_type.shape:
if hasattr(s, "value"):
output_shape.append(s)
else:
# see Any, replace it with a var
output_shape.append(te.var("any_dim", "int32"))
new_output_type = tvm.relay.ty.TensorType(output_shape, "int32")
return [topi.argwhere(new_output_type, inputs[0])]
_reg.register_schedule("argwhere", strategy.schedule_argwhere)
# scatter
@_reg.register_compute("scatter")
def compute_scatter(attrs, inputs, output_type):
"""Compute definition of scatter"""
return [topi.scatter(inputs[0], inputs[1], inputs[2], attrs.axis)]
_reg.register_schedule("scatter", strategy.schedule_scatter)
# scatter_add
@_reg.register_compute("scatter_add")
def compute_scatter_add(attrs, inputs, output_type):
"""Compute definition of scatter_add"""
return [topi.scatter_add(inputs[0], inputs[1], inputs[2], attrs.axis)]
_reg.register_schedule("scatter_add", strategy.schedule_scatter_add)
#####################
# Shape functions #
#####################
@script
def _arange_shape_func(start, stop, step):
out = output_tensor((1,), "int64")
if step[0] < 0:
out[0] = int64(ceil_div((int64(start[0]) - int64(stop[0])), int64(-step[0])))
else:
out[0] = int64(ceil_div((int64(stop[0]) - int64(start[0])), int64(step[0])))
return out
@_reg.register_shape_func("arange", True)
def arange_shape_func(attrs, inputs, _):
"""
Shape func for arange
"""
return [_arange_shape_func(*inputs)]
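# Illustrative only (plain Python, not used by Relay; the helper name is made up):
# the length computed by `_arange_shape_func` above is ceil((stop - start) / step),
# with the operands swapped and the step negated when step < 0.
# For example, arange(0, 10, 3) and arange(10, 0, -3) both have 4 elements.
def _arange_length_example(start=0, stop=10, step=3):
    if step < 0:
        start, stop, step = stop, start, -step
    return -(-(stop - start) // step)  # integer ceil division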
@script
def _strided_slice_shape_func_input_shape(data_shape, begin, end, strides, slice_mode):
ndim = data_shape.shape[0]
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
cbegin = int64(0)
cend = int64(data_shape[i])
cstride = int64(1)
if len(strides) > i:
cstride = int64(strides[i])
if len(begin) > i:
cbegin = int64(begin[i])
if cbegin < 0:
cbegin += int64(data_shape[i])
if len(end) <= i:
cend = int64(data_shape[i])
elif slice_mode != 0:
cstride = int64(1)
if end[i] < 0:
cend = int64(data_shape[i])
else:
cend = cbegin + int64(end[i])
else:
if end[i] > data_shape[i]:
cend = int64(data_shape[i])
elif end[i] < -data_shape[i]:
cend = int64(-1)
else:
cend = int64(end[i])
if cend < 0:
cend += int64(data_shape[i])
assert cstride != 0, "Strides can't be zero."
if cstride < 0:
slice_range = cbegin - cend
step = -cstride
else:
slice_range = cend - cbegin
step = cstride
out[i] = int64(ceil_div(slice_range, step))
return out
@_reg.register_shape_func("strided_slice", False)
def strided_slice_shape_func(attrs, inputs, _):
"""
Shape func for strided_slice
"""
slice_mode = convert(0 if attrs.slice_mode == "end" else 1)
return [
_strided_slice_shape_func_input_shape(
inputs[0], attrs.begin, attrs.end, attrs.strides, slice_mode
)
]
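# Illustrative only (not used by Relay; the helper name is made up): for a
# positive stride, each output dimension above is ceil((end - begin) / stride)
# after clamping, e.g. begin=1, end=7, stride=2 gives ceil(6 / 2) = 3 elements.
def _strided_slice_dim_example(begin=1, end=7, stride=2):
    return -(-(end - begin) // stride)  # integer ceil division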
@script
def _concatenate_shape_func(inputs, axis):
ndim = inputs[0].shape[0]
out = output_tensor((ndim,), "int64")
for i in const_range(ndim):
if i != axis:
out[i] = inputs[0][i]
for j in const_range(1, len(inputs)):
assert out[i] == inputs[j][i], "Dims mismatch in the inputs of concatenate."
else:
out[i] = int64(0)
for j in const_range(len(inputs)):
out[i] += inputs[j][i]
return out
@_reg.register_shape_func("concatenate", False)
def concatenate_shape_func(attrs, inputs, _):
axis = get_const_int(attrs.axis)
if axis < 0:
axis += inputs[0].shape[0]
return [_concatenate_shape_func(inputs, convert(axis))]
@script
def _reshape_shape_func_input_shape(data_shape, newshape, ndim):
out = output_tensor((ndim,), "int64")
src_idx = 0
dst_idx = 0
infer_idx = -1
copy = False
skip = 0
for i in const_range(len(newshape)):
if skip > 0:
skip -= 1
elif newshape[i] > 0:
out[dst_idx] = int64(newshape[i])
src_idx += 1
dst_idx += 1
elif newshape[i] == 0:
out[dst_idx] = data_shape[src_idx]
src_idx += 1
dst_idx += 1
elif newshape[i] == -1:
assert infer_idx < 0, "One and only one dim can be inferred"
out[dst_idx] = int64(1)
infer_idx = i
dst_idx += 1
elif newshape[i] == -2:
copy = True
elif newshape[i] == -3:
assert data_shape.shape[0] - src_idx > 1, "Not enough dims in input shape for -3"
out[dst_idx] = data_shape[src_idx] * data_shape[src_idx + 1]
src_idx += 2
dst_idx += 1
elif newshape[i] == -4:
assert len(newshape) - i > 2, "Not enough dims in new shape for -4"
if newshape[i + 1] == -1:
assert newshape[i + 2] != -1, "Split dims cannot both be -1."
out[dst_idx] = data_shape[src_idx] // int64(newshape[i + 2])
out[dst_idx + 1] = int64(newshape[i + 2])
else:
out[dst_idx] = int64(newshape[i + 1])
if newshape[i + 2] == -1:
out[dst_idx + 1] = data_shape[src_idx] // int64(newshape[i + 1])
else:
out[dst_idx + 1] = int64(newshape[i + 2])
assert (
data_shape[src_idx] == out[dst_idx] * out[dst_idx + 1]
), "Product of split dims doesn't match to input dim"
src_idx += 1
dst_idx += 2
skip = 2
else:
assert False, "Invalid special values in new shape"
if len(data_shape.shape) > 0:
# if data is not constant, we can then handle -1 and -2
if copy:
for i in range(src_idx, data_shape.shape[0]):
out[dst_idx] = data_shape[i]
dst_idx += 1
if infer_idx >= 0:
old_size = int64(1)
for i in const_range(data_shape.shape[0]):
old_size *= data_shape[i]
new_size = int64(1)
for i in const_range(out.shape[0]):
new_size *= out[i]
out[infer_idx] = old_size // new_size
return out
@_reg.register_shape_func("reshape", False)
def reshape_shape_func(attrs, inputs, out_ndims):
newshape = get_const_tuple(attrs.newshape)
return [_reshape_shape_func_input_shape(inputs[0], convert(newshape), out_ndims[0])]
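# A plain-Python sketch (illustrative only; not used by Relay) of the special
# `newshape` codes handled above: 0 copies an input dim, -1 infers a single dim
# from the remaining size, and -3 merges two consecutive input dims.
def _reshape_newshape_example(data_shape=(2, 3, 4)):
    # newshape = (0, -3): keep dim 0, merge dims 1 and 2 -> (2, 12)
    merged = (data_shape[0], data_shape[1] * data_shape[2])
    # newshape = (-1, 4): infer dim 0 as prod(data_shape) // 4 -> (6, 4)
    total = data_shape[0] * data_shape[1] * data_shape[2]
    inferred = (total // 4, 4)
    return merged, inferred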
@script
def _take_no_axis_shape_func(indices_shape, out_ndim):
out = output_tensor((out_ndim,), "int64")
for i in const_range(out_ndim):
out[i] = indices_shape[i]
return out
@script
def _take_with_axis_shape_func(data_shape, indices_shape, axis, out_ndim):
out = output_tensor((out_ndim,), "int64")
for i in const_range(axis):
out[i] = data_shape[i]
if len(indices_shape.shape) == 0:
# indices is constant
for i in const_range(axis + 1, len(data_shape)):
out[i - 1] = data_shape[i]
else:
for i in const_range(len(indices_shape)):
out[axis + i] = indices_shape[i]
for i in const_range(axis + 1, len(data_shape)):
out[len(indices_shape) + i - 1] = data_shape[i]
return out
@_reg.register_shape_func("take", False)
def take_shape_func(attrs, inputs, out_ndims):
"""
Shape function for take op.
"""
if attrs.axis is None:
return [_take_no_axis_shape_func(inputs[1], out_ndims[0])]
axis = get_const_int(attrs.axis)
data_ndim = int(inputs[0].shape[0])
if axis < 0:
axis += data_ndim
assert 0 <= axis < data_ndim
return [_take_with_axis_shape_func(*inputs, convert(axis), out_ndims[0])]
@script
def _argwhere_shape_func_1d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(1)
for i1 in range(condition.shape[0]):
if condition[i1] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_2d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(2)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
if condition[i1, i2] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_3d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(3)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
for i3 in range(condition.shape[2]):
if condition[i1, i2, i3] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_4d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(4)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
for i3 in range(condition.shape[2]):
for i4 in range(condition.shape[3]):
if condition[i1, i2, i3, i4] != 0:
out[0] += int64(1)
return out
@script
def _argwhere_shape_func_5d(condition):
out = output_tensor((2,), "int64")
out[0] = int64(0)
out[1] = int64(5)
for i1 in range(condition.shape[0]):
for i2 in range(condition.shape[1]):
for i3 in range(condition.shape[2]):
for i4 in range(condition.shape[3]):
for i5 in range(condition.shape[4]):
if condition[i1, i2, i3, i4, i5] != 0:
out[0] += int64(1)
return out
@_reg.register_shape_func("argwhere", True)
def argwhere_shape_func(attrs, inputs, out_ndims):
"""
Shape function for argwhere.
"""
if len(inputs[0].shape) == 1:
return [_argwhere_shape_func_1d(inputs[0])]
if len(inputs[0].shape) == 2:
return [_argwhere_shape_func_2d(inputs[0])]
if len(inputs[0].shape) == 3:
return [_argwhere_shape_func_3d(inputs[0])]
if len(inputs[0].shape) == 4:
return [_argwhere_shape_func_4d(inputs[0])]
if len(inputs[0].shape) == 5:
return [_argwhere_shape_func_5d(inputs[0])]
    raise ValueError("Does not support rank higher than 5 in argwhere")
_reg.register_shape_func("scatter", False, elemwise_shape_func)
_reg.register_shape_func("scatter_add", False, elemwise_shape_func)
@script
def _layout_transform_shape_func(
data_shape, out_layout_len, dst_equal_list, dst_mul_list, dst_div_list, dst_mix_list
):
out = output_tensor((out_layout_len,), "int64")
for i in const_range(len(dst_equal_list)):
out[dst_equal_list[i][0]] = data_shape[dst_equal_list[i][1]]
for i in const_range(len(dst_mul_list)):
out[dst_mul_list[i][0]] = data_shape[dst_mul_list[i][1]] * data_shape[dst_mul_list[i][2]]
for i in const_range(len(dst_div_list)):
out[dst_div_list[i][0]] = data_shape[dst_div_list[i][1]] // dst_div_list[i][3]
out[dst_div_list[i][2]] = int64(dst_div_list[i][3])
for i in const_range(len(dst_mix_list)):
out[dst_mix_list[i][0]] = (
data_shape[dst_mix_list[i][1]] * dst_mix_list[i][2] // dst_mix_list[i][4]
)
out[dst_mix_list[i][3]] = int64(dst_mix_list[i][4])
return out
@_reg.register_shape_func("layout_transform", False)
def layout_transform_shape_func(attrs, inputs, _):
"""
Shape function for layout_transform op.
"""
def _fetch_axis(layout):
major_axes = []
minor_axes = {}
num_start = -1
for i, item in enumerate(layout):
if "A" <= item <= "Z":
major_axes.append(item)
elif "a" <= item <= "z":
last_num = int(layout[num_start:i])
minor_axes[item] = last_num
num_start = -1
elif num_start < 0:
num_start = i
return major_axes, minor_axes
_, src_minor_axes = _fetch_axis(attrs.src_layout)
dst_major_axes, dst_minor_axes = _fetch_axis(attrs.dst_layout)
src_letter_list = []
dst_letter_list = []
for item in attrs.src_layout:
if "A" <= item <= "Z" or "a" <= item <= "z":
src_letter_list.append(item)
for item in attrs.dst_layout:
if "A" <= item <= "Z" or "a" <= item <= "z":
dst_letter_list.append(item)
out_layout_len = len(dst_major_axes) + len(dst_minor_axes)
dst_equal_list = []
dst_mul_list = []
dst_div_list = []
dst_mix_list = []
for key in dst_major_axes:
if key.lower() not in dst_minor_axes:
if key.lower() not in src_minor_axes:
dst_equal_list.append((dst_letter_list.index(key), src_letter_list.index(key)))
else:
dst_mul_list.append(
(
dst_letter_list.index(key),
src_letter_list.index(key),
src_letter_list.index(key.lower()),
)
)
else:
if key.lower() not in src_minor_axes:
dst_div_list.append(
(
dst_letter_list.index(key),
src_letter_list.index(key),
dst_letter_list.index(key.lower()),
dst_minor_axes[key.lower()],
)
)
else:
dst_mix_list.append(
(
dst_letter_list.index(key),
src_letter_list.index(key),
src_minor_axes[key.lower()],
dst_letter_list.index(key.lower()),
dst_minor_axes[key.lower()],
)
)
return [
_layout_transform_shape_func(
inputs[0],
convert(out_layout_len),
convert(dst_equal_list),
convert(dst_mul_list),
convert(dst_div_list),
convert(dst_mix_list),
)
]
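# Illustrative only (not used by Relay; names are made up): transforming NCHW to
# NCHW4c splits the channel axis by the minor factor 4, so a (1, 32, 224, 224)
# input becomes (1, 8, 224, 224, 4); the reverse transform multiplies them back.
def _layout_transform_example(data_shape=(1, 32, 224, 224), factor=4):
    n, c, h, w = data_shape
    return (n, c // factor, h, w, factor)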
@script
def _expand_dim_shape_func(data_shape, ndim, axis, num_newaxis):
out = output_tensor((ndim + num_newaxis,), "int64")
for i in const_range(out.shape[0]):
if i < axis:
out[i] = data_shape[i]
elif i < axis + num_newaxis:
out[i] = int64(1)
else:
out[i] = data_shape[i - num_newaxis]
return out
@_reg.register_shape_func("expand_dims", False)
def expand_dim_shape_func(attrs, inputs, _):
"""
Shape function for expand_dim op.
"""
axis = get_const_int(attrs.axis)
num_newaxis = get_const_int(attrs.num_newaxis)
if axis < 0:
axis = inputs[0].shape[0] + axis + 1
ndim = inputs[0].shape[0] if inputs[0].shape else 0
return [_expand_dim_shape_func(inputs[0], convert(ndim), convert(axis), convert(num_newaxis))]
@script
def _transpose_shape_func(data_shape, axes):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(len(axes)):
out[i] = data_shape[axes[i]]
return out
@_reg.register_shape_func("transpose", False)
def transpose_shape_func(attrs, inputs, _):
"""
Shape function for transpose op.
"""
axes = attrs.axes if attrs.axes is None else get_const_tuple(attrs.axes)
if axes is None:
axes = list(range(inputs[0].shape[0].value))
axes.reverse()
axes = list(axes)
for i, axis in enumerate(axes):
if axis < 0:
axes[i] = inputs[0].shape[0] + axis
return [_transpose_shape_func(inputs[0], convert(axes))]
@script
def _squeeze_shape_func(data_shape, keep_axes):
out = output_tensor((len(keep_axes),), "int64")
for i in const_range(len(keep_axes)):
out[i] = data_shape[keep_axes[i]]
return out
@_reg.register_shape_func("squeeze", False)
def squeeze_shape_func(attrs, inputs, _):
"""
Shape function for squeeze op.
"""
axis = attrs.axis if attrs.axis is None else get_const_tuple(attrs.axis)
keep_axes = []
if axis is not None:
for i in range(inputs[0].shape[0].value):
if i not in axis:
keep_axes.append(i)
    # Due to the current Relay type system, it is possible that even a static
    # kernel function needs a shape function. To handle this case, we allow
    # `axis` to be None in the squeeze shape func for now.
    # TODO(kevinthesun): Enhance the Relay type system to avoid this.
if keep_axes:
out = _squeeze_shape_func(inputs[0], convert(keep_axes))
else:
out = te.compute((), lambda *indices: 0)
return [out]
@script
def _reshape_like_shape_func(target_shape):
out = output_tensor((target_shape.shape[0],), "int64")
for i in const_range(target_shape.shape[0]):
out[i] = target_shape[i]
return out
@_reg.register_shape_func("reshape_like", False)
def reshape_like_shape_func(attrs, inputs, _):
"""
Shape function for reshape_like op.
"""
return [_reshape_like_shape_func(inputs[1])]
@script
def _tile_shape_func(data, reps, ndim, tndim, rndim):
out = output_tensor((tndim,), "int64")
if ndim == rndim:
for i in const_range(tndim):
out[i] = data[i] * int64(reps[i])
elif ndim > rndim:
ngap = ndim - rndim
for i in const_range(ndim):
if i < ngap:
out[i] = data[i]
else:
out[i] = data[i] * int64(reps[i - ngap])
else:
rgap = rndim - ndim
for i in const_range(rndim):
if i < rgap:
out[i] = int64(reps[i])
else:
out[i] = int64(reps[i]) * data[i - rgap]
return out
@_reg.register_shape_func("tile", False)
def tile_shape_func(attrs, inputs, _):
"""
Shape function for tile op.
"""
reps = get_const_tuple(attrs.reps)
ndim = inputs[0].shape[0].value
rndim = len(reps)
tndim = ndim if ndim > rndim else rndim
return [
_tile_shape_func(inputs[0], convert(reps), convert(ndim), convert(tndim), convert(rndim))
]
@script
def _split_shape_func(data_shape, index, indices_or_sections, axis):
out = output_tensor((data_shape.shape[0],), "int64")
if len(indices_or_sections) == 1:
for i in const_range(data_shape.shape[0]):
if i == axis:
assert (
data_shape[axis] % indices_or_sections[0] == 0
), "num_sections must be an integer factor of the size of axis"
out[i] = ceil_div(data_shape[axis], indices_or_sections[0])
else:
out[i] = data_shape[i]
else:
start = int64(0)
if index > 0:
start = int64(indices_or_sections[index - 1])
end = data_shape[axis]
if index < len(indices_or_sections):
end = int64(indices_or_sections[index])
for i in const_range(data_shape.shape[0]):
if i == axis:
out[i] = end - start
else:
out[i] = data_shape[i]
return out
@_reg.register_shape_func("split", False)
def split_shape_func(attrs, inputs, _):
"""
Shape function for split op.
"""
if isinstance(attrs.indices_or_sections, (int, tvm.tir.IntImm)):
indices_or_sections = get_const_int(attrs.indices_or_sections)
assert indices_or_sections > 0, "Slice count must be > 0"
else:
indices_or_sections = list(get_const_tuple(attrs.indices_or_sections))
assert sorted(indices_or_sections)[0] > 0 and indices_or_sections == sorted(
indices_or_sections
), "split_indices must be sorted"
axis = get_const_int(attrs.axis)
num_out = (
indices_or_sections
if isinstance(indices_or_sections, int)
else len(indices_or_sections) + 1
)
if isinstance(indices_or_sections, int):
indices_or_sections = [indices_or_sections]
return [
_split_shape_func(inputs[0], convert(i), convert(indices_or_sections), convert(axis))
for i in range(num_out)
]
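# Illustrative only (not used by Relay; the helper name is made up): splitting a
# (2, 6, 4) tensor along axis=1 at indices (2, 5) yields shapes (2, 2, 4),
# (2, 3, 4) and (2, 1, 4); an integer argument instead divides the axis evenly.
def _split_shapes_example(data_shape=(2, 6, 4), indices=(2, 5), axis=1):
    bounds = [0] + list(indices) + [data_shape[axis]]
    return [
        tuple((hi - lo) if i == axis else d for i, d in enumerate(data_shape))
        for lo, hi in zip(bounds[:-1], bounds[1:])
    ]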
@script
def _adv_index_shape_func(inputs):
index_rank = inputs[1].shape[0]
data_rank = inputs[0].shape[0]
out = output_tensor((data_rank + index_rank - len(inputs) + 1,), "int64")
max_flatten_len = int64(1)
for i in const_range(index_rank):
max_flatten_len *= inputs[1][i]
out[i] = inputs[1][i]
for i in const_range(len(inputs) - 2):
flatten_len = int64(1)
for j in const_range(index_rank):
flatten_len *= inputs[i + 2][j]
if flatten_len > max_flatten_len:
max_flatten_len = flatten_len
for k in const_range(index_rank):
out[k] = inputs[i + 2][k]
for i in const_range(data_rank - len(inputs) + 1):
out[i + index_rank] = inputs[0][i + len(inputs) - 1]
return out
@_reg.register_shape_func("adv_index", False)
def adv_index_shape_func(attrs, inputs, _):
"""
Shape func for adv_index.
Only allow single index tensor.
"""
return [_adv_index_shape_func(inputs)]
@script
def _repeat_shape_func(data_shape, repeats, axis):
out = output_tensor((data_shape.shape[0],), "int64")
for i in const_range(data_shape.shape[0]):
if i == axis:
out[i] = int64(data_shape[i] * repeats)
else:
out[i] = data_shape[i]
return out
@_reg.register_shape_func("repeat", False)
def repeat_shape_func(attrs, inputs, _):
"""
Shape func for repeat.
"""
axis = get_const_int(attrs.axis)
if axis < 0:
axis = inputs[0].shape[0] + axis
return [_repeat_shape_func(inputs[0], attrs.repeats, convert(axis))]
@_reg.register_shape_func("broadcast_to_like", False)
def broadcast_to_like_shape_func(attrs, inputs, _):
return [topi.math.identity(inputs[1])]
@script
def _stack_shape_func(data_shape, axis, num_inputs):
out = output_tensor((data_shape.shape[0] + 1,), "int64")
for i in const_range(data_shape.shape[0] + 1):
if i == axis:
out[i] = int64(num_inputs)
elif i < axis:
out[i] = data_shape[i]
else:
out[i] = data_shape[i - 1]
return out
@_reg.register_shape_func("stack", False)
def stack_shape_func(attrs, inputs, _):
axis = get_const_int(attrs.axis)
if axis < 0:
axis += inputs[0].shape[0] + 1
return [_stack_shape_func(inputs[0], convert(axis), convert(len(inputs)))]
|
sxjscience/tvm
|
python/tvm/relay/op/_transform.py
|
Python
|
apache-2.0
| 26,343
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
if (sys.version_info > (3,)):
from urllib.parse import quote
else:
from urllib import quote
import jsonpickle
from cairis.core.Asset import Asset
from cairis.core.ObjectSummary import ObjectSummary
from cairis.core.AssetEnvironmentProperties import AssetEnvironmentProperties
from cairis.core.ValueType import ValueType
from cairis.test.CairisDaemonTestCase import CairisDaemonTestCase
from cairis.tools.JsonConverter import json_deserialize
from cairis.tools.ModelDefinitions import AssetEnvironmentPropertiesModel, SecurityAttribute
from cairis.tools.SessionValidator import check_required_keys
from cairis.mio.ModelImport import importModelFile
import os
__author__ = 'Robin Quetin, Shamal Faily'
class AssetAPITests(CairisDaemonTestCase):
@classmethod
def setUpClass(cls):
importModelFile(os.environ['CAIRIS_SRC'] + '/../examples/exemplars/NeuroGrid/NeuroGrid.xml',1,'test')
def setUp(self):
# region Class fields
self.logger = logging.getLogger(__name__)
self.existing_asset_name = 'Data node'
self.new_asset = Asset(
assetId=-1,
assetName='Test',
shortCode='TST',
assetDescription='This is a new test asset',
assetSig='Very significant',
assetType='Hardware',
cFlag=0,
cRationale=None,
tags=[],
ifs=[],
cProps=[]
)
self.new_asset_sec_attr = [
SecurityAttribute(
'Accountability',
'Low',
'None'
),
SecurityAttribute(
'Confidentiality',
'Medium',
'None'
)
]
self.new_asset_props = [
AssetEnvironmentProperties(
environmentName='Stroke',
associations=[[0,"Association","1..*","","","1","Association",0,"Grid meta-data"]],
syProperties=self.new_asset_sec_attr,
pRationale=[]
)
]
self.new_asset_dict = {
'session_id': 'test',
'object': self.new_asset
}
# endregion
def test_get_all(self):
method = 'test_get_all'
rv = self.app.get('/api/assets?session_id=test')
if (sys.version_info > (3,)):
assets = json_deserialize(rv.data.decode('utf-8'))
else:
assets = json_deserialize(rv.data)
self.assertIsNotNone(assets, 'No results after deserialization')
self.assertIsInstance(assets, list, 'The result is not a list as expected')
self.assertGreater(len(assets), 0, 'No assets in the list')
self.assertIsInstance(assets[0], dict)
self.logger.info('[%s] Assets found: %d', method, len(assets))
self.logger.info('[%s] First asset: %s\n', method, assets[0]['theName'])
def test_get_all_summary(self):
method = 'test_get_all_summary'
rv = self.app.get('/api/assets/summary?session_id=test')
if (sys.version_info > (3,)):
assets = json_deserialize(rv.data.decode('utf-8'))
else:
assets = json_deserialize(rv.data)
self.assertIsNotNone(assets, 'No results after deserialization')
self.assertGreater(len(assets), 0, 'No asset summaries')
self.assertIsInstance(assets[0], dict)
self.logger.info('[%s] Assets found: %d', method, len(assets))
self.logger.info('[%s] First asset summary: %s [%d]\n', method, assets[0]['theName'], assets[0]['theType'])
def test_post(self):
method = 'test_post_new'
rv = self.app.post('/api/assets', content_type='application/json', data=jsonpickle.encode(self.new_asset_dict))
if (sys.version_info > (3,)):
postResponse = rv.data.decode('utf-8')
else:
postResponse = rv.data
self.logger.debug('[%s] Response data: %s', method, postResponse)
json_resp = json_deserialize(postResponse)
self.assertIsNotNone(postResponse, 'No results after deserialization')
url = '/api/assets/name/' + quote(self.new_asset_dict['object'].theName) + '?session_id=test'
rv = self.app.get(url)
if (sys.version_info > (3,)):
asset = json_deserialize(rv.data.decode('utf-8'))
else:
asset = json_deserialize(rv.data)
self.logger.info('[%s] Asset: %s\n', method, asset['theName'])
def test_get_invalid_name(self):
method = 'test_get_name'
url = '/api/assets/name/invalidname?session_id=test'
rv = self.app.get(url)
msg = json_deserialize(rv.data.decode('utf-8'))
self.assertIsNotNone(msg, 'No results after deserialization')
self.assertEqual(msg['code'],404)
def test_get_name(self):
method = 'test_get_name'
url = '/api/assets/name/%s?session_id=test' % quote(self.existing_asset_name)
rv = self.app.get(url)
if (sys.version_info > (3,)):
asset = json_deserialize(rv.data.decode('utf-8'))
else:
asset = json_deserialize(rv.data)
self.assertIsNotNone(asset, 'No results after deserialization')
self.assertEqual(asset['theName'],self.existing_asset_name)
def test_put_name(self):
method = 'test_put_name'
rv = self.app.post('/api/assets', content_type='application/json', data=jsonpickle.encode(self.new_asset_dict))
url = '/api/assets/name/%s' % quote(self.new_asset.theName)
upd_asset = self.new_asset
upd_asset.theName = 'Test2'
upd_asset_dict = self.new_asset_dict
upd_asset_dict['object'] = upd_asset
upd_asset_body = jsonpickle.encode(upd_asset_dict)
self.logger.info('[%s] JSON data: %s', method, upd_asset_body)
rv = self.app.put(url, content_type='application/json', data=upd_asset_body)
if (sys.version_info > (3,)):
putResponse = rv.data.decode('utf-8')
else:
putResponse = rv.data
self.logger.debug('[%s] Response data: %s', method, putResponse)
json_resp = json_deserialize(putResponse)
self.assertIsNotNone(json_resp, 'No results after deserialization')
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message returned')
rv = self.app.get('/api/assets/name/Test2?session_id=test')
if (sys.version_info > (3,)):
asset = json_deserialize(rv.data.decode('utf-8'))
else:
asset = json_deserialize(rv.data)
self.logger.info('[%s] Asset: %s\n', method, asset['theName'])
def test_delete_name(self):
method = 'test_delete_name'
url = '/api/assets/name/{}?session_id=test'.format(quote(self.new_asset.theName))
rv = self.app.delete(url)
url = '/api/assets/name/Test2?session_id=test'.format(quote(self.new_asset.theName))
rv = self.app.delete(url)
if (sys.version_info > (3,)):
delResponse = rv.data.decode('utf-8')
else:
delResponse = rv.data
self.logger.debug('[%s] Response data: %s', method, delResponse)
json_resp = json_deserialize(delResponse)
self.assertIsNotNone(json_resp, 'No results after deserialization')
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message returned')
self.logger.info('[%s] Message: %s\n', method, message)
def test_types_get(self):
method = 'test_types_get'
rv = self.app.get('/api/assets/types?session_id=test')
if (sys.version_info > (3,)):
assets = jsonpickle.decode(rv.data.decode('utf-8'))
else:
assets = jsonpickle.decode(rv.data)
self.assertIsNotNone(assets, 'No results after deserialization')
    self.assertIsInstance(assets, list, 'The result is not a list as expected')
    self.assertGreater(len(assets), 0, 'No asset types in the list')
self.logger.info('[%s] Asset types found: %d', method, len(assets))
asset_type = assets[0]
self.logger.info('[%s] First asset types: %s\n', method, asset_type['theName'])
def test_types_delete(self):
method = 'test_types_delete'
url = '/api/assets/types/name/%s?session_id=test' % quote(self.prepare_new_asset_type().theName)
json_dict = {
'session_id': 'test',
'object': self.prepare_new_asset_type()
}
new_asset_type_body = jsonpickle.encode(json_dict, unpicklable=False)
self.app.delete(url)
self.logger.info('[%s] Object to delete: %s', method, new_asset_type_body)
self.app.post('/api/assets/types', content_type='application/json', data=new_asset_type_body)
self.logger.info('[%s] URL: %s', method, url)
rv = self.app.delete(url)
if (sys.version_info > (3,)):
delResponse = rv.data.decode('utf-8')
else:
delResponse = rv.data
self.logger.info('[%s] Response data: %s', method, delResponse)
self.assertIsNotNone(delResponse, 'No response')
json_resp = jsonpickle.decode(delResponse)
self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.assertEqual(message,'Test asset type deleted')
def test_types_post(self):
method = 'test_types_post'
url = '/api/assets/types'
self.logger.info('[%s] URL: %s', method, url)
json_dict = {'session_id': 'test', 'object': self.prepare_new_asset_type()}
new_asset_type_body = jsonpickle.encode(json_dict, unpicklable=False)
self.logger.info('JSON data: %s', new_asset_type_body)
rv = self.app.post(url, content_type='application/json', data=new_asset_type_body)
if (sys.version_info > (3,)):
postResponse = rv.data.decode('utf-8')
else:
postResponse = rv.data
self.logger.debug('[%s] Response data: %s', method, postResponse)
json_resp = jsonpickle.decode(postResponse)
self.assertIsNotNone(json_resp, 'No results after deserialization')
ackMsg = json_resp.get('message', None)
self.assertEqual(ackMsg,'Test asset type created')
rv = self.app.delete('/api/assets/types/name/%s?session_id=test' % quote(self.prepare_new_asset_type().theName))
delResponse = rv.data.decode('utf-8')
self.assertIsNotNone(delResponse, 'No response')
json_resp = jsonpickle.decode(delResponse)
self.assertIsInstance(json_resp, dict, 'The response cannot be converted to a dictionary')
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.assertEqual(message,'Test asset type deleted')
def test_types_put(self):
method = 'test_types_put'
url = '/api/assets/types'
self.logger.info('[%s] URL: %s', method, url)
json_dict = {'session_id': 'test', 'object': self.prepare_new_asset_type()}
new_asset_type_body = jsonpickle.encode(json_dict)
self.logger.info('JSON data: %s', new_asset_type_body)
rv = self.app.delete('/api/assets/types/name/%s?session_id=test' % quote(self.prepare_new_asset_type().theName))
rv = self.app.post(url, content_type='application/json', data=new_asset_type_body)
if (sys.version_info > (3,)):
postResponse = rv.data.decode('utf-8')
else:
postResponse = rv.data
self.logger.debug('[%s] Response data: %s', method, postResponse)
json_resp = jsonpickle.decode(postResponse)
self.assertIsNotNone(json_resp, 'No results after deserialization')
ackMsg = json_resp.get('message', None)
self.assertEqual(ackMsg,'Test asset type created')
type_to_update = self.prepare_new_asset_type()
type_to_update.theName = 'Edited test asset type'
json_dict = {'session_id': 'test', 'object': type_to_update}
upd_type_body = jsonpickle.encode(json_dict)
rv = self.app.put('/api/assets/types/name/%s?session_id=test' % quote(self.prepare_new_asset_type().theName), data=upd_type_body, content_type='application/json')
if (sys.version_info > (3,)):
putResponse = rv.data.decode('utf-8')
else:
putResponse = rv.data
self.assertIsNotNone(rv.data, 'No response')
json_resp = jsonpickle.decode(putResponse)
self.assertIsNotNone(json_resp)
self.assertIsInstance(json_resp, dict)
message = json_resp.get('message', None)
self.assertIsNotNone(message, 'No message in response')
self.logger.info('[%s] Message: %s', method, message)
self.assertEqual(message,'Edited test asset type updated')
rv = self.app.get('/api/assets/types/name/%s?session_id=test' % quote(type_to_update.theName))
if (sys.version_info > (3,)):
getResponse = rv.data.decode('utf-8')
else:
getResponse = rv.data
upd_asset_type = jsonpickle.decode(getResponse)
self.assertIsNotNone(upd_asset_type, 'Unable to decode JSON data')
self.logger.debug('[%s] Response data: %s', method, getResponse)
self.logger.info('[%s] Asset type: %s\n', method, upd_asset_type['theName'])
rv = self.app.delete('/api/assets/types/name/%s?session_id=test' % quote(type_to_update.theName))
def prepare_new_asset_type(self):
new_type = ValueType(
valueTypeId=-1,
valueTypeName='Test asset type',
valueTypeDescription='This is a test asset type',
vType='asset-type',
vEnv='all'
)
new_type.theEnvironmentName = 'all'
return new_type
|
failys/CAIRIS
|
cairis/test/test_AssetAPI.py
|
Python
|
apache-2.0
| 13,606
|
from muntjac.api import VerticalLayout, Link
from muntjac.terminal.theme_resource import ThemeResource
from muntjac.terminal.external_resource import ExternalResource
class LinkCurrentWindowExample(VerticalLayout):
_CAPTION = 'Open Google'
_TOOLTIP = 'http://www.google.com'
_ICON = ThemeResource('../sampler/icons/icon_world.gif')
def __init__(self):
super(LinkCurrentWindowExample, self).__init__()
self.setSpacing(True)
# Link w/ text and tooltip
l = Link(self._CAPTION, ExternalResource('http://www.google.com'))
l.setDescription(self._TOOLTIP)
self.addComponent(l)
# Link w/ text, icon and tooltip
l = Link(self._CAPTION, ExternalResource('http://www.google.com'))
l.setDescription(self._TOOLTIP)
l.setIcon(self._ICON)
self.addComponent(l)
# Link w/ icon and tooltip
l = Link()
l.setResource(ExternalResource('http://www.google.com'))
l.setDescription(self._TOOLTIP)
l.setIcon(self._ICON)
self.addComponent(l)
|
rwl/muntjac
|
muntjac/demo/sampler/features/link/LinkCurrentWindowExample.py
|
Python
|
apache-2.0
| 1,076
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from include.dataset_fnames import generate_station_data_fname
from include.feature_lists import numeric_features
from xgboost import XGBRegressor
def numeric_df_missing_values_summary():
for i, station_id in enumerate(sorted(numeric_features)):
# print station_id,
fname = generate_station_data_fname(station_id, sample_type='train', data_type='numeric', allow_nan_values=False)
features = ['Id'] + numeric_features[station_id]
station_df = pd.read_csv(fname, usecols=features, index_col=['Id'], dtype=object)
for feature in station_df.columns:
if station_df[feature].isnull().any():
print feature
# print
del station_df
# if (i == 10): break
def regresor_function(X_train, y_train):
    regresor = XGBRegressor(objective='reg:linear', n_estimators=1000, learning_rate=0.01, base_score=0.005)
regresor.fit(X_train, y_train)
return regresor
def numeric_df_impute_missing_values_iteration(df, null_features, null_indices, iter):
print "Features with null data:", null_features
print "Iterations:", iter
full_features = df.columns
new_df = df.copy()
for iter_number in range(iter):
iter_df = new_df.copy()
for feature in null_features:
print "Iteration: ", iter_number, "Feature:", feature
train_features = list((set(full_features) - set([feature])))
train_indices = df[df[feature].notnull()].index
test_indices = df[df[feature].isnull()].index
X_train = new_df.loc[train_indices, train_features]
y_train = new_df.loc[train_indices, feature]
X_test = new_df.loc[test_indices, train_features] # feature == null
regresor = regresor_function(X_train.values, y_train.values)
y_predict = regresor.predict(X_test.values)
iter_df.loc[test_indices, feature] = y_predict
print iter_df.loc[2342802]
print iter_df.loc[2347536]
del new_df
new_df = iter_df.copy()
del iter_df
# for each iteration
# cross_validation_check(new_df)
print new_df.loc[2342802]
print new_df.loc[2347536]
return new_df
def numeric_df_impute_missing_values(station_id, sample_type='train', iter=1):
fname = generate_station_data_fname(station_id=station_id, sample_type=sample_type, data_type='numeric', allow_nan_values=False)
features = ['Id'] + numeric_features[station_id]
station_df = pd.read_csv(fname, usecols=features, index_col=['Id'], dtype=np.float32)
station_df_null_features = []
station_df_null_indices = {}
for feature in station_df.columns:
if station_df[feature].isnull().any():
station_df_null_features.append(feature)
station_df_null_indices[feature] = station_df[station_df[feature].isnull()].index.tolist()
ret_df = numeric_df_impute_missing_values_iteration(station_df, station_df_null_features, station_df_null_indices, iter)
del station_df
# print station_df_null_features
# print station_df_null_indices
return ret_df
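# Illustrative note only: each pass above fits one XGBoost regressor per
# null-bearing column (training on the rows where that column is present) and
# overwrites the missing entries with its predictions, so later passes see
# progressively better-filled inputs.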
if __name__ == '__main__':
df = numeric_df_impute_missing_values('L0S09')
print df.head()
|
zakkum42/Bosch
|
src/03-feature_engineering/missing_values_numeric.py
|
Python
|
apache-2.0
| 3,477
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-28 02:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('webcore', '0003_auto_20170427_1825'),
]
operations = [
migrations.RemoveField(
model_name='postad',
name='user',
),
migrations.DeleteModel(
name='PostAd',
),
]
|
Nikita1710/ANUFifty50-Online-Mentoring-Platform
|
project/fifty_fifty/webcore/migrations/0004_auto_20170428_0228.py
|
Python
|
apache-2.0
| 459
|
"""Elementwise operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import tag
from ..util import get_const_int
@tvm.tag_scope(tag=tag.ELEMWISE)
def relu(x):
"""Take relu of input x.
Parameters
----------
x : tvm.Tensor
Input argument.
Returns
-------
y : tvm.Tensor
The result.
"""
return tvm.compute(x.shape, lambda *i: tvm.max(x(*i), tvm.const(0, x.dtype)))
@tvm.tag_scope(tag=tag.ELEMWISE)
def leaky_relu(x, alpha):
"""Take leaky relu of input x.
Parameters
----------
x : tvm.Tensor
Input argument.
alpha : float
The slope for the small gradient when x < 0
Returns
-------
y : tvm.Tensor
The result.
"""
def _compute(*indices):
value = x(*indices)
calpha = tvm.const(alpha, value.dtype)
return tvm.select(value > 0, value, value * calpha)
return tvm.compute(x.shape, _compute)
@tvm.tag_scope(tag=tag.BROADCAST)
def prelu(x, slope, axis=1):
""" PReLU.
It accepts two arguments: an input ``x`` and a weight array ``W``
    and computes the output as :math:`y = PReLU(x) = x > 0 ? x : W * x`,
where :math:`*` is an elementwise multiplication for each sample in the
batch.
Arguments:
x : tvm.Tensor
Input argument.
slope : tvm.Tensor
Channelised slope tensor for prelu
axis : int
The axis where the channel data needs to be applied
Returns:
y : tvm.Tensor
The result.
Links:
[http://arxiv.org/pdf/1502.01852v1.pdf]
"""
assert len(x.shape) == 4 and len(slope.shape) == 1
assert axis < len(x.shape)
assert get_const_int(slope.shape[0]) == get_const_int(x.shape[axis])
def _compute_channelwise(*indices):
return tvm.select(x(*indices) > 0, x(*indices), x(*indices) * slope(indices[axis]))
return tvm.compute(x.shape, _compute_channelwise)
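# A pure-Python sketch (illustrative only; not used by topi) of the channelwise
# rule computed above for an NCHW nested list `x` and a per-channel `slope` list:
# negative entries in channel c are scaled by slope[c], positive entries pass through.
def _prelu_reference(x, slope):
    return [[[[v if v > 0 else v * slope[c] for v in row]
              for row in channel]
             for c, channel in enumerate(image)]
            for image in x]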
|
mlperf/training_results_v0.6
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/python/topi/nn/elemwise.py
|
Python
|
apache-2.0
| 1,936
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Intel Corp.
#
"""
Interface for all resource control plugins.
"""
from abc import ABCMeta, abstractmethod
from ..plugin import DeclareFramework
@DeclareFramework('provisioner')
class Provisioner(object, metaclass=ABCMeta):
PROVISIONER_KEY = "provisioner"
PROVISIONER_IMAGE_KEY = "image"
PROVISIONER_BOOTSTRAP_KEY = "provisioner_bootstrap"
PROVISIONER_FILE_KEY = "provisioner_files"
PROVISIONER_KARGS_KEY = "provisioner_kernel_args"
PROVISIONER_UNSET_KEY = "UNDEF"
@abstractmethod
def add(self, device):
"""
Attempts to add a device to the provisioner. Does nothing if the device is already added.
:param device:
:return: Updated device with the new fields applied
"""
pass
@abstractmethod
def delete(self, device):
"""
Attempts to remove a device from the provisioner. Does nothing if the device isn't already there.
:param device:
:return: Updated device with the correct fields removed
"""
pass
@abstractmethod
def set_ip_address(self, device, ip_address, interface="eth0"):
"""
Mutate the device to include this ip_address.
Save it to the DataStore
And set it in the provisioner
:param device:
:param ip_address:
:param interface:
:return: Updated device with the new fields applied
"""
pass
@abstractmethod
def set_hardware_address(self, device, hardware_address, interface="eth0"):
"""
Same as Provisioner.set_ip_address
:param device:
:param hardware_address:
:param interface:
:return: Updated device with the new fields applied
"""
pass
@abstractmethod
def set_image(self, device, image):
"""
Set an image (already known by the provisioner) to a given device.
:param device:
:param image:
:param kernel:
:param network_interface:
:return: Updated device with the new fields applied
:raise: ProvisionException, the image specified is not known to the provisioner
"""
pass
@abstractmethod
def set_bootstrap(self, device, bootstrap):
"""
:param device:
:param bootstrap:
:return: Updated device with the new fields applied
:raise: ProvisionException, the bootstrap specified is not known to the provisioner
"""
pass
@abstractmethod
def set_files(self, device, files):
"""
:param device:
:param files:
:return: Updated device with the new fields applied
:raise: ProvisionException, the file(s) specified is not known to the provisioner
"""
pass
@abstractmethod
def set_kernel_args(self, device, args):
"""
:param device:
:param args:
:return: Updated device with the new fields applied
"""
pass
@abstractmethod
def list(self):
"""
List all devices that the provisioner knows about.
does this come the DataStore or Warewulf?
:return: return the list of device names
"""
pass
@abstractmethod
def list_images(self):
"""
List all the images this provisioner knows about.
:return: list of known images (names only)
"""
pass
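# Illustrative only: a typical call sequence a concrete plugin is expected to
# support (the function name, address and image name below are made up).
def _example_provision_flow(provisioner, device):
    device = provisioner.add(device)
    device = provisioner.set_ip_address(device, "10.0.0.5", interface="eth0")
    device = provisioner.set_image(device, "compute-image")
    return provisioner.list()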
class ProvisionerException(Exception):
"""
A staple Exception thrown by the Provisioner
"""
def __init__(self, msg, command_output=None):
super(ProvisionerException, self).__init__()
self.msg = msg
if command_output is not None:
self.cmd_stdout = command_output.stdout
self.cmd_stderr = command_output.stderr
self.cmd_return_code = command_output.return_code
def __str__(self):
return repr(self.msg)
|
intel-ctrlsys/actsys
|
actsys/control/provisioner/provisioner.py
|
Python
|
apache-2.0
| 3,948
|
# Copyright 2021 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tensorflow as tf
from .retrieval_metric import RetrievalMetric
from tensorflow_similarity.types import FloatTensor, IntTensor, BoolTensor
class BNDCG(RetrievalMetric):
"""Binary normalized discounted cumulative gain.
This is normalized discounted cumulative gain where the relevancy weights
are binary, i.e., either a correct match or an incorrect match.
The NDCG is a score between [0,1] representing the rank weighted results.
The DCG represents the sum of the correct matches weighted by the log2 of
the rank and is normalized by the 'ideal DCG'. The IDCG is computed as the
match_mask, sorted descending, weighted by the log2 of the post sorting rank
order. This metric takes into account both the correctness of the match and
the position.
The normalized DCG is computed as:
$$
nDCG_{p} = \frac{DCG_{p}}{IDCG_{p}}
$$
The DCG is computed for each query using the match_mask as:
$$
DCG_{p} = \sum_{i=1}^{p} \frac{match_mask_{i}}{\log_{2}(i+1)}
$$
The IDCG uses the same equation but sorts the match_mask descending
along axis=-1.
Additionally, all positive matches with a distance above the threshold are
set to 0, and the closest K matches are taken.
Args:
name: Name associated with the metric object, e.g., precision@5
canonical_name: The canonical name associated with metric,
e.g., precision@K
k: The number of nearest neighbors over which the metric is computed.
distance_threshold: The max distance below which a nearest neighbor is
considered a valid match.
average: {'micro', 'macro'} Determines the type of averaging performed
on the data.
* 'micro': Calculates metrics globally over all data.
* 'macro': Calculates metrics for each label and takes the unweighted
mean.
"""
def __init__(
self,
name: str = "ndcg",
k: int = 5,
distance_threshold: float = math.inf,
**kwargs,
) -> None:
if "canonical_name" not in kwargs:
kwargs["canonical_name"] = "ndcg@k"
super().__init__(
name=name, k=k, distance_threshold=distance_threshold, **kwargs
)
def compute(
self,
*, # keyword only arguments see PEP-570
query_labels: IntTensor,
lookup_distances: FloatTensor,
match_mask: BoolTensor,
**kwargs,
) -> FloatTensor:
"""Compute the metric
Computes the binary NDCG. The query labels are only used when the
averaging is set to "macro".
Args:
query_labels: A 1D array of the labels associated with the
embedding queries.
lookup_distances: A 2D array where the jth row is the distances
between the jth query and the set of k neighbors.
match_mask: A 2D mask where a 1 indicates a match between the
jth query and the kth neighbor and a 0 indicates a mismatch.
Returns:
A rank 0 tensor containing the metric.
"""
self._check_shape(query_labels, match_mask)
if tf.shape(lookup_distances)[0] != tf.shape(query_labels)[0]:
raise ValueError(
"The number of lookup distance rows must equal the number "
"of query labels. Number of lookup distance rows is "
f"{tf.shape(lookup_distances)[0]} but the number of query "
f"labels is {tf.shape(query_labels)[0]}."
)
dist_mask = tf.math.less_equal(
lookup_distances, self.distance_threshold
)
k_slice = tf.math.multiply(
tf.cast(match_mask, dtype="float"),
tf.cast(dist_mask, dtype="float"),
)[:, : self.k]
rank = tf.range(1, self.k + 1, dtype="float")
rank_weights = tf.math.divide(tf.math.log1p(rank), tf.math.log(2.0))
        # the numerator is simpler here because we are using binary weights
dcg = tf.math.reduce_sum(k_slice / rank_weights, axis=1)
# generate the "ideal ordering".
ideal_ordering = tf.sort(k_slice, direction="DESCENDING", axis=1)
idcg = tf.math.reduce_sum(ideal_ordering / rank_weights, axis=1)
per_example_ndcg = tf.math.divide_no_nan(dcg, idcg)
if self.average == "micro":
ndcg = tf.math.reduce_mean(per_example_ndcg)
elif self.average == "macro":
per_class_metrics = 0
class_labels = tf.unique(query_labels)[0]
for label in class_labels:
idxs = tf.where(query_labels == label)
c_slice = tf.gather(per_example_ndcg, indices=idxs)
per_class_metrics += tf.math.reduce_mean(c_slice)
ndcg = tf.math.divide(per_class_metrics, len(class_labels))
else:
raise ValueError(
f"{self.average} is not a supported average " "option"
)
result: FloatTensor = ndcg
return result
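# A worked example (plain Python, illustrative only; the helper name is made up)
# of the binary NDCG above for one query with k=3 and match_mask row [1, 0, 1]:
#   DCG  = 1/log2(2) + 0/log2(3) + 1/log2(4)  = 1.5
#   IDCG = 1/log2(2) + 1/log2(3) + 0/log2(4)  ~ 1.631
#   NDCG = DCG / IDCG                         ~ 0.920
def _binary_ndcg_reference(matches=(1, 0, 1)):
    weights = [math.log2(i + 2) for i in range(len(matches))]
    dcg = sum(m / w for m, w in zip(matches, weights))
    idcg = sum(m / w for m, w in zip(sorted(matches, reverse=True), weights))
    return dcg / idcg if idcg else 0.0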
|
tensorflow/similarity
|
tensorflow_similarity/retrieval_metrics/bndcg.py
|
Python
|
apache-2.0
| 5,659
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet-Sketch dataset."""
from tensorflow_datasets.image_classification.imagenet_sketch.imagenet_sketch import ImagenetSketch
|
tensorflow/datasets
|
tensorflow_datasets/image_classification/imagenet_sketch/__init__.py
|
Python
|
apache-2.0
| 744
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Python provides the base64 module as a core module but this is mostly
limited to encoding and decoding base64 and it's variants. It is often
useful to be able to perform other operations on base64 text. This
module is meant to be used in conjunction with the core base64 module.
Standardized base64 is defined in
RFC-4648 "The Base16, Base32, and Base64 Data Encodings".
This module provides the following base64 utility functionality:
* tests if text is valid base64
* filter formatting from base64
* convert base64 between different alphabets
* Handle padding issues
- test if base64 is padded
- removes padding
- restores padding
* wraps base64 text into formatted blocks
- via iterator
- return formatted string
"""
import re
import string
import six
from six.moves import urllib
from keystone.i18n import _
class InvalidBase64Error(ValueError):
pass
base64_alphabet_re = re.compile(r'^[A-Za-z0-9+/=]+$')
base64url_alphabet_re = re.compile(r'^[A-Za-z0-9---_=]+$')
base64_non_alphabet_re = re.compile(r'[^A-Za-z0-9+/=]+')
base64url_non_alphabet_re = re.compile(r'[^A-Za-z0-9---_=]+')
_strip_formatting_re = re.compile(r'\s+')
_base64_to_base64url_trans = string.maketrans('+/', '-_')
_base64url_to_base64_trans = string.maketrans('-_', '+/')
def is_valid_base64(text):
"""Test if input text can be base64 decoded.
:param text: input base64 text
:type text: string
:returns: bool -- True if text can be decoded as base64, False otherwise
"""
text = filter_formatting(text)
if base64_non_alphabet_re.search(text):
return False
try:
return base64_is_padded(text)
except InvalidBase64Error:
return False
def is_valid_base64url(text):
"""Test if input text can be base64url decoded.
:param text: input base64 text
:type text: string
:returns: bool -- True if text can be decoded as base64url,
False otherwise
"""
text = filter_formatting(text)
if base64url_non_alphabet_re.search(text):
return False
try:
return base64_is_padded(text)
except InvalidBase64Error:
return False
def filter_formatting(text):
"""Return base64 text without any formatting, just the base64.
Base64 text is often formatted with whitespace, line endings,
etc. This function strips out any formatting, the result will
contain only base64 characters.
Note, this function does not filter out all non-base64 alphabet
characters, it only removes characters used for formatting.
:param text: input text to filter
:type text: string
:returns: string -- filtered text without formatting
"""
return _strip_formatting_re.sub('', text)
def base64_to_base64url(text):
"""Convert base64 text to base64url text.
    base64url text is designed to be safe for use in filenames and
    URLs. It is defined in RFC-4648 Section 5.
base64url differs from base64 in the last two alphabet characters
at index 62 and 63, these are sometimes referred as the
altchars. The '+' character at index 62 is replaced by '-'
(hyphen) and the '/' character at index 63 is replaced by '_'
(underscore).
This function only translates the altchars, non-alphabet
characters are not filtered out.
WARNING::
base64url continues to use the '=' pad character which is NOT URL
safe. RFC-4648 suggests two alternate methods to deal with this:
percent-encode
percent-encode the pad character (e.g. '=' becomes
'%3D'). This makes the base64url text fully safe. But
percent-encoding has the downside of requiring
percent-decoding prior to feeding the base64url text into a
base64url decoder since most base64url decoders do not
recognize %3D as a pad character and most decoders require
correct padding.
no-padding
padding is not strictly necessary to decode base64 or
base64url text, the pad can be computed from the input text
length. However many decoders demand padding and will consider
non-padded text to be malformed. If one wants to omit the
trailing pad character(s) for use in URL's it can be added back
using the base64_assure_padding() function.
This function makes no decisions about which padding methodology to
use. One can either call base64_strip_padding() to remove any pad
characters (restoring later with base64_assure_padding()) or call
base64url_percent_encode() to percent-encode the pad characters.
:param text: input base64 text
:type text: string
:returns: string -- base64url text
"""
return text.translate(_base64_to_base64url_trans)
def base64url_to_base64(text):
"""Convert base64url text to base64 text.
See base64_to_base64url() for a description of base64url text and
    its issues.
This function does NOT handle percent-encoded pad characters, they
will be left intact. If the input base64url text is
    percent-encoded you should call base64url_percent_decode() first.
:param text: text in base64url alphabet
:type text: string
:returns: string -- text in base64 alphabet
"""
return text.translate(_base64url_to_base64_trans)
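# Illustrative usage sketch (not part of the upstream module): the intended
# altchar round trip between the two alphabets. The sample token is made up,
# and this assumes the Python 2 string.maketrans()/str.translate() semantics
# that this module targets.
def _example_altchar_round_trip():
    token = 'aGVsbG8+/w=='                      # standard base64 using '+' and '/'
    url_safe = base64_to_base64url(token)       # expected: 'aGVsbG8-_w=='
    restored = base64url_to_base64(url_safe)    # expected: the original token
    return url_safe, restored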
def base64_is_padded(text, pad='='):
"""Test if the text is base64 padded.
The input text must be in a base64 alphabet. The pad must be a
single character. If the text has been percent-encoded (e.g. pad
is the string '%3D') you must convert the text back to a base64
alphabet (e.g. if percent-encoded use the function
base64url_percent_decode()).
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param pad: pad character (must be single character) (default: '=')
:type pad: string
:returns: bool -- True if padded, False otherwise
:raises: ValueError, InvalidBase64Error
"""
if len(pad) != 1:
raise ValueError(_('pad must be single character'))
text_len = len(text)
if text_len > 0 and text_len % 4 == 0:
pad_index = text.find(pad)
if pad_index >= 0 and pad_index < text_len - 2:
raise InvalidBase64Error(_('text is multiple of 4, '
'but pad "%s" occurs before '
'2nd to last char') % pad)
if pad_index == text_len - 2 and text[-1] != pad:
raise InvalidBase64Error(_('text is multiple of 4, '
'but pad "%s" occurs before '
'non-pad last char') % pad)
return True
if text.find(pad) >= 0:
raise InvalidBase64Error(_('text is not a multiple of 4, '
'but contains pad "%s"') % pad)
return False
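# Illustrative sketch (not part of the upstream module): how the padding test
# treats a few hand-picked inputs. Correctly padded multiple-of-4 text is
# reported as padded, text with its pad stripped is not, and a stray interior
# pad raises InvalidBase64Error.
def _example_is_padded():
    assert base64_is_padded('Zm9vYg==') is True    # two trailing pad chars
    assert base64_is_padded('Zm9vYg') is False     # pad removed, length not a multiple of 4
    try:
        base64_is_padded('Zm=vYg==')               # pad in an illegal position
    except InvalidBase64Error:
        pass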
def base64url_percent_encode(text):
"""Percent-encode base64url padding.
The input text should only contain base64url alphabet
characters. Any non-base64url alphabet characters will also be
subject to percent-encoding.
:param text: text containing ONLY characters in the base64url alphabet
:type text: string
:returns: string -- percent-encoded base64url text
:raises: InvalidBase64Error
"""
if len(text) % 4 != 0:
raise InvalidBase64Error(_('padded base64url text must be '
'multiple of 4 characters'))
return urllib.parse.quote(text)
def base64url_percent_decode(text):
"""Percent-decode base64url padding.
The input text should only contain base64url alphabet
characters and the percent-encoded pad character. Any other
percent-encoded characters will be subject to percent-decoding.
:param text: base64url alphabet text
:type text: string
:returns: string -- percent-decoded base64url text
"""
decoded_text = urllib.parse.unquote(text)
if len(decoded_text) % 4 != 0:
raise InvalidBase64Error(_('padded base64url text must be '
'multiple of 4 characters'))
return decoded_text
def base64_strip_padding(text, pad='='):
"""Remove padding from input base64 text.
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param pad: pad character (must be single character) (default: '=')
:type pad: string
:returns: string -- base64 text without padding
:raises: ValueError
"""
if len(pad) != 1:
raise ValueError(_('pad must be single character'))
# Can't be padded if text is less than 4 characters.
if len(text) < 4:
return text
if text[-1] == pad:
if text[-2] == pad:
return text[0:-2]
else:
return text[0:-1]
else:
return text
def base64_assure_padding(text, pad='='):
"""Assure the input text ends with padding.
Base64 text is normally expected to be a multiple of 4
characters. Each 4 character base64 sequence produces 3 octets of
binary data. If the binary data is not a multiple of 3 the base64
text is padded at the end with a pad character such that it is
always a multiple of 4. Padding is ignored and does not alter the
    binary data nor its length.
In some circumstances it is desirable to omit the padding
character due to transport encoding conflicts. Base64 text can
still be correctly decoded if the length of the base64 text
(consisting only of characters in the desired base64 alphabet) is
known, padding is not absolutely necessary.
Some base64 decoders demand correct padding or one may wish to
format RFC compliant base64, this function performs this action.
Input is assumed to consist only of members of a base64
    alphabet (i.e. no whitespace).
Use the filter_formatting() function to assure the input text
contains only the members of the alphabet.
If the text ends with the pad it is assumed to already be
padded. Otherwise the binary length is computed from the input
text length and correct number of pad characters are appended.
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param pad: pad character (must be single character) (default: '=')
:type pad: string
:returns: string -- input base64 text with padding
:raises: ValueError
"""
if len(pad) != 1:
raise ValueError(_('pad must be single character'))
if text.endswith(pad):
return text
n = len(text) % 4
if n == 0:
return text
n = 4 - n
padding = pad * n
return text + padding
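# Illustrative sketch (not part of the upstream module): stripping the pad for
# URL-safe transport and restoring it later is lossless for well-formed base64
# text. The sample string is made up.
def _example_padding_round_trip():
    padded = 'Zm9vYmE='                        # 8 chars, single pad character
    stripped = base64_strip_padding(padded)    # -> 'Zm9vYmE'
    assert base64_assure_padding(stripped) == padded
    return stripped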
def base64_wrap_iter(text, width=64):
"""Fold text into lines of text with max line length.
Input is assumed to consist only of members of a base64
    alphabet (i.e. no whitespace). Iteration yields a sequence of lines.
The line does NOT terminate with a line ending.
Use the filter_formatting() function to assure the input text
contains only the members of the alphabet.
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param width: number of characters in each wrapped line (default: 64)
:type width: int
:returns: generator -- sequence of lines of base64 text.
"""
text = six.text_type(text)
for x in six.moves.range(0, len(text), width):
yield text[x:x + width]
def base64_wrap(text, width=64):
"""Fold text into lines of text with max line length.
Input is assumed to consist only of members of a base64
    alphabet (i.e. no whitespace). Fold the text into lines whose
line length is width chars long, terminate each line with line
ending (default is '\\n'). Return the wrapped text as a single
string.
Use the filter_formatting() function to assure the input text
contains only the members of the alphabet.
:param text: text containing ONLY characters in a base64 alphabet
:type text: string
:param width: number of characters in each wrapped line (default: 64)
:type width: int
:returns: string -- wrapped text.
"""
buf = six.StringIO()
for line in base64_wrap_iter(text, width):
buf.write(line)
buf.write(u'\n')
text = buf.getvalue()
buf.close()
return text
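# Illustrative sketch (not part of the upstream module): folding base64 text
# into fixed-width lines, each terminated with a newline.
def _example_wrap():
    wrapped = base64_wrap('A' * 10, width=4)
    assert wrapped == 'AAAA\nAAAA\nAA\n'
    assert list(base64_wrap_iter('A' * 10, width=4)) == ['AAAA', 'AAAA', 'AA']
    return wrapped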
|
sajeeshcs/nested_projects_keystone
|
keystone/common/base64utils.py
|
Python
|
apache-2.0
| 13,107
|
#!/usr/bin/env python3
import argparse
import random
import time
def bin(number):
return "{0:5b}".format(number).replace(' ','0')
def initialize(population):
return [bin(random.randint(0,31)) for x in range(0, population)]
def evaluate(population):
tuples = []
suma = 0
end = False
for chaval in population:
value = int(chaval, 2)
y = value**2
tuples.append((value, y, 0))
suma += y
if value == 31:
end = True
return tuples, suma, end
def generate_prob(population,suma):
tuples = []
for chaval in population:
probability = round(chaval[1] / suma,2)
tuples.append((chaval[0], chaval[1], probability))
return tuples
def ruleta(population):
random.shuffle(population)
random.shuffle(population)
rand_num = random.randint(1,100)
try:
rand_inv = 1 / rand_num
except ZeroDivisionError:
rand_inv = 0
#print("random_ruleta: %f" % rand_inv)
suma = 0
chaval = population[-1]
for idx,chaval in enumerate(population):
suma += chaval[2]
if rand_inv <= suma:
break
return chaval
def crossover(mom,dad):
point = random.randint(0,4)
mom_bin = bin(mom[0])
dad_bin = bin(dad[0])
f_half_dad = dad_bin[:point]
s_half_dad = dad_bin[point:]
f_half_mom = mom_bin[:point]
s_half_mom = mom_bin[point:]
child_1 = f_half_mom + s_half_dad
child_2 = f_half_dad + s_half_mom
return child_1,child_2
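# Illustrative sketch (not part of the original script): single-point crossover
# on two 5-bit parents. crossover() draws the cut point at random, so the RNG
# is seeded here only to make the example reproducible; the parent tuples are
# made-up (value, fitness, probability) triples.
def _example_crossover():
    random.seed(0)                    # fixed seed -> deterministic cut point
    mom = (19, 19 ** 2, 0.0)          # bin(19) -> '10011'
    dad = (28, 28 ** 2, 0.0)          # bin(28) -> '11100'
    child_1, child_2 = crossover(mom, dad)
    assert len(child_1) == len(child_2) == 5   # children stay 5-bit strings
    return child_1, child_2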
def main(ngenerations):
initial = initialize(4)
evaluated,suma,end = evaluate(initial)
evaluated_with_p = generate_prob(evaluated,suma)
generations = {}
last_generation = 0
for x in range(0, ngenerations):
last_generation += 1
try:
generations[str(initial)] += 1
except KeyError:
generations[str(initial)] = 1
child_1,child_2 = crossover(ruleta(evaluated_with_p),ruleta(evaluated_with_p))
child_3,child_4 = crossover(ruleta(evaluated_with_p),ruleta(evaluated_with_p))
initial = [child_1, child_2,
child_3, child_4]
evaluated,suma,end = evaluate(initial)
evaluated_with_p = generate_prob(evaluated,suma)
if end:
break
print("Last Generation: #%d" % last_generation)
for child in evaluated_with_p:
print(child)
for generation in generations.items():
print(generation)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-g', "--generations", help="Generations", type=int)
args = parser.parse_args()
if args.generations:
main(args.generations)
else:
parser.print_help()
|
VictorRodriguez/personal
|
ec-ea/practices/pract2/sga.py
|
Python
|
apache-2.0
| 2,812
|
from typing import List
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
row = len(grid)
col = len(grid[0])
dp = [[0]*col for i in range(row)]
minPath = 0
return self.findPath(grid, row-1, col-1, dp)
def findPath(self, grid, i, j, dp):
#print(i,j, minPath)
if dp[i][j]:
return dp[i][j]
if i == 0 and j == 0:
dp[i][j] = grid[i][j]
return grid[i][j]
elif i == 0:
dp[i][j] = grid[i][j] + self.findPath(grid, i,max(0,j-1), dp)
return dp[i][j] #element to left
elif j == 0:
return self.findPath(grid, max(0,i-1),j, dp)+grid[i][j] #element on top
else:
a = self.findPath(grid, i,max(0,j-1), dp)+grid[i][j] #element to left
b = self.findPath(grid, max(0,i-1),j, dp)+grid[i][j] #element on top
dp[i][j] = min(a,b)
return dp[i][j]
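# Illustrative usage sketch (not part of the original solution): the classic
# 3x3 grid, where the cheapest path 1 -> 3 -> 1 -> 1 -> 1 costs 7.
if __name__ == "__main__":
    grid = [[1, 3, 1],
            [1, 5, 1],
            [4, 2, 1]]
    print(Solution().minPathSum(grid))   # expected output: 7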
|
saisankargochhayat/algo_quest
|
leetcode/64.MinSumPath/soln.py
|
Python
|
apache-2.0
| 951
|
# Copyright 2017 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import inspect
import pecan
import wsme
from wsme import types as wtypes
from pecan import rest
class APIBase(wtypes.Base):
created_at = wsme.wsattr(datetime.datetime, readonly=True)
"""The time in UTC at which the object is created"""
updated_at = wsme.wsattr(datetime.datetime, readonly=True)
"""The time in UTC at which the object is updated"""
def as_dict(self):
"""Render this object as a dict of its fields."""
return dict((k, getattr(self, k))
for k in self.fields
if hasattr(self, k) and getattr(self, k) != wsme.Unset)
class CyborgController(rest.RestController):
def _handle_patch(self, method, remainder, request=None):
"""Routes ``PATCH`` _custom_actions."""
# route to a patch_all or get if no additional parts are available
if not remainder or remainder == ['']:
controller = self._find_controller('patch_all', 'patch')
if controller:
return controller, []
pecan.abort(404)
controller = getattr(self, remainder[0], None)
if controller and not inspect.ismethod(controller):
return pecan.routing.lookup_controller(controller, remainder[1:])
# route to custom_action
match = self._handle_custom_action(method, remainder, request)
if match:
return match
# finally, check for the regular patch_one/patch requests
controller = self._find_controller('patch_one', 'patch')
if controller:
return controller, remainder
pecan.abort(405)
|
openstack/nomad
|
cyborg/api/controllers/base.py
|
Python
|
apache-2.0
| 2,260
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Formatter for ASL securityd log file."""
from plaso.formatters import interface
class MacSecuritydLogFormatter(interface.ConditionalEventFormatter):
"""Formatter for ASL Securityd file."""
DATA_TYPE = 'mac:asl:securityd:line'
FORMAT_STRING_PIECES = [
u'Sender: {sender}',
u'({sender_pid})',
u'Level: {level}',
u'Facility: {facility}',
u'Text: {message}']
FORMAT_STRING_SHORT_PIECES = [u'Text: {message}']
SOURCE_LONG = 'Mac ASL Securityd Log'
SOURCE_SHORT = 'LOG'
|
cvandeplas/plaso
|
plaso/formatters/mac_securityd.py
|
Python
|
apache-2.0
| 1,215
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import py4j
class CapturedException(Exception):
def __init__(self, desc, stackTrace):
self.desc = desc
self.stackTrace = stackTrace
def __str__(self):
return repr(self.desc)
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
def capture_sql_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except py4j.protocol.Py4JJavaError as e:
s = e.java_exception.toString()
stackTrace = '\n\t at '.join(map(lambda x: x.toString(),
e.java_exception.getStackTrace()))
if s.startswith('org.apache.spark.sql.AnalysisException: '):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.analysis'):
raise AnalysisException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.catalyst.parser.ParseException: '):
raise ParseException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.streaming.StreamingQueryException: '):
raise StreamingQueryException(s.split(': ', 1)[1], stackTrace)
if s.startswith('org.apache.spark.sql.execution.QueryExecutionException: '):
raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
if s.startswith('java.lang.IllegalArgumentException: '):
raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
raise
return deco
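# Illustrative sketch (not part of the upstream module): when no Py4JJavaError
# is raised the decorator is a transparent pass-through, so any callable can
# be wrapped; the wrapped function here is made up.
def _example_capture_passthrough():
    @capture_sql_exception
    def add(a, b):
        return a + b
    return add(1, 2)   # -> 3; exception translation only triggers on Py4J errors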
def install_exception_handler():
"""
    Hook an exception handler into Py4j, which can capture some SQL exceptions in Java.
    When calling the Java API, it will call `get_return_value` to parse the returned object.
    If any exception happens in the JVM, the result will be a Java exception object, which
    raises py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one
    that can capture the Java exception and throw a Python one (with the same error message).
    It is idempotent and can be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def toJArray(gateway, jtype, arr):
"""
Convert python list to java type array
:param gateway: Py4j Gateway
:param jtype: java type of element in array
:param arr: python type list
"""
jarr = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarr[i] = arr[i]
return jarr
|
wangyixiaohuihui/spark2-annotation
|
python/pyspark/sql/utils.py
|
Python
|
apache-2.0
| 4,112
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import os
import sqlite3
import sys
from collections import OrderedDict, defaultdict
from functools import wraps
from warnings import warn
import numpy as np
import pyproj
import regex
from cf_units import Unit
from compliance_checker import cfutil
from compliance_checker.base import BaseCheck, BaseNCCheck, Result, TestCtx
from compliance_checker.cf import util
from compliance_checker.cf.appendix_d import (
dimless_vertical_coordinates_1_6,
dimless_vertical_coordinates_1_7,
no_missing_terms,
)
from compliance_checker.cf.appendix_e import cell_methods16, cell_methods17
from compliance_checker.cf.appendix_f import (
ellipsoid_names17,
grid_mapping_attr_types16,
grid_mapping_attr_types17,
grid_mapping_dict16,
grid_mapping_dict17,
horizontal_datum_names17,
prime_meridian_names17,
)
logger = logging.getLogger(__name__)
def print_exceptions(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
from traceback import print_exc
print_exc()
return wrapper
# helper to see if we should do DSG tests
def is_likely_dsg(func):
@wraps(func)
def _dec(s, ds):
if hasattr(ds, "featureType"):
return func(s, ds)
# @TODO: skips if we have formalized skips
return None
return _dec
class CFBaseCheck(BaseCheck):
"""
CF Convention Checker Base
"""
def __init__(self, options=None):
# The compliance checker can be run on multiple datasets in a single
        # instantiation, so caching values has to be done by the unique identifier
# for each dataset loaded.
# Each default dict is a key, value mapping from the dataset object to
# a list of variables
super(CFBaseCheck, self).__init__(options)
self._coord_vars = defaultdict(list)
self._ancillary_vars = defaultdict(list)
self._clim_vars = defaultdict(list)
self._metadata_vars = defaultdict(list)
self._boundary_vars = defaultdict(list)
self._geophysical_vars = defaultdict(list)
self._aux_coords = defaultdict(list)
self._std_names = util.StandardNameTable()
self.section_titles = { # dict of section headers shared by grouped checks
"2.2": "§2.2 Data Types",
"2.3": "§2.3 Naming Conventions",
"2.4": "§2.4 Dimensions",
"2.5": "§2.5 Variables",
"2.6": "§2.6 Attributes",
"3.1": "§3.1 Units",
"3.2": "§3.2 Long Name",
"3.3": "§3.3 Standard Name",
"3.4": "§3.4 Ancillary Data",
"3.5": "§3.5 Flags",
"4": "§4 Coordinate Types",
"4.1": "§4.1 Latitude Coordinate",
"4.2": "§4.2 Longitude Coordinate",
"4.3": "§4.3 Vertical Coordinate",
"4.4": "§4.4 Time Coordinate",
"4.5": "§4.5 Discrete Axis",
"5": "§5 Coordinate Systems",
"5.1": "§5.1 Independent Latitude, Longitude, Vertical, and Time Axes",
"5.2": "§5.2 2-D Latitude, Longitude, Coordinate Variables",
"5.3": "§5.3 Reduced Horizontal Grid",
"5.4": "§5.4 Timeseries of Station Data",
"5.5": "§5.5 Trajectories",
"5.6": "§5.6 Horizontal Coordinate Reference Systems, Grid Mappings, Projections",
"5.7": "§5.7 Scalar Coordinate Variables",
"6.1": "§6.1 Labels",
"6.2": "§6.2 Alternative Coordinates",
"7.1": "§7.1 Cell Boundaries",
"7.2": "§7.2 Cell Measures",
"7.3": "§7.3 Cell Methods",
"7.4": "§7.4 Climatological Statistics",
"8.1": "§8.1 Packed Data",
"8.2": "§8.2 Compression by Gathering",
"9.1": "§9.1 Features and feature types",
"9.2": "§9.2 Collections, instances, and elements",
"9.3": "§9.3 Representations of Collections of features in data variables",
"9.4": "§9.4 The featureType attribute",
"9.5": "§9.5 Coordinates and metadata",
"9.6": "§9.6 Missing Data",
}
################################################################################
# Helper Methods - var classifications, etc
################################################################################
def setup(self, ds):
"""
Initialize various special variable types within the class.
Mutates a number of instance variables.
:param netCDF4.Dataset ds: An open netCDF dataset
"""
self.coord_vars = self._find_coord_vars(ds)
self._find_aux_coord_vars(ds)
self._find_ancillary_vars(ds)
self._find_clim_vars(ds)
self._find_boundary_vars(ds)
self._find_metadata_vars(ds)
self._find_cf_standard_name_table(ds)
self._find_geophysical_vars(ds)
coord_containing_vars = ds.get_variables_by_attributes(
coordinates=lambda val: isinstance(val, str)
)
# coordinate data variables
# Excerpt from "§1.3 Overview" on coordinate data
# There are two methods used to identify variables that contain
# coordinate data. The first is to use the NUG-defined "coordinate
# variables." The use of coordinate variables is required for all
# dimensions that correspond to one dimensional space or time
        # coordinates. In cases where coordinate variables are not applicable,
# the variables containing coordinate data are identified by the
# coordinates attribute.
# first read in variables referred to in coordinates which exist
# in the dataset
self.coord_data_vars = set()
for var in coord_containing_vars:
for coord_var_name in var.coordinates.strip().split(" "):
if coord_var_name in ds.variables:
self.coord_data_vars.add(coord_var_name)
# then add in the NUG coordinate variables -- single dimension with
# dimension name the same as coordinates
self.coord_data_vars.update(self.coord_vars)
def check_grid_mapping(self, ds):
"""
5.6 When the coordinate variables for a horizontal grid are not
longitude and latitude, it is required that the true latitude and
longitude coordinates be supplied via the coordinates attribute. If in
addition it is desired to describe the mapping between the given
coordinate variables and the true latitude and longitude coordinates,
the attribute grid_mapping may be used to supply this description.
This attribute is attached to data variables so that variables with
different mappings may be present in a single file. The attribute takes
a string value which is the name of another variable in the file that
provides the description of the mapping via a collection of attached
attributes. This variable is called a grid mapping variable and is of
arbitrary type since it contains no data. Its purpose is to act as a
container for the attributes that define the mapping.
The one attribute that all grid mapping variables must have is
grid_mapping_name which takes a string value that contains the mapping's
name. The other attributes that define a specific mapping depend on the
value of grid_mapping_name. The valid values of grid_mapping_name along
with the attributes that provide specific map parameter values are
described in Appendix F, Grid Mappings.
When the coordinate variables for a horizontal grid are longitude and
latitude, a grid mapping variable with grid_mapping_name of
latitude_longitude may be used to specify the ellipsoid and prime
meridian.
In order to make use of a grid mapping to directly calculate latitude
and longitude values it is necessary to associate the coordinate
variables with the independent variables of the mapping. This is done by
assigning a standard_name to the coordinate variable. The appropriate
values of the standard_name depend on the grid mapping and are given in
Appendix F, Grid Mappings.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = OrderedDict()
grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
# Check the grid_mapping attribute to be a non-empty string and that its reference exists
for variable in ds.get_variables_by_attributes(
grid_mapping=lambda x: x is not None
):
grid_mapping = getattr(variable, "grid_mapping", None)
defines_grid_mapping = self.get_test_ctx(
BaseCheck.HIGH, self.section_titles["5.6"], variable.name
)
defines_grid_mapping.assert_true(
(isinstance(grid_mapping, str) and grid_mapping),
"{}'s grid_mapping attribute must be a "
+ "space-separated non-empty string".format(variable.name),
)
if isinstance(grid_mapping, str):
# TODO (badams): refactor functionality to split functionality
# into requisite classes
if ":" in grid_mapping and self._cc_spec_version >= "1.7":
colon_count = grid_mapping.count(":")
re_all = regex.findall(
r"(\w+):\s*((?:\w+\s+)*(?:\w+)(?![\w:]))", grid_mapping
)
if colon_count != len(re_all):
defines_grid_mapping.out_of += 1
defines_grid_mapping.messages.append(
"Could not consume entire grid_mapping expression, please check for well-formedness"
)
else:
for grid_var_name, coord_var_str in re_all:
defines_grid_mapping.assert_true(
grid_var_name in ds.variables,
"grid mapping variable {} must exist in this dataset".format(
grid_var_name
),
)
for ref_var in coord_var_str.split():
defines_grid_mapping.assert_true(
ref_var in ds.variables,
"Coordinate-related variable {} referenced by grid_mapping variable {} must exist in this dataset".format(
ref_var, grid_var_name
),
)
else:
for grid_var_name in grid_mapping.split():
defines_grid_mapping.assert_true(
grid_var_name in ds.variables,
"grid mapping variable {} must exist in this dataset".format(
grid_var_name
),
)
ret_val[variable.name] = defines_grid_mapping.to_result()
# Check the grid mapping variables themselves
for grid_var_name in grid_mapping_variables:
valid_grid_mapping = self.get_test_ctx(
BaseCheck.HIGH, self.section_titles["5.6"], grid_var_name
)
grid_var = ds.variables[grid_var_name]
grid_mapping_name = getattr(grid_var, "grid_mapping_name", None)
# Grid mapping name must be in appendix F
valid_grid_mapping.assert_true(
grid_mapping_name in self.grid_mapping_dict,
"{} is not a valid grid_mapping_name.".format(grid_mapping_name)
+ " See Appendix F for valid grid mappings",
)
# The self.grid_mapping_dict has a values of:
# - required attributes
# - optional attributes (can't check)
# - required standard_names defined
# - at least one of these attributes must be defined
# We can't do any of the other grid mapping checks if it's not a valid grid mapping name
if grid_mapping_name not in self.grid_mapping_dict:
ret_val[grid_mapping_name] = valid_grid_mapping.to_result()
continue
grid_mapping = self.grid_mapping_dict[grid_mapping_name]
required_attrs = grid_mapping[0]
# Make sure all the required attributes are defined
for req in required_attrs:
valid_grid_mapping.assert_true(
hasattr(grid_var, req),
"{} is a required attribute for grid mapping {}".format(
req, grid_mapping_name
),
)
# Make sure that exactly one of the exclusive attributes exist
if len(grid_mapping) == 4:
at_least_attr = grid_mapping[3]
number_found = 0
for attr in at_least_attr:
if hasattr(grid_var, attr):
number_found += 1
valid_grid_mapping.assert_true(
number_found == 1,
"grid mapping {}".format(grid_mapping_name)
+ "must define exactly one of these attributes: "
+ "{}".format(" or ".join(at_least_attr)),
)
# Make sure that exactly one variable is defined for each of the required standard_names
expected_std_names = grid_mapping[2]
for expected_std_name in expected_std_names:
found_vars = ds.get_variables_by_attributes(
standard_name=expected_std_name
)
valid_grid_mapping.assert_true(
len(found_vars) == 1,
"grid mapping {} requires exactly ".format(grid_mapping_name)
+ "one variable with standard_name "
+ "{} to be defined".format(expected_std_name),
)
ret_val[grid_var_name] = valid_grid_mapping.to_result()
return ret_val
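    # Illustrative sketch (not part of the upstream checker): how the CF-1.7
    # extended grid_mapping expression is tokenised by the regex used above.
    # The grid mapping and coordinate variable names are made up.
    def _example_grid_mapping_expression(self):
        expression = "crsOSGB: x y crsWGS84: lat lon"
        pairs = regex.findall(
            r"(\w+):\s*((?:\w+\s+)*(?:\w+)(?![\w:]))", expression
        )
        # -> [('crsOSGB', 'x y'), ('crsWGS84', 'lat lon')], one pair per ':'
        assert expression.count(":") == len(pairs)
        return pairs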
def check_conventions_version(self, ds):
"""
CF §2.6.1 the NUG defined global attribute Conventions to the string
value "CF-<version_number>"; check the Conventions attribute contains
the appropriate string.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
valid = False
reasoning = []
correct_version_string = "{}-{}".format(
self._cc_spec, self._cc_spec_version
).upper()
if hasattr(ds, "Conventions"):
conventions = regex.split(r",|\s+", getattr(ds, "Conventions", ""))
for convention in conventions:
if convention == correct_version_string:
valid = True
break
else:
reasoning = [
"§2.6.1 Conventions global attribute does not contain "
'"{}"'.format(correct_version_string)
]
else:
valid = False
reasoning = ["§2.6.1 Conventions field is not present"]
return Result(
BaseCheck.MEDIUM, valid, self.section_titles["2.6"], msgs=reasoning
)
def _check_dimensionless_vertical_coordinates(
self,
ds,
deprecated_units,
version_specific_check,
version_specific_dimless_vertical_coord_dict,
):
"""
Check the validity of dimensionless coordinates under CF
:param netCDF4.Dataset ds: An open netCDF dataset
:param list deprecated_units: list of string names of deprecated units
:param function version_specific_check: version-specific implementation to check dimensionless vertical coord
:param dict version_specific_dimless_coord_dict: version-specific dict of dimensionless vertical coords and computed standard names
:return: List of results
"""
ret_val = []
z_variables = cfutil.get_z_variables(ds)
# call version-specific implementation
for name in z_variables:
version_specific_check(
ds,
name,
deprecated_units,
ret_val,
version_specific_dimless_vertical_coord_dict,
)
return ret_val
def _check_formula_terms(self, ds, coord, dimless_coords_dict):
"""
Checks a dimensionless vertical coordinate contains valid formula_terms
- formula_terms is a non-empty string
        - formula_terms matches the expected component: variable pattern
- every variable defined in formula_terms exists
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
variable = ds.variables[coord]
standard_name = getattr(variable, "standard_name", None)
formula_terms = getattr(variable, "formula_terms", None)
valid_formula_terms = TestCtx(BaseCheck.HIGH, self.section_titles["4.3"])
valid_formula_terms.assert_true(
isinstance(formula_terms, str) and formula_terms,
"§4.3.2: {}'s formula_terms is a required attribute and must be a non-empty string"
"".format(coord),
)
# We can't check any more
if not formula_terms:
return valid_formula_terms.to_result()
# check that the formula_terms are well formed and are present
# The pattern for formula terms is always component: variable_name
# the regex grouping always has component names in even positions and
# the corresponding variable name in odd positions.
matches = regex.findall(
r"([A-Za-z][A-Za-z0-9_]*: )([A-Za-z][A-Za-z0-9_]*)", variable.formula_terms
)
terms = set(m[0][:-2] for m in matches)
# get the variables named in the formula terms and check if any
# are not present in the dataset
missing_vars = sorted(set(m[1] for m in matches) - set(ds.variables))
missing_fmt = "The following variable(s) referenced in {}:formula_terms are not present in the dataset: {}"
valid_formula_terms.assert_true(
len(missing_vars) == 0, missing_fmt.format(coord, ", ".join(missing_vars))
)
# try to reconstruct formula_terms by adding space in between the regex
# matches. If it doesn't exactly match the original, the formatting
# of the attribute is incorrect
reconstructed_formula = " ".join(m[0] + m[1] for m in matches)
valid_formula_terms.assert_true(
reconstructed_formula == formula_terms,
"Attribute formula_terms is not well-formed",
)
valid_formula_terms.assert_true(
standard_name in dimless_coords_dict,
"unknown standard_name '{}' for dimensionless vertical coordinate {}"
"".format(standard_name, coord),
)
if standard_name not in dimless_coords_dict:
return valid_formula_terms.to_result()
valid_formula_terms.assert_true(
no_missing_terms(standard_name, terms, dimless_coords_dict),
"{}'s formula_terms are invalid for {}, please see appendix D of CF 1.6"
"".format(coord, standard_name),
)
return valid_formula_terms.to_result()
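    # Illustrative sketch (not part of the upstream checker): the formula_terms
    # pattern above captures "component: variable" pairs; the attribute value
    # below is a made-up example in the CF "sigma" style.
    def _example_formula_terms_pairs(self):
        attr_value = "sigma: sig ps: surface_pressure ptop: press_top"
        matches = regex.findall(
            r"([A-Za-z][A-Za-z0-9_]*: )([A-Za-z][A-Za-z0-9_]*)", attr_value
        )
        terms = set(m[0][:-2] for m in matches)    # component names
        referenced = set(m[1] for m in matches)    # referenced variable names
        assert terms == {"sigma", "ps", "ptop"}
        return matches, referenced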
def _check_grid_mapping_attr_condition(self, attr, attr_name, ret_val):
"""
Evaluate a condition (or series of conditions) for a particular
attribute. Designed to be overloaded in subclass implementations.
        :param attr: attribute to test condition for
:param str attr_name: name of the attribute
:param list ret_val: list of results to append to
:rtype None
:return None
"""
raise NotImplementedError
def _dims_in_order(self, dimension_order):
"""
:param list dimension_order: A list of axes
:rtype: bool
:return: Returns True if the dimensions are in order U*, T, Z, Y, X,
False otherwise
"""
regx = regex.compile(r"^[^TZYX]*T?Z?Y?X?$")
dimension_string = "".join(dimension_order)
return regx.match(dimension_string) is not None
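    # Illustrative sketch (not part of the upstream checker): the axis-order
    # rule above accepts any non-spatiotemporal axes first, then at most one
    # each of T, Z, Y, X in that order. The axis lists are made up.
    def _example_dims_in_order(self):
        assert self._dims_in_order(["T", "Z", "Y", "X"]) is True
        assert self._dims_in_order(["A", "A", "T", "Y", "X"]) is True
        assert self._dims_in_order(["X", "T"]) is False   # X may not precede T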
def _parent_var_attr_type_check(self, attr_name, var, ctx):
"""
Checks that an attribute has an equivalent value to a parent variable.
Takes an attribute name, variable, and test context on which to operate.
:param str attr_name: The name of the attribute to be checked
:param netCDF4.Variable var: The variable against which to be checked
:param compliance_checker.base.TestCtx ctx: The associated test context to modify
:rtype None
:return None
"""
attr_val = var.getncattr(attr_name)
if isinstance(attr_val, (str, bytes)):
type_match = (var.dtype is str) or (var.dtype.kind == "S")
val_type = type(attr_val)
else:
val_type = attr_val.dtype.type
type_match = val_type == var.dtype.type
ctx.assert_true(
type_match,
"Attribute '{}' (type: {}) and parent variable '{}' (type: {}) "
"must have equivalent datatypes".format(
attr_name, val_type, var.name, var.dtype.type
),
)
def _find_aux_coord_vars(self, ds, refresh=False):
"""
Returns a list of auxiliary coordinate variables
An auxiliary coordinate variable is any netCDF variable that contains
coordinate data, but is not a coordinate variable (in the sense of the term
defined by CF).
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: List of variable names (str) that are defined to be auxiliary
coordinate variables.
"""
if self._aux_coords.get(ds, None) and refresh is False:
return self._aux_coords[ds]
self._aux_coords[ds] = cfutil.get_auxiliary_coordinate_variables(ds)
return self._aux_coords[ds]
def _find_boundary_vars(self, ds, refresh=False):
"""
Returns dictionary of boundary variables mapping the variable instance
to the name of the variable acting as a boundary variable.
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: A list containing strings with boundary variable names.
"""
if self._boundary_vars.get(ds, None) and refresh is False:
return self._boundary_vars[ds]
self._boundary_vars[ds] = cfutil.get_cell_boundary_variables(ds)
return self._boundary_vars[ds]
def _find_ancillary_vars(self, ds, refresh=False):
"""
Returns a list of variable names that are defined as ancillary
variables in the dataset ds.
An ancillary variable generally is a metadata container and referenced
from other variables via a string reference in an attribute.
- via ancillary_variables (3.4)
- "grid mapping var" (5.6)
- TODO: more?
The result is cached by the passed in dataset object inside of this
checker. Pass refresh=True to redo the cached value.
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: List of variable names (str) that are defined as ancillary
variables in the dataset ds.
"""
        # Use the cached version if it exists and is not empty
if self._ancillary_vars.get(ds, None) and refresh is False:
return self._ancillary_vars[ds]
# Invalidate the cache at all costs
self._ancillary_vars[ds] = []
for name, var in ds.variables.items():
if hasattr(var, "ancillary_variables"):
for anc_name in var.ancillary_variables.split(" "):
if anc_name in ds.variables:
self._ancillary_vars[ds].append(anc_name)
if hasattr(var, "grid_mapping"):
gm_name = var.grid_mapping
if gm_name in ds.variables:
self._ancillary_vars[ds].append(gm_name)
return self._ancillary_vars[ds]
def _find_clim_vars(self, ds, refresh=False):
"""
Returns a list of variables that are likely to be climatology variables based on CF §7.4
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: A list containing strings with geophysical variable
names.
"""
if self._clim_vars.get(ds, None) and refresh is False:
return self._clim_vars[ds]
climatology_variable = cfutil.get_climatology_variable(ds)
if climatology_variable:
self._clim_vars[ds].append(climatology_variable)
return self._clim_vars[ds]
def _find_cf_standard_name_table(self, ds):
"""
Parse out the `standard_name_vocabulary` attribute and download that
version of the cf standard name table. If the standard name table has
already been downloaded, use the cached version. Modifies `_std_names`
attribute to store standard names. Returns True if the file exists and
False if it fails to download.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: bool
"""
# Get the standard name vocab
standard_name_vocabulary = getattr(ds, "standard_name_vocabulary", "")
# Try to parse this attribute to get version
version = None
try:
if "cf standard name table" in standard_name_vocabulary.lower():
version = [
s.strip("(").strip(")").strip("v").strip(",")
for s in standard_name_vocabulary.split()
]
# This assumes that table version number won't start with 0.
version = [
s
for s in version
if s.isdigit() and len(s) <= 2 and not s.startswith("0")
]
if len(version) > 1:
return False
else:
try:
version = version[0]
except IndexError:
warn(
"Cannot extract CF standard name version number "
"from standard_name_vocabulary string"
)
return False
else:
# Can't parse the attribute, use the packaged version
return False
# usually raised from .lower() with an incompatible (non-string)
# data type
except AttributeError:
warn(
"Cannot convert standard name table to lowercase. This can "
"occur if a non-string standard_name_vocabulary global "
"attribute is supplied"
)
return False
if version.startswith("v"): # i.e 'v34' -> '34' drop the v
version = version[1:]
# If the packaged version is what we're after, then we're good
if version == self._std_names._version:
print(
"Using packaged standard name table v{0}".format(version),
file=sys.stderr,
)
return False
# Try to download the version specified
try:
data_directory = util.create_cached_data_dir()
location = os.path.join(
data_directory, "cf-standard-name-table-test-{0}.xml".format(version)
)
# Did we already download this before?
if not os.path.isfile(location):
util.download_cf_standard_name_table(version, location)
print(
"Using downloaded standard name table v{0}".format(version),
file=sys.stderr,
)
else:
print(
"Using cached standard name table v{0} from {1}".format(
version, location
),
file=sys.stderr,
)
self._std_names = util.StandardNameTable(location)
return True
except Exception as e:
# There was an error downloading the CF table. That's ok, we'll just use the packaged version
warn(
"Problem fetching standard name table:\n{0}\n"
"Using packaged v{1}".format(e, self._std_names._version)
)
return False
def _find_coord_vars(self, ds, refresh=False):
"""
Returns a list of variable names that identify as coordinate variables.
The result is cached by the passed in dataset object inside of this
checker. Pass refresh=True to redo the cached value.
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: A list of variables names (str) that are defined as coordinate
variables in the dataset ds.
"""
if ds in self._coord_vars and refresh is False:
return self._coord_vars[ds]
self._coord_vars[ds] = cfutil.get_coordinate_variables(ds)
return self._coord_vars[ds]
def _find_geophysical_vars(self, ds, refresh=False):
"""
Returns a list of geophysical variables. Modifies
`self._geophysical_vars`
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: A list containing strings with geophysical variable
names.
"""
if self._geophysical_vars.get(ds, None) and refresh is False:
return self._geophysical_vars[ds]
self._geophysical_vars[ds] = cfutil.get_geophysical_variables(ds)
return self._geophysical_vars[ds]
def _find_metadata_vars(self, ds, refresh=False):
"""
Returns a list of netCDF variable instances for those that are likely metadata variables
:param netCDF4.Dataset ds: An open netCDF dataset
:param bool refresh: if refresh is set to True, the cache is
invalidated.
:rtype: list
:return: List of variable names (str) that are likely metadata
variable candidates.
"""
if self._metadata_vars.get(ds, None) and refresh is False:
return self._metadata_vars[ds]
self._metadata_vars[ds] = []
for name, var in ds.variables.items():
if name in self._find_ancillary_vars(ds) or name in self._find_coord_vars(
ds
):
continue
if name in (
"platform_name",
"station_name",
"instrument_name",
"station_id",
"platform_id",
"surface_altitude",
):
self._metadata_vars[ds].append(name)
elif getattr(var, "cf_role", "") != "":
self._metadata_vars[ds].append(name)
elif (
getattr(var, "standard_name", None) is None and len(var.dimensions) == 0
):
self._metadata_vars[ds].append(name)
return self._metadata_vars[ds]
def _get_coord_axis_map(self, ds):
"""
Returns a dictionary mapping each coordinate to a letter identifier
describing the _kind_ of coordinate.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: dict
:return: A dictionary with variable names mapped to axis abbreviations,
i.e. {'longitude': 'X', ... 'pressure': 'Z'}
"""
expected = ["T", "Z", "Y", "X"]
coord_vars = self._find_coord_vars(ds)
coord_axis_map = {}
# L - Unlimited Coordinates
# T - Time coordinates
# Z - Depth/Altitude Coordinate
# Y - Y-Coordinate (latitude)
# X - X-Coordinate (longitude)
# A - Auxiliary Coordinate
# I - Instance Coordinate
time_variables = cfutil.get_time_variables(ds)
lat_variables = cfutil.get_latitude_variables(ds)
lon_variables = cfutil.get_longitude_variables(ds)
z_variables = cfutil.get_z_variables(ds)
for coord_name in coord_vars:
coord_var = ds.variables[coord_name]
axis = getattr(coord_var, "axis", None)
standard_name = getattr(coord_var, "standard_name", None)
# Unlimited dimensions must come first
if ds.dimensions[coord_name].isunlimited():
coord_axis_map[coord_name] = "L"
# axis takes precedence over standard_name
elif axis in expected:
coord_axis_map[coord_name] = axis
elif standard_name == "time":
coord_axis_map[coord_name] = "T"
elif standard_name == "longitude":
coord_axis_map[coord_name] = "X"
elif standard_name == "latitude":
coord_axis_map[coord_name] = "Y"
elif standard_name in ["height", "depth", "altitude"]:
coord_axis_map[coord_name] = "Z"
elif cfutil.is_compression_coordinate(ds, coord_name):
coord_axis_map[coord_name] = "C"
elif coord_name in time_variables:
coord_axis_map[coord_name] = "T"
elif coord_name in z_variables:
coord_axis_map[coord_name] = "Z"
elif coord_name in lat_variables:
coord_axis_map[coord_name] = "Y"
elif coord_name in lon_variables:
coord_axis_map[coord_name] = "X"
else:
# mark the coordinate variable as unknown
coord_axis_map[coord_name] = "U"
for dimension in self._get_instance_dimensions(ds):
if dimension not in coord_axis_map:
coord_axis_map[dimension] = "I"
# Dimensions of auxiliary coordinate variables will be marked with A.
# This is useful to help determine if the dimensions are used like a
# mapping from grid coordinates to physical lat/lon
for coord_name in self._find_aux_coord_vars(ds):
coord_var = ds.variables[coord_name]
# Skip label auxiliary coordinates
if hasattr(coord_var.dtype, "char") and coord_var.dtype.char == "S":
continue
elif coord_var.dtype == str:
continue
for dimension in coord_var.dimensions:
if dimension not in coord_axis_map:
coord_axis_map[dimension] = "A"
# If a dimension does not have a coordinate variable mark it as unknown
# 'U'
for dimension in ds.dimensions:
if dimension not in coord_axis_map:
coord_axis_map[dimension] = "U"
return coord_axis_map
def _get_coord_vars(self, ds):
coord_vars = []
for name, var in ds.variables.items():
if (name,) == var.dimensions:
coord_vars.append(name)
return coord_vars
def _get_dimension_order(self, ds, name, coord_axis_map):
"""
Returns a list of strings corresponding to the named axis of the dimensions for a variable.
Example::
self._get_dimension_order(ds, 'temperature', coord_axis_map)
--> ['T', 'Y', 'X']
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Name of the variable
:param dict coord_axis_map: A dictionary mapping each coordinate variable and dimension to a named axis
:rtype: list
:return: A list of strings corresponding to the named axis of the dimensions for a variable
"""
retval = []
variable = ds.variables[name]
for dim in variable.dimensions:
retval.append(coord_axis_map[dim])
return retval
def _get_instance_dimensions(self, ds):
"""
Returns a list of dimensions marked as instance dimensions
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:returns: A list of variable dimensions
"""
ret_val = []
for variable in ds.get_variables_by_attributes(
cf_role=lambda x: isinstance(x, str)
):
if variable.ndim > 0:
ret_val.append(variable.dimensions[0])
return ret_val
def _get_pretty_dimension_order(self, ds, name):
"""
Returns a comma separated string of the dimensions for a specified
variable
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: A string with a valid NetCDF variable name for the
dataset
:rtype: str
:return: A comma separated string of the variable's dimensions
"""
dim_names = []
for dim in ds.variables[name].dimensions:
dim_name = dim
if ds.dimensions[dim].isunlimited():
dim_name += " (Unlimited)"
dim_names.append(dim_name)
return ", ".join(dim_names)
def _get_pretty_dimension_order_with_type(self, ds, name, dim_types):
"""
Returns a comma separated string of the dimensions for a specified
variable of format "DIMENSIONS_NAME (DIMENSION_TYPE[, unlimited])"
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: A string with a valid NetCDF variable name for the
dataset
:param list dim_types: A list of strings returned by
_get_dimension_order for the same "name"
:rtype: str
:return: A comma separated string of the variable's dimensions
"""
dim_names = []
for dim, dim_type in zip(ds.variables[name].dimensions, dim_types):
dim_name = "{} ({}".format(dim, dim_type)
if ds.dimensions[dim].isunlimited():
dim_name += ", unlimited)"
else:
dim_name += ")"
dim_names.append(dim_name)
return ", ".join(dim_names)
def _is_station_var(self, var):
"""
Returns True if the NetCDF variable is associated with a station, False
otherwise.
:param netCDF4.Variable var: a variable in an existing NetCDF dataset
:rtype: bool
:return: Status of whether variable appears to be associated with a
station
"""
if getattr(var, "standard_name", None) in (
"platform_name",
"station_name",
"instrument_name",
):
return True
return False
def _split_standard_name(self, standard_name):
"""
Returns a tuple of the standard_name and standard_name modifier
Nones are used to represent the absence of a modifier or standard_name
:rtype: tuple
:return: 2-tuple of standard_name and modifier as strings
"""
if isinstance(standard_name, str) and " " in standard_name:
return standard_name.split(" ", 1)
# if this isn't a string, then it doesn't make sense to split
# -- treat value as standard name with no modifier
else:
return standard_name, None
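    # Illustrative sketch (not part of the upstream checker): a standard_name
    # with a modifier splits into (name, modifier); a bare name keeps a None
    # modifier. The names follow the CF standard name table style.
    def _example_split_standard_name(self):
        name, modifier = self._split_standard_name("air_temperature standard_error")
        assert (name, modifier) == ("air_temperature", "standard_error")
        assert self._split_standard_name("air_temperature") == ("air_temperature", None)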
def check_appendix_a(self, ds):
"""
Validates a CF dataset against the contents of its Appendix A table for
attribute types and locations. Returns a list of results with the
outcomes of the Appendix A validation results against the existing
attributes in the docstring.
:param netCDF4.Variable var: a variable in an existing NetCDF dataset
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: A list of results corresponding to the results returned
"""
# if 'enable_appendix_a_checks' isn't specified in the checks,
# don't do anything on this check
results = []
if "enable_appendix_a_checks" not in self.options:
return results
possible_global_atts = set(ds.ncattrs()).intersection(self.appendix_a.keys())
attr_location_ident = {
"G": "global attributes",
"C": "coordinate data",
"D": "non-coordinate data",
}
def att_loc_print_helper(att_letter):
"""
Returns a string corresponding to attr_location ident in
human-readable form. E.g. an input of 'G' will return
"global attributes (G)"
:param str att_letter: An attribute letter corresponding to the
"Use" column in CF Appendix A
:rtype: str
:return: A string with a human-readable name followed by the input
letter specified
"""
return "{} ({})".format(
attr_location_ident.get(att_letter, "other"), att_letter
)
def _att_loc_msg(att_loc):
"""
Helper method for formatting an error message when an attribute
appears in the improper location corresponding to the "Use" column
in CF Appendix A.
:param set att_loc: A set with the possible valid locations of the
attribute corresponding to the "Use" column
in CF Appendix A
:rtype: str
:return: A human-readable string with the possible valid locations
of the attribute
"""
att_loc_len = len(att_loc)
# this is a fallback in case an empty att_loc is passed
# it generally should not occur
valid_loc = "no locations in the dataset"
loc_sort = sorted(att_loc)
if att_loc_len == 1:
valid_loc = att_loc_print_helper(loc_sort[0])
elif att_loc_len == 2:
valid_loc = "{} and {}".format(
att_loc_print_helper(loc_sort[0]), att_loc_print_helper(loc_sort[1])
)
# shouldn't be reached under normal circumstances, as any attribute
# should be either G, C, or D but if another
# category is added, this will be useful.
else:
valid_loc = ", ".join(loc_sort[:-1]) + ", and {}".format(
att_loc_print_helper(loc_sort[-1])
)
return "This attribute may only appear in {}.".format(valid_loc)
for global_att_name in possible_global_atts:
global_att = ds.getncattr(global_att_name)
att_dict = self.appendix_a[global_att_name]
att_loc = att_dict["attr_loc"]
valid_loc_warn = _att_loc_msg(att_loc)
if att_dict["cf_section"] is not None:
subsection_test = ".".join(att_dict["cf_section"].split(".")[:2])
section_loc = self.section_titles.get(
subsection_test, att_dict["cf_section"]
)
else:
section_loc = None
test_ctx = TestCtx(BaseCheck.HIGH, section_loc)
test_ctx.out_of += 1
if "G" not in att_loc:
test_ctx.messages.append(
'[Appendix A] Attribute "{}" should not be present in global (G) '
"attributes. {}".format(global_att_name, valid_loc_warn)
)
else:
result = self._handle_dtype_check(global_att, global_att_name, att_dict)
if not result[0]:
test_ctx.messages.append(result[1])
else:
test_ctx.score += 1
results.append(test_ctx.to_result())
noncoord_vars = set(ds.variables) - set(self.coord_data_vars)
for var_set, coord_letter in (
(self.coord_data_vars, "C"),
(noncoord_vars, "D"),
):
for var_name in var_set:
var = ds.variables[var_name]
possible_attrs = set(var.ncattrs()).intersection(self.appendix_a.keys())
for att_name in possible_attrs:
att_dict = self.appendix_a[att_name]
if att_dict["cf_section"] is not None:
subsection_test = ".".join(
att_dict["cf_section"].split(".")[:2]
)
section_loc = self.section_titles.get(
subsection_test, att_dict["cf_section"]
)
else:
section_loc = None
test_ctx = TestCtx(BaseCheck.HIGH, section_loc, variable=var_name)
att_loc = att_dict["attr_loc"]
valid_loc_warn = _att_loc_msg(att_loc)
att = var.getncattr(att_name)
test_ctx.out_of += 1
if coord_letter not in att_loc:
test_ctx.messages.append(
'[Appendix A] Attribute "{}" should not be present in {} '
'variable "{}". {}'.format(
att_name,
att_loc_print_helper(coord_letter),
var_name,
valid_loc_warn,
)
)
else:
result = self._handle_dtype_check(att, att_name, att_dict, var)
if not result[0]:
test_ctx.messages.append(result[1])
else:
test_ctx.score += 1
results.append(test_ctx.to_result())
return results
def _check_attr_type(self, attr_name, attr_type, attribute, variable=None):
"""
Check if an attribute `attr` is of the type `attr_type`. Upon getting
a data type of 'D', the attr must have the same data type as the
variable it is assigned to.
        Attributes designated type 'S' must be of type `str`. 'N' requires
        numeric types, and 'D' requires the attribute type to match the type
of the variable it is assigned to.
:param str attr_name: name of attr being checked (to format message)
:param str attr_type: the correct type of the attribute
:param attribute: attribute to check
:param variable: if given, type should match attr
:rtype tuple
:return A two-tuple that contains pass/fail status as a boolean and
a message string (or None if unset) as the second element.
"""
if attr_type == "S":
if not isinstance(attribute, str):
return [False, "{} must be a string".format(attr_name)]
else:
# if it's not a string, it should have a numpy dtype
underlying_dtype = getattr(attribute, "dtype", None)
# TODO check for np.nan separately
if underlying_dtype is None:
return [False, "{} must be a numeric type".format(attr_name)]
# both D and N should be some kind of numeric value
is_numeric = np.issubdtype(underlying_dtype, np.number)
if attr_type == "N":
if not is_numeric:
return [False, "{} must be a numeric type".format(attr_name)]
elif attr_type == "D":
# TODO: handle edge case where variable is unset here
temp_ctx = TestCtx()
self._parent_var_attr_type_check(attr_name, variable, temp_ctx)
var_dtype = getattr(variable, "dtype", None)
if temp_ctx.messages:
return (
False,
"{} must be numeric and must be equivalent to {} dtype".format(
attr_name, var_dtype
),
)
else:
# If we reached here, we fell off with an unrecognized type
return (
False,
"{} has unrecognized type '{}'".format(attr_name, attr_type),
)
# pass if all other possible failure conditions have been evaluated
return (True, None)
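    # Illustrative sketch (not part of the upstream checker): 'S' attributes
    # must be Python strings, while 'N' attributes must carry a numeric numpy
    # dtype; the attribute names and values here are made up.
    def _example_check_attr_type(self):
        ok, msg = self._check_attr_type("long_name", "S", "sea water temperature")
        assert ok and msg is None
        bad = self._check_attr_type("valid_max", "N", "not-a-number")
        assert bad[0] is False and "numeric" in bad[1]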
def _handle_dtype_check(self, attribute, attr_name, attr_dict, variable=None):
"""
Helper function for Appendix A checks.
:param attribute: The value of the attribute being checked
:param str attr_name: The name of the attribute being processed
:param dict attr_dict: The dict entry with type and attribute location
information corresponding to this attribute
:param variable: if given, the variable whose type to check against
:rtype: tuple
:return: A two-tuple that contains pass/fail status as a boolean and
a message string (or None if unset) as the second element.
"""
attr_type = attr_dict["Type"]
if variable is None and "G" not in attr_dict["attr_loc"]:
raise ValueError(
"Non-global attributes must be associated with a " " variable"
)
attr_str = (
"Global attribute {}".format(attr_name)
if "G" in attr_dict["attr_loc"] and variable is None
else "Attribute {} in variable {}".format(attr_name, variable.name)
)
# check the type
return_value = self._check_attr_type(attr_name, attr_type, attribute, variable)
# if the second element is a string, format it
if isinstance(return_value[1], str):
return_value[1] = return_value[1].format(attr_str)
# convert to tuple for immutability and return
return tuple(return_value)
class CFNCCheck(BaseNCCheck, CFBaseCheck):
"""Inherits from both BaseNCCheck and CFBaseCheck to support
checking netCDF datasets. Must inherit in this order, or certain
attributes from BaseNCCheck (like supported_ds) will not be passed to
CFNCCheck."""
pass
appendix_a_base = {
"Conventions": {"Type": "S", "attr_loc": {"G"}, "cf_section": None},
"_FillValue": {"Type": "D", "attr_loc": {"D", "C"}, "cf_section": None},
"add_offset": {"Type": "N", "attr_loc": {"D"}, "cf_section": "8.1"},
"ancillary_variables": {"Type": "S", "attr_loc": {"D"}, "cf_section": "3.4"},
"axis": {"Type": "S", "attr_loc": {"C"}, "cf_section": "4"},
"bounds": {"Type": "S", "attr_loc": {"C"}, "cf_section": "7.1"},
"calendar": {"Type": "S", "attr_loc": {"C"}, "cf_section": "4.4.1"},
"cell_measures": {"Type": "S", "attr_loc": {"D"}, "cf_section": "7.2"},
"cell_methods": {"Type": "S", "attr_loc": {"D"}, "cf_section": "7.3"},
# cf_role type is "C" in document, which does not correspond
# to types used, replaced with "S"
"cf_role": {"Type": "S", "attr_loc": {"C"}, "cf_section": "9.5"},
"climatology": {"Type": "S", "attr_loc": {"C"}, "cf_section": "7.4"},
# comment was removed in this implementation
"compress": {"Type": "S", "attr_loc": {"C"}, "cf_section": "8.2"},
"coordinates": {"Type": "S", "attr_loc": {"D"}, "cf_section": "5"},
# featureType type is "C" in document, which does not
# correspond to types used, replaced with "S"
"featureType": {"Type": "S", "attr_loc": {"G"}, "cf_section": "9.4"},
"flag_masks": {"Type": "D", "attr_loc": {"D"}, "cf_section": "3.5"},
"flag_meanings": {"Type": "S", "attr_loc": {"D"}, "cf_section": "3.5"},
"flag_values": {"Type": "D", "attr_loc": {"D"}, "cf_section": "3.5"},
"formula_terms": {"Type": "S", "attr_loc": {"C"}, "cf_section": "4.3.2"},
"grid_mapping": {"Type": "S", "attr_loc": {"D"}, "cf_section": "5.6"},
"history": {"Type": "S", "attr_loc": {"G"}, "cf_section": None},
#'instance_dimension': {'Type': 'N', 'attr_loc': {'D'}, 'cf_section': '9.3'},
"institution": {"Type": "S", "attr_loc": {"G", "D"}, "cf_section": "2.6.2"},
"leap_month": {"Type": "N", "attr_loc": {"C"}, "cf_section": "4.4.1"},
"leap_year": {"Type": "N", "attr_loc": {"C"}, "cf_section": "4.4.1"},
"long_name": {"Type": "S", "attr_loc": {"D", "C"}, "cf_section": "3.2"},
"missing_value": {"Type": "D", "attr_loc": {"D", "C"}, "cf_section": "2.5.1"},
"month_lengths": {"Type": "N", "attr_loc": {"C"}, "cf_section": "4.4.1"},
"positive": {"Type": "S", "attr_loc": {"C"}, "cf_section": None},
"references": {"Type": "S", "attr_loc": {"G", "D"}, "cf_section": "2.6.2"},
#'sample_dimension': {'Type': 'N', 'attr_loc': {'D'}, 'cf_section': '9.3'},
"scale_factor": {"Type": "N", "attr_loc": {"D"}, "cf_section": "8.1"},
"source": {"Type": "S", "attr_loc": {"G", "D"}, "cf_section": "2.6.2"},
"standard_error_multiplier": {"Type": "N", "attr_loc": {"D"}, "cf_section": None},
"standard_name": {"Type": "S", "attr_loc": {"D", "C"}, "cf_section": "3.3"},
"title": {"Type": "S", "attr_loc": {"G"}, "cf_section": None},
"units": {"Type": "S", "attr_loc": {"D", "C"}, "cf_section": "3.1"},
"valid_max": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": None},
"valid_min": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": None},
"valid_range": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": None},
}
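# Illustrative reading of an appendix_a_base entry (a sketch of how the codes
# above are interpreted by _check_attr_type/_handle_dtype_check): for
# "units": {"Type": "S", "attr_loc": {"D", "C"}, ...}, "S" means the attribute
# must be a string ("N" = numeric, "D" = same dtype as its variable), and the
# attribute may appear on data ("D") and coordinate ("C") variables; "G" marks
# attributes that may appear as global attributes.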
class CF1_6Check(CFNCCheck):
"""CF-1.6-specific implementation of CFBaseCheck; supports checking
netCDF datasets.
These checks are translated documents:
http://cf-pcmdi.llnl.gov/documents/cf-conventions/1.6/cf-conventions.html
http://cf-pcmdi.llnl.gov/conformance/requirements-and-recommendations/1.6/"""
register_checker = True
_cc_spec = "cf"
_cc_spec_version = "1.6"
_cc_description = "Climate and Forecast Conventions (CF)"
_cc_url = "http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html"
_cc_display_headers = {3: "Errors", 2: "Warnings", 1: "Info"}
appendix_a = appendix_a_base
def __init__(self, options=None): # initialize with parent methods and data
super(CF1_6Check, self).__init__(options)
self.cell_methods = cell_methods16
self.grid_mapping_dict = grid_mapping_dict16
self.grid_mapping_attr_types = grid_mapping_attr_types16
###############################################################################
# Chapter 2: NetCDF Files and Components
###############################################################################
def check_data_types(self, ds):
"""
Checks the data type of all netCDF variables to ensure they are valid
data types under CF.
CF §2.2 The netCDF data types char, byte, short, int, float or real, and
double are all acceptable
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
fails = []
total = len(ds.variables)
for k, v in ds.variables.items():
if (
v.dtype is not str
and v.dtype.kind != "S"
and all(
v.dtype.type != t
for t in (
np.character,
np.dtype("|S1"),
np.dtype("b"),
np.dtype("i2"),
np.dtype("i4"),
np.float32,
np.double,
)
)
):
fails.append(
"The variable {} failed because the datatype is {}".format(
k, v.datatype
)
)
return Result(
BaseCheck.HIGH,
(total - len(fails), total),
self.section_titles["2.2"],
msgs=fails,
)
def check_child_attr_data_types(self, ds):
"""
        For any variables which contain any of the following attributes:
            - valid_min/valid_max
            - valid_range
            - actual_range
            - _FillValue
        the data type of the attribute must match the type of its parent variable as specified in the
        NetCDF User Guide (NUG) https://www.unidata.ucar.edu/software/netcdf/docs/attribute_conventions.html,
        referenced in the CF Conventions in Section 2.5.2
        (http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html#missing-data).
        The scale_factor and add_offset attributes are checked separately by
        check_add_offset_scale_factor_type.
:param netCDF4.Dataset ds: open netCDF dataset object
:rtype: compliance_checker.base.Result
"""
ctx = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.5"])
special_attrs = {
"actual_range",
"valid_min",
"valid_max",
"valid_range",
"_FillValue",
}
for var_name, var in ds.variables.items():
for att_name in special_attrs.intersection(var.ncattrs()):
self._parent_var_attr_type_check(att_name, var, ctx)
return ctx.to_result()
def _check_add_offset_scale_factor_type(self, variable, attr_name):
"""
Reusable function for checking both add_offset and scale_factor.
"""
        msg = (
            f"Variable {variable.name} and {attr_name} must be equivalent "
            f"data types or {variable.name} must be of type byte, short, or int "
            f"and {attr_name} must be float or double"
        )
att = getattr(variable, attr_name, None)
        if not isinstance(att, (np.number, float)):  # can't compare dtypes
            val = False
        else:
            # np.asarray yields a dtype even when att is a plain Python float
            att_dtype = np.asarray(att).dtype
            val = (
                att_dtype == variable.dtype
            ) or (  # will short-circuit "or" if first condition is true
                np.issubdtype(att_dtype, np.floating)
                and np.issubdtype(variable.dtype, np.integer)
            )
return Result(BaseCheck.MEDIUM, val, self.section_titles["8.1"], [msg])
def check_add_offset_scale_factor_type(self, ds):
"""
If a variable has the attributes add_offset and scale_factor,
check that the variables and attributes are of the same type
OR that the variable is of type byte, short or int and the
attributes are of type float or double.
"""
results = []
add_offset_vars = ds.get_variables_by_attributes(
add_offset=lambda x: x is not None
)
scale_factor_vars = ds.get_variables_by_attributes(
scale_factor=lambda x: x is not None
)
for _att_vars_tup in (
("add_offset", add_offset_vars),
("scale_factor", scale_factor_vars),
):
results.extend(
list(
map(
                        lambda x: self._check_add_offset_scale_factor_type(
ds.variables[x], _att_vars_tup[0]
),
_att_vars_tup[1],
)
)
)
return results
def check_naming_conventions(self, ds):
"""
Checks the variable names to ensure they are valid CF variable names under CF.
CF §2.3 Variable, dimension and attribute names should begin with a letter
and be composed of letters, digits, and underscores.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
ret_val = []
variable_naming = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.3"])
dimension_naming = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.3"])
attribute_naming = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.3"])
ignore_attributes = [
"_FillValue",
"DODS",
"_ChunkSizes",
"_Coordinate",
"_Unsigned",
]
rname = regex.compile("^[A-Za-z][A-Za-z0-9_]*$")
for name, variable in ds.variables.items():
variable_naming.assert_true(
rname.match(name) is not None,
"variable {} should begin with a letter and be composed of "
"letters, digits, and underscores".format(name),
)
# Keep track of all the attributes, we'll need to check them
for attr in variable.ncattrs():
if attr in ignore_attributes:
continue
# Special attributes made by THREDDS
if attr.startswith("DODS"):
continue
# Ignore model produced attributes
if attr.startswith("_Coordinate"):
continue
attribute_naming.assert_true(
rname.match(attr) is not None,
"attribute {}:{} should begin with a letter and be composed of "
"letters, digits, and underscores".format(name, attr),
)
ret_val.append(variable_naming.to_result())
for dimension in ds.dimensions:
dimension_naming.assert_true(
rname.match(dimension) is not None,
"dimension {} should begin with a latter and be composed of "
"letters, digits, and underscores".format(dimension),
)
ret_val.append(dimension_naming.to_result())
for global_attr in ds.ncattrs():
# Special attributes made by THREDDS
if global_attr.startswith("DODS"):
continue
if global_attr.startswith("EXTRA_DIMENSION"):
continue
attribute_naming.assert_true(
rname.match(global_attr) is not None,
"global attribute {} should begin with a letter and be composed of "
"letters, digits, and underscores".format(global_attr),
)
ret_val.append(attribute_naming.to_result())
return ret_val
def check_names_unique(self, ds):
"""
Checks the variable names for uniqueness regardless of case.
CF §2.3 names should not be distinguished purely by case, i.e., if case
is disregarded, no two names should be the same.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
fails = []
total = len(ds.variables)
names = defaultdict(int)
for k in ds.variables:
names[k.lower()] += 1
fails = [
"Variables are not case sensitive. Duplicate variables named: %s" % k
for k, v in names.items()
if v > 1
]
return Result(
BaseCheck.MEDIUM,
(total - len(fails), total),
self.section_titles["2.3"],
msgs=fails,
)
def check_dimension_names(self, ds):
"""
Checks variables contain no duplicate dimension names.
CF §2.4 A variable may have any number of dimensions, including zero,
and the dimensions must all have different names.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
fails = []
total = len(ds.variables)
for k, v in ds.variables.items():
dims = defaultdict(int)
for d in v.dimensions:
dims[d] += 1
for dimension, count in dims.items():
if count > 1:
fails.append(
"%s has two or more dimensions named %s" % (k, dimension)
)
return Result(
BaseCheck.HIGH,
(total - len(fails), total),
self.section_titles["2.4"],
msgs=fails,
)
def check_dimension_order(self, ds):
"""
Checks each variable's dimension order to ensure that the order is
consistent and in order under CF §2.4
CF §2.4 If any or all of the dimensions of a variable have the
interpretations of "date or time" (T), "height or depth" (Z),
"latitude" (Y), or "longitude" (X) then we recommend, those dimensions
to appear in the relative order T, then Z, then Y, then X in the CDL
definition corresponding to the file. All other dimensions should,
whenever possible, be placed to the left of the spatiotemporal
dimensions.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
valid_dimension_order = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.4"])
# Build a map from coordinate variable to axis
coord_axis_map = self._get_coord_axis_map(ds)
# Check each variable's dimension order, excluding climatology and
# bounds variables
any_clim = cfutil.get_climatology_variable(ds)
any_bounds = cfutil.get_cell_boundary_variables(ds)
for name, variable in ds.variables.items():
# Skip bounds/climatology variables, as they should implicitly
# have the same order except for the bounds specific dimension.
# This is tested later in the respective checks
if name in any_bounds or name == any_clim:
continue
# Skip strings/labels
if hasattr(variable.dtype, "char") and variable.dtype.char == "S":
continue
elif variable.dtype == str:
continue
if variable.dimensions:
dimension_order = self._get_dimension_order(ds, name, coord_axis_map)
valid_dimension_order.assert_true(
self._dims_in_order(dimension_order),
"{}'s spatio-temporal dimensions are not in the "
"recommended order T, Z, Y, X and/or further dimensions "
"are not located left of T, Z, Y, X. The dimensions (and "
"their guessed types) are {} (with U: other/unknown; L: "
"unlimited).".format(
name,
self._get_pretty_dimension_order_with_type(
ds, name, dimension_order
),
),
)
return valid_dimension_order.to_result()
def check_fill_value_outside_valid_range(self, ds):
"""
Checks each variable's _FillValue to ensure that it's in valid_range or
between valid_min and valid_max according to CF §2.5.1
CF §2.5.1 The _FillValue should be outside the range specified by
valid_range (if used) for a variable.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of Results
"""
valid_fill_range = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.5"])
for name, variable in ds.variables.items():
# If the variable doesn't have a defined _FillValue don't check it.
if not hasattr(variable, "_FillValue"):
continue
fill_value = variable._FillValue
attrs = variable.ncattrs()
if "valid_range" in attrs:
if isinstance(variable.valid_range, str):
m = "§2.5.1 Fill Values should be outside the range specified by valid_range" # subsection message
valid_fill_range.assert_true(
False,
"{};\n\t{}:valid_range must be a numeric type not a string".format(
m, name
),
)
continue
rmin, rmax = variable.valid_range
spec_by = "valid_range"
elif "valid_min" in attrs and "valid_max" in attrs:
if isinstance(variable.valid_min, str):
valid_fill_range.assert_true(
False,
"{}:valid_min must be a numeric type not a string".format(name),
)
if isinstance(variable.valid_max, str):
valid_fill_range.assert_true(
False,
"{}:valid_max must be a numeric type not a string".format(name),
)
if isinstance(variable.valid_min, str) or isinstance(
variable.valid_max, str
):
continue
rmin = variable.valid_min
rmax = variable.valid_max
spec_by = "valid_min/valid_max"
else:
continue
if np.isnan(fill_value):
valid = True
else:
valid = fill_value < rmin or fill_value > rmax
valid_fill_range.assert_true(
valid,
"{}:_FillValue ({}) should be outside the range specified by {} ({}, {})"
"".format(name, fill_value, spec_by, rmin, rmax),
)
return valid_fill_range.to_result()
def check_convention_globals(self, ds):
"""
Check the common global attributes are strings if they exist.
CF §2.6.2 title/history global attributes, must be strings. Do not need
to exist.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of Results
"""
attrs = ["title", "history"]
valid_globals = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.6"])
for attr in attrs:
dataset_attr = getattr(ds, attr, None)
is_string = isinstance(dataset_attr, str)
valid_globals.assert_true(
is_string and len(dataset_attr),
"§2.6.2 global attribute {} should exist and be a non-empty string" # subsection message
"".format(attr),
)
return valid_globals.to_result()
def check_convention_possibly_var_attrs(self, ds):
"""
Check variable and global attributes are strings for recommended attributes under CF §2.6.2
CF §2.6.2 institution, source, references, and comment, either global
or assigned to individual variables. When an attribute appears both
globally and as a variable attribute, the variable's version has
precedence. Must be strings.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of Results
"""
# The attrs are optional and only needs to be a string and non-empty if it
# exists.
attrs = ["institution", "source", "references", "comment"]
valid_attributes = TestCtx(BaseCheck.MEDIUM, self.section_titles["2.6"])
attr_bin = set()
# If the attribute is defined for any variable, check it and mark in
# the set that we've seen it at least once.
for name, variable in ds.variables.items():
for attribute in variable.ncattrs():
varattr = getattr(variable, attribute)
if attribute in attrs:
is_string = isinstance(varattr, str)
valid_attributes.assert_true(
is_string and len(varattr) > 0,
"§2.6.2 {}:{} should be a non-empty string"
"".format(name, attribute),
)
attr_bin.add(attribute)
# Check all the global attributes too and mark if we've seen them
for attribute in ds.ncattrs():
dsattr = getattr(ds, attribute)
if attribute in attrs:
is_string = isinstance(dsattr, str)
valid_attributes.assert_true(
is_string and len(dsattr) > 0,
"§2.6.2 {} global attribute should be a non-empty string"
"".format(attribute),
)
attr_bin.add(attribute)
return valid_attributes.to_result()
###############################################################################
# Chapter 3: Description of the Data
###############################################################################
def check_units(self, ds):
"""
Check the units attribute for all variables to ensure they are CF
compliant under CF §3.1
CF §3.1 The units attribute is required for all variables that represent dimensional quantities
(except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables
defined in Section 7.4, "Climatological Statistics").
Units are not required for dimensionless quantities. A variable with no units attribute is assumed
to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be
included.
- units required
- type must be recognized by udunits
- if standard name specified, must be consistent with standard name table, must also be consistent with a
specified cell_methods attribute if present
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
coordinate_variables = self._find_coord_vars(ds)
auxiliary_coordinates = self._find_aux_coord_vars(ds)
geophysical_variables = self._find_geophysical_vars(ds)
forecast_variables = cfutil.get_forecast_metadata_variables(ds)
unit_required_variables = set(
coordinate_variables
+ auxiliary_coordinates
+ geophysical_variables
+ forecast_variables
)
for name in unit_required_variables:
# For reduced horizontal grids, the compression index variable does
# not require units.
if cfutil.is_compression_coordinate(ds, name):
continue
variable = ds.variables[name]
# Skip instance coordinate variables
if getattr(variable, "cf_role", None) is not None:
continue
# Skip labels
if (
hasattr(variable.dtype, "char") and variable.dtype.char == "S"
) or variable.dtype == str:
continue
standard_name = getattr(variable, "standard_name", None)
standard_name, standard_name_modifier = self._split_standard_name(
standard_name
)
units = getattr(variable, "units", None)
valid_units = self._check_valid_cf_units(ds, name)
ret_val.append(valid_units)
units_attr_is_string = TestCtx(BaseCheck.MEDIUM, self.section_titles["3.1"])
# side effects, but better than teasing out the individual result
if units_attr_is_string.assert_true(
isinstance(units, str),
"units ({}) attribute of '{}' must be a string compatible with UDUNITS".format(
units, variable.name
),
):
valid_udunits = self._check_valid_udunits(ds, name)
ret_val.append(valid_udunits)
ret_val.append(units_attr_is_string.to_result())
if isinstance(standard_name, str):
valid_standard_units = self._check_valid_standard_units(ds, name)
ret_val.append(valid_standard_units)
return ret_val
def _check_valid_cf_units(self, ds, variable_name):
"""
Checks that the variable contains units attribute, the attribute is a
string and the value is not deprecated by CF
:param netCDF4.Dataset ds: An open netCDF dataset
:param str variable_name: Name of the variable to be checked
:rtype:
:return: List of results
"""
# This list is straight from section 3
deprecated = ["level", "layer", "sigma_level"]
variable = ds.variables[variable_name]
units = getattr(variable, "units", None)
standard_name_full = getattr(variable, "standard_name", None)
standard_name, standard_name_modifier = self._split_standard_name(
standard_name_full
)
std_name_units_dimensionless = cfutil.is_dimensionless_standard_name(
self._std_names._root, standard_name
)
# Is this even in the database? also, if there is no standard_name,
# there's no way to know if it is dimensionless.
should_be_dimensionless = (
variable.dtype is str
or (hasattr(variable.dtype, "char") and variable.dtype.char == "S")
or std_name_units_dimensionless
or standard_name is None
)
# 1) Units must exist
valid_units = TestCtx(BaseCheck.HIGH, self.section_titles["3.1"])
valid_units.assert_true(
should_be_dimensionless or units is not None,
"units attribute is required for {} when variable is not a dimensionless quantity".format(
variable_name
),
)
# Don't bother checking the rest
if units is None and not should_be_dimensionless:
return valid_units.to_result()
# 2) units attribute must be a string
valid_units.assert_true(
should_be_dimensionless or isinstance(units, str),
"units attribute for {} needs to be a string".format(variable_name),
)
# 3) units are not deprecated
valid_units.assert_true(
units not in deprecated,
'units for {}, "{}" are deprecated by CF 1.6'.format(variable_name, units),
)
return valid_units.to_result()
def _check_valid_udunits(self, ds, variable_name):
"""
Checks that the variable's units are contained in UDUnits
:param netCDF4.Dataset ds: An open netCDF dataset
:param str variable_name: Name of the variable to be checked
"""
variable = ds.variables[variable_name]
units = getattr(variable, "units", None)
standard_name = getattr(variable, "standard_name", None)
standard_name, standard_name_modifier = self._split_standard_name(standard_name)
std_name_units_dimensionless = cfutil.is_dimensionless_standard_name(
self._std_names._root, standard_name
)
# If the variable is supposed to be dimensionless, it automatically passes
should_be_dimensionless = (
variable.dtype is str
or (hasattr(variable.dtype, "char") and variable.dtype.char == "S")
or std_name_units_dimensionless
)
valid_udunits = TestCtx(BaseCheck.HIGH, self.section_titles["3.1"])
are_udunits = units is not None and util.units_known(units)
valid_udunits.assert_true(
should_be_dimensionless or are_udunits,
'units for {}, "{}" are not recognized by UDUNITS'.format(
variable_name, units
),
)
return valid_udunits.to_result()
def _check_valid_standard_units(self, ds, variable_name):
"""
Checks that the variable's units are appropriate for the standard name
according to the CF standard name table and coordinate sections in CF
1.6
:param netCDF4.Dataset ds: An open netCDF dataset
:param str variable_name: Name of the variable to be checked
"""
variable = ds.variables[variable_name]
units = getattr(variable, "units", None)
standard_name = getattr(variable, "standard_name", None)
valid_standard_units = TestCtx(BaseCheck.HIGH, self.section_titles["3.1"])
# If the variable is supposed to be dimensionless, it automatically passes
std_name_units_dimensionless = cfutil.is_dimensionless_standard_name(
self._std_names._root, standard_name
)
standard_name, standard_name_modifier = self._split_standard_name(standard_name)
standard_entry = self._std_names.get(standard_name, None)
if standard_entry is not None:
canonical_units = standard_entry.canonical_units
else:
            # Any unit comparison with None returns False
canonical_units = None
# Other standard_name modifiers have the same units as the
# unmodified standard name or are not checked for units.
if standard_name_modifier == "number_of_observations":
canonical_units = "1"
# This section represents the different cases where simple udunits
# comparison isn't comprehensive enough to determine if the units are
# appropriate under CF
# UDUnits accepts "s" as a unit of time but it should be <unit> since <epoch>
if standard_name == "time":
valid_standard_units.assert_true(
util.units_convertible(units, "seconds since 1970-01-01"),
"time must be in a valid units format <unit> since <epoch> "
"not {}".format(units),
)
# UDunits can't tell the difference between east and north facing coordinates
elif standard_name == "latitude":
# degrees is allowed if using a transformed grid
allowed_units = cfutil.VALID_LAT_UNITS | {"degrees"}
valid_standard_units.assert_true(
units.lower() in allowed_units,
'variables defining latitude ("{}") must use degrees_north '
"or degrees if defining a transformed grid. Currently "
"{}".format(variable_name, units),
)
# UDunits can't tell the difference between east and north facing coordinates
elif standard_name == "longitude":
# degrees is allowed if using a transformed grid
allowed_units = cfutil.VALID_LON_UNITS | {"degrees"}
valid_standard_units.assert_true(
units.lower() in allowed_units,
'variables defining longitude ("{}") must use degrees_east '
"or degrees if defining a transformed grid. Currently "
"{}".format(variable_name, units),
)
# Standard Name table agrees the unit should be dimensionless
elif std_name_units_dimensionless:
valid_standard_units.assert_true(True, "")
elif canonical_units is not None:
valid_standard_units.assert_true(
util.units_convertible(canonical_units, units),
"units for variable {} must be convertible to {} "
"currently they are {}".format(variable_name, canonical_units, units),
)
return valid_standard_units.to_result()
def check_standard_name(self, ds):
"""
Check a variables's standard_name attribute to ensure that it meets CF
compliance.
CF §3.3 A standard name is associated with a variable via the attribute
standard_name which takes a string value comprised of a standard name
optionally followed by one or more blanks and a standard name modifier
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
coord_vars = self._find_coord_vars(ds)
aux_coord_vars = self._find_aux_coord_vars(ds)
axis_vars = cfutil.get_axis_variables(ds)
flag_vars = cfutil.get_flag_variables(ds)
geophysical_vars = self._find_geophysical_vars(ds)
variables_requiring_standard_names = (
coord_vars + aux_coord_vars + axis_vars + flag_vars + geophysical_vars
)
for name in set(variables_requiring_standard_names):
# Compression indices used in reduced horizontal grids or
# compression schemes do not require attributes other than compress
if cfutil.is_compression_coordinate(ds, name):
continue
ncvar = ds.variables[name]
# §9 doesn't explicitly allow instance variables as coordinates but
# it's loosely implied. Just in case, skip it.
if hasattr(ncvar, "cf_role"):
continue
# Unfortunately, §6.1 allows for string types to be listed as
# coordinates.
if hasattr(ncvar.dtype, "char") and ncvar.dtype.char == "S":
continue
elif ncvar.dtype == str:
continue
standard_name = getattr(ncvar, "standard_name", None)
standard_name, standard_name_modifier = self._split_standard_name(
standard_name
)
long_name = getattr(ncvar, "long_name", None)
long_or_std_name = TestCtx(BaseCheck.HIGH, self.section_titles["3.3"])
if long_name is not None:
long_name_present = True
long_or_std_name.assert_true(
isinstance(long_name, str),
"Attribute long_name for variable {} must be a string".format(name),
)
else:
long_name_present = False
# §1.3 The long_name and standard_name attributes are used to
# describe the content of each variable. For backwards
# compatibility with COARDS neither is required, but use of at
# least one of them is strongly recommended.
# If standard_name is not defined but long_name is, don't continue
# the check for this variable
if standard_name is not None:
standard_name_present = True
valid_std_name = TestCtx(BaseCheck.HIGH, self.section_titles["3.3"])
valid_std_name.assert_true(
isinstance(standard_name, str),
"Attribute standard_name for variable {} must be a string".format(
name
),
)
if isinstance(standard_name, str):
valid_std_name.assert_true(
standard_name in self._std_names,
"standard_name {} is not defined in Standard Name Table v{}".format(
standard_name or "undefined", self._std_names._version
),
)
ret_val.append(valid_std_name.to_result())
# 2) optional - if modifiers, should be in table
if standard_name_modifier is not None:
valid_modifier = TestCtx(BaseCheck.HIGH, self.section_titles["3.3"])
allowed = [
"detection_minimum",
"number_of_observations",
"standard_error",
"status_flag",
]
valid_modifier.assert_true(
standard_name_modifier in allowed,
"standard_name modifier {} for variable {} is not a valid modifier "
"according to appendix C".format(standard_name_modifier, name),
)
ret_val.append(valid_modifier.to_result())
else:
standard_name_present = False
long_or_std_name.assert_true(
long_name_present or standard_name_present,
"Attribute long_name or/and standard_name is highly recommended for variable {}".format(
name
),
)
ret_val.append(long_or_std_name.to_result())
return ret_val
def check_ancillary_variables(self, ds):
"""
Checks the ancillary_variable attribute for all variables to ensure
they are CF compliant.
CF §3.4 It is a string attribute whose value is a blank separated list
of variable names. The nature of the relationship between variables
associated via ancillary_variables must be determined by other
attributes. The variables listed by the ancillary_variables attribute
will often have the standard name of the variable which points to them
including a modifier (Appendix C, Standard Name Modifiers) to indicate
the relationship.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
for ncvar in ds.get_variables_by_attributes(
ancillary_variables=lambda x: x is not None
):
name = ncvar.name
valid_ancillary = TestCtx(BaseCheck.HIGH, self.section_titles["3.4"])
ancillary_variables = ncvar.ancillary_variables
valid_ancillary.assert_true(
isinstance(ancillary_variables, str),
"ancillary_variables attribute defined by {} "
"should be string".format(name),
)
# Can't perform the second check if it's not a string
if not isinstance(ancillary_variables, str):
ret_val.append(valid_ancillary.to_result())
continue
for ancillary_variable in ancillary_variables.split():
valid_ancillary.assert_true(
ancillary_variable in ds.variables,
"{} is not a variable in this dataset".format(ancillary_variable),
)
ret_val.append(valid_ancillary.to_result())
return ret_val
def check_flags(self, ds):
"""
Check the flag_values, flag_masks and flag_meanings attributes for
variables to ensure they are CF compliant.
CF §3.5 The attributes flag_values, flag_masks and flag_meanings are
intended to make variables that contain flag values self describing.
Status codes and Boolean (binary) condition flags may be expressed with
different combinations of flag_values and flag_masks attribute
definitions.
The flag_values and flag_meanings attributes describe a status flag
consisting of mutually exclusive coded values.
The flag_meanings attribute is a string whose value is a blank
separated list of descriptive words or phrases, one for each flag
value. Each word or phrase should consist of characters from the
alphanumeric set and the following five: '_', '-', '.', '+', '@'.
The flag_masks and flag_meanings attributes describe a number of
independent Boolean conditions using bit field notation by setting
unique bits in each flag_masks value.
The flag_masks, flag_values and flag_meanings attributes, used
together, describe a blend of independent Boolean conditions and
enumerated status codes. A flagged condition is identified by a bitwise
AND of the variable value and each flag_masks value; a result that
matches the flag_values value indicates a true condition.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
for name in cfutil.get_flag_variables(ds):
variable = ds.variables[name]
flag_values = getattr(variable, "flag_values", None)
flag_masks = getattr(variable, "flag_masks", None)
valid_flags_var = TestCtx(BaseCheck.HIGH, self.section_titles["3.5"])
# Check that the variable defines mask or values
valid_flags_var.assert_true(
flag_values is not None or flag_masks is not None,
"{} does not define either flag_masks or flag_values".format(name),
)
ret_val.append(valid_flags_var.to_result())
valid_meanings = self._check_flag_meanings(ds, name)
ret_val.append(valid_meanings)
# check flag_values
if flag_values is not None:
valid_values = self._check_flag_values(ds, name)
ret_val.append(valid_values)
# check flag_masks
if flag_masks is not None:
valid_masks = self._check_flag_masks(ds, name)
ret_val.append(valid_masks)
if flag_values is not None and flag_masks is not None:
                allv = [
                    value & mask == value
                    for value, mask in zip(flag_values, flag_masks)
                ]
allvr = Result(BaseCheck.MEDIUM, all(allv), self.section_titles["3.5"])
if not allvr.value:
allvr.msgs = [
"flag masks and flag values for '{}' combined don't equal flag value".format(
name
)
]
ret_val.append(allvr)
return ret_val
def _check_flag_values(self, ds, name):
"""
Checks a variable's flag_values attribute for compliance under CF
- flag_values exists as an array
- unique elements in flag_values
        - flag_values is the same dtype as the variable
- flag_values is the same length as flag_meanings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Name of variable to check
:rtype: compliance_checker.base.Result
"""
variable = ds.variables[name]
flag_values = getattr(variable, "flag_values", None)
flag_meanings = getattr(variable, "flag_meanings", None)
valid_values = TestCtx(BaseCheck.HIGH, self.section_titles["3.5"])
# flag_values must be a list of values, not a string or anything else
valid_values.assert_true(
isinstance(flag_values, np.ndarray),
"{}'s flag_values must be an array of values not {}".format(
name, type(flag_values)
),
)
# We can't perform any more checks
if not isinstance(flag_values, np.ndarray):
return valid_values.to_result()
# the flag values must be independent, no repeating values
flag_set = set(flag_values)
valid_values.assert_true(
len(flag_set) == len(flag_values),
"{}'s flag_values must be independent and can not be repeated".format(name),
)
# the data type for flag_values should be the same as the variable
valid_values.assert_true(
variable.dtype.type == flag_values.dtype.type,
"flag_values ({}) must be the same data type as {} ({})"
"".format(flag_values.dtype.type, name, variable.dtype.type),
)
if isinstance(flag_meanings, str):
flag_meanings = flag_meanings.split()
valid_values.assert_true(
len(flag_meanings) == len(flag_values),
"{}'s flag_meanings and flag_values should have the same number ".format(
name
)
+ "of elements.",
)
return valid_values.to_result()
def _check_flag_masks(self, ds, name):
"""
Check a variable's flag_masks attribute for compliance under CF
- flag_masks exists as an array
- flag_masks is the same dtype as the variable
- variable's dtype can support bit-field
- flag_masks is the same length as flag_meanings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
"""
variable = ds.variables[name]
flag_masks = variable.flag_masks
        flag_meanings = getattr(variable, "flag_meanings", None)
valid_masks = TestCtx(BaseCheck.HIGH, self.section_titles["3.5"])
valid_masks.assert_true(
isinstance(flag_masks, np.ndarray),
"{}'s flag_masks must be an array of values not {}".format(
name, type(flag_masks).__name__
),
)
if not isinstance(flag_masks, np.ndarray):
return valid_masks.to_result()
valid_masks.assert_true(
variable.dtype.type == flag_masks.dtype.type,
"flag_masks ({}) mustbe the same data type as {} ({})"
"".format(flag_masks.dtype.type, name, variable.dtype.type),
)
type_ok = (
np.issubdtype(variable.dtype, np.integer)
or np.issubdtype(variable.dtype, "S")
or np.issubdtype(variable.dtype, "b")
)
valid_masks.assert_true(
type_ok,
"{}'s data type must be capable of bit-field expression".format(name),
)
if isinstance(flag_meanings, str):
flag_meanings = flag_meanings.split()
valid_masks.assert_true(
len(flag_meanings) == len(flag_masks),
"{} flag_meanings and flag_masks should have the same number ".format(
name
)
+ "of elements.",
)
return valid_masks.to_result()
def _check_flag_meanings(self, ds, name):
"""
Check a variable's flag_meanings attribute for compliance under CF
- flag_meanings exists
- flag_meanings is a string
- flag_meanings elements are valid strings
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Variable name
:rtype: compliance_checker.base.Result
"""
variable = ds.variables[name]
flag_meanings = getattr(variable, "flag_meanings", None)
valid_meanings = TestCtx(BaseCheck.HIGH, self.section_titles["3.5"])
valid_meanings.assert_true(
flag_meanings is not None,
"{}'s flag_meanings attribute is required for flag variables".format(name),
)
valid_meanings.assert_true(
isinstance(flag_meanings, str),
"{}'s flag_meanings attribute must be a string".format(name),
)
# We can't perform any additional checks if it's not a string
if not isinstance(flag_meanings, str):
return valid_meanings.to_result()
valid_meanings.assert_true(
len(flag_meanings) > 0, "{}'s flag_meanings can't be empty".format(name)
)
flag_regx = regex.compile(r"^[0-9A-Za-z_\-.+@]+$")
meanings = flag_meanings.split()
for meaning in meanings:
if flag_regx.match(meaning) is None:
valid_meanings.assert_true(
False,
"{}'s flag_meanings attribute defined an illegal flag meaning ".format(
name
)
+ "{}".format(meaning),
)
return valid_meanings.to_result()
###############################################################################
# Chapter 4: Coordinate Types
###############################################################################
def check_coordinate_types(self, ds):
"""
Check the axis attribute of coordinate variables
CF §4 The attribute axis may be attached to a coordinate variable and
given one of the values X, Y, Z or T which stand for a longitude,
latitude, vertical, or time axis respectively. Alternatively the
standard_name attribute may be used for direct identification.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
for variable in ds.get_variables_by_attributes(axis=lambda x: x is not None):
name = variable.name
# Coordinate compressions should not be checked as a valid
# coordinate, which they are not. They are a mechanism to project
# an array of indices onto a 2-d grid containing valid coordinates.
if cfutil.is_compression_coordinate(ds, name):
continue
variable = ds.variables[name]
# Even though it's not allowed in CF 1.6, it is allowed in CF 1.7
# and we see people do it, often.
if hasattr(variable, "cf_role"):
continue
# §6.1 allows for labels to be referenced as auxiliary coordinate
# variables, which should not be checked like the rest of the
# coordinates.
if hasattr(variable.dtype, "char") and variable.dtype.char == "S":
continue
elif variable.dtype == str:
continue
axis = getattr(variable, "axis", None)
if axis is not None:
valid_axis = self._check_axis(ds, name)
ret_val.append(valid_axis)
return ret_val
def _check_axis(self, ds, name):
"""
Checks that the axis attribute is a string and an allowed value, namely
one of 'T', 'X', 'Y', or 'Z'.
:param netCDF4.Dataset ds: An open netCDF dataset
:param str name: Name of the variable
:rtype: compliance_checker.base.Result
"""
allowed_axis = ["T", "X", "Y", "Z"]
variable = ds.variables[name]
axis = variable.axis
valid_axis = TestCtx(BaseCheck.HIGH, self.section_titles["4"])
        axis_is_string = isinstance(axis, str)
valid_axis.assert_true(
axis_is_string and len(axis) > 0,
"{}'s axis attribute must be a non-empty string".format(name),
)
# If axis isn't a string we can't continue any checks
if not axis_is_string or len(axis) == 0:
return valid_axis.to_result()
valid_axis.assert_true(
axis in allowed_axis,
"{}'s axis attribute must be T, X, Y, or Z, ".format(name)
+ "currently {}".format(axis),
)
return valid_axis.to_result()
def check_latitude(self, ds):
"""
Check variable(s) that define latitude and are defined correctly according to CF.
CF §4.1 Variables representing latitude must always explicitly include
the units attribute; there is no default value. The recommended unit
of latitude is degrees_north. Also acceptable are degree_north,
degree_N, degrees_N, degreeN, and degreesN.
Optionally, the latitude type may be indicated additionally by
providing the standard_name attribute with the value latitude, and/or
the axis attribute with the value Y.
- Four checks per latitude variable
- (H) latitude has units attribute
- (M) latitude has an allowed units attribute
- (L) latitude uses degrees_north (if not in rotated pole)
- (M) latitude defines either standard_name or axis
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
allowed_lat_units = [
"degrees_north",
"degree_north",
"degree_n",
"degrees_n",
"degreen",
"degreesn",
]
# Determine the grid mappings in this dataset
grid_mapping = []
grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
for name in grid_mapping_variables:
variable = ds.variables[name]
grid_mapping_name = getattr(variable, "grid_mapping_name", None)
if grid_mapping_name:
grid_mapping.append(grid_mapping_name)
latitude_variables = cfutil.get_latitude_variables(ds)
for latitude in latitude_variables:
variable = ds.variables[latitude]
units = getattr(variable, "units", None)
units_is_string = isinstance(units, str)
standard_name = getattr(variable, "standard_name", None)
axis = getattr(variable, "axis", None)
# Check that latitude defines units
valid_latitude = TestCtx(BaseCheck.HIGH, self.section_titles["4.1"])
valid_latitude.assert_true(
units is not None,
"latitude variable '{}' must define units".format(latitude),
)
ret_val.append(valid_latitude.to_result())
# Check that latitude uses allowed units
allowed_units = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.1"])
if standard_name == "grid_latitude":
e_n_units = cfutil.VALID_LAT_UNITS | cfutil.VALID_LON_UNITS
# check that the units aren't in east and north degrees units,
# but are convertible to angular units
allowed_units.assert_true(
units not in e_n_units and Unit(units) == Unit("degree"),
"Grid latitude variable '{}' should use degree equivalent units without east or north components. "
"Current units are {}".format(latitude, units),
)
else:
allowed_units.assert_true(
units_is_string and units.lower() in allowed_lat_units,
"latitude variable '{}' should define valid units for latitude"
"".format(latitude),
)
ret_val.append(allowed_units.to_result())
# Check that latitude uses degrees_north
if standard_name == "latitude" and units != "degrees_north":
# This is only a recommendation and we won't penalize but we
# will include a recommended action.
msg = (
"CF recommends latitude variable '{}' to use units degrees_north"
"".format(latitude)
)
recommended_units = Result(
BaseCheck.LOW, (1, 1), self.section_titles["4.1"], [msg]
)
ret_val.append(recommended_units)
y_variables = ds.get_variables_by_attributes(axis="Y")
# Check that latitude defines either standard_name or axis
definition = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.1"])
definition.assert_true(
standard_name == "latitude" or axis == "Y" or y_variables != [],
"latitude variable '{}' should define standard_name='latitude' or axis='Y'"
"".format(latitude),
)
ret_val.append(definition.to_result())
return ret_val
def check_longitude(self, ds):
"""
Check variable(s) that define longitude and are defined correctly according to CF.
CF §4.2 Variables representing longitude must always explicitly include
the units attribute; there is no default value. The recommended unit
of longitude is degrees_east. Also acceptable are degree_east,
degree_E, degrees_E, degreeE, and degreesE.
Optionally, the longitude type may be indicated additionally by
providing the standard_name attribute with the value longitude, and/or
the axis attribute with the value X.
- Four checks per longitude variable
- (H) longitude has units attribute
- (M) longitude has an allowed units attribute
- (L) longitude uses degrees_east (if not in rotated pole)
- (M) longitude defines either standard_name or axis
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
# TODO we already have a check_latitude... I'm sure we can make DRYer
ret_val = []
allowed_lon_units = [
"degrees_east",
"degree_east",
"degree_e",
"degrees_e",
"degreee",
"degreese",
]
# Determine the grid mappings in this dataset
grid_mapping = []
grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
for name in grid_mapping_variables:
variable = ds.variables[name]
grid_mapping_name = getattr(variable, "grid_mapping_name", None)
if grid_mapping_name:
grid_mapping.append(grid_mapping_name)
longitude_variables = cfutil.get_longitude_variables(ds)
for longitude in longitude_variables:
variable = ds.variables[longitude]
units = getattr(variable, "units", None)
units_is_string = isinstance(units, str)
standard_name = getattr(variable, "standard_name", None)
axis = getattr(variable, "axis", None)
# NOTE see docstring--should below be 4.1 or 4.2?
# Check that longitude defines units
valid_longitude = TestCtx(BaseCheck.HIGH, self.section_titles["4.2"])
valid_longitude.assert_true(
units is not None,
"longitude variable '{}' must define units".format(longitude),
)
ret_val.append(valid_longitude.to_result())
# Check that longitude uses allowed units
allowed_units = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.2"])
if standard_name == "grid_longitude":
e_n_units = cfutil.VALID_LAT_UNITS | cfutil.VALID_LON_UNITS
# check that the units aren't in east and north degrees units,
# but are convertible to angular units
allowed_units.assert_true(
units not in e_n_units and Unit(units) == Unit("degree"),
"Grid longitude variable '{}' should use degree equivalent units without east or north components. "
"Current units are {}".format(longitude, units),
)
else:
allowed_units.assert_true(
units_is_string and units.lower() in allowed_lon_units,
"longitude variable '{}' should define valid units for longitude"
"".format(longitude),
)
ret_val.append(allowed_units.to_result())
# Check that longitude uses degrees_east
if standard_name == "longitude" and units != "degrees_east":
# This is only a recommendation and we won't penalize but we
# will include a recommended action.
msg = (
"CF recommends longitude variable '{}' to use units degrees_east"
"".format(longitude)
)
recommended_units = Result(
BaseCheck.LOW, (1, 1), self.section_titles["4.2"], [msg]
)
ret_val.append(recommended_units)
x_variables = ds.get_variables_by_attributes(axis="X")
# Check that longitude defines either standard_name or axis
definition = TestCtx(BaseCheck.MEDIUM, self.section_titles["4.2"])
definition.assert_true(
standard_name == "longitude" or axis == "X" or x_variables != [],
"longitude variable '{}' should define standard_name='longitude' or axis='X'"
"".format(longitude),
)
ret_val.append(definition.to_result())
return ret_val
def check_dimensional_vertical_coordinate(
self, ds, dimless_vertical_coordinates=dimless_vertical_coordinates_1_6
):
"""
Check units for variables defining vertical position are valid under
CF.
CF §4.3.1 The units attribute for dimensional coordinates will be a string
formatted as per the udunits.dat file.
The acceptable units for vertical (depth or height) coordinate variables
are:
- units of pressure as listed in the file udunits.dat. For vertical axes
the most commonly used of these include include bar, millibar,
decibar, atmosphere (atm), pascal (Pa), and hPa.
- units of length as listed in the file udunits.dat. For vertical axes
the most commonly used of these include meter (metre, m), and
kilometer (km).
- other units listed in the file udunits.dat that may under certain
circumstances reference vertical position such as units of density or
temperature.
Plural forms are also acceptable.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
z_variables = cfutil.get_z_variables(ds)
# dimless_standard_names = [name for name, regx in dimless_vertical_coordinates]
for name in z_variables:
variable = ds.variables[name]
standard_name = getattr(variable, "standard_name", None)
units = getattr(variable, "units", None)
positive = getattr(variable, "positive", None)
# Skip the variable if it's dimensionless
if (
hasattr(variable, "formula_terms")
or standard_name in dimless_vertical_coordinates
):
continue
valid_vertical_coord = TestCtx(BaseCheck.HIGH, self.section_titles["4.3"])
valid_vertical_coord.assert_true(
isinstance(units, str) and units,
"§4.3.1 {}'s units must be defined for vertical coordinates, "
"there is no default".format(name),
)
if not util.units_convertible("bar", units):
valid_vertical_coord.assert_true(
positive in ("up", "down"),
"{}: vertical coordinates not defining pressure must include "
"a positive attribute that is either 'up' or 'down'".format(name),
)
# _check_valid_standard_units, part of the Chapter 3 checks,
# already verifies that this coordinate has valid units
ret_val.append(valid_vertical_coord.to_result())
return ret_val
def _check_dimensionless_vertical_coordinate_1_6(
self, ds, vname, deprecated_units, ret_val, dim_vert_coords_dict
):
"""
Check that a dimensionless vertical coordinate variable is valid under
CF-1.6.
:param netCDF4.Dataset ds: open netCDF4 dataset
:param str name: variable name
:param list ret_val: array to append Results to
:rtype None
"""
variable = ds.variables[vname]
standard_name = getattr(variable, "standard_name", None)
units = getattr(variable, "units", None)
formula_terms = getattr(variable, "formula_terms", None)
# Skip the variable if it's dimensional
if formula_terms is None and standard_name not in dim_vert_coords_dict:
return
is_not_deprecated = TestCtx(BaseCheck.LOW, self.section_titles["4.3"])
is_not_deprecated.assert_true(
units not in deprecated_units,
"§4.3.2: units are deprecated by CF in variable {}: {}"
"".format(vname, units),
)
# check the vertical coordinates
ret_val.append(is_not_deprecated.to_result())
ret_val.append(self._check_formula_terms(ds, vname, dim_vert_coords_dict))
def check_dimensionless_vertical_coordinates(self, ds):
"""
Check the validity of dimensionless coordinates under CF
CF §4.3.2 The units attribute is not required for dimensionless
coordinates.
The standard_name attribute associates a coordinate with its definition
from Appendix D, Dimensionless Vertical Coordinates. The definition
provides a mapping between the dimensionless coordinate values and
dimensional values that can positively and uniquely indicate the
location of the data.
A new attribute, formula_terms, is used to associate terms in the
definitions with variables in a netCDF file. To maintain backwards
compatibility with COARDS the use of these attributes is not required,
but is strongly recommended.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
z_variables = cfutil.get_z_variables(ds)
deprecated_units = ["level", "layer", "sigma_level"]
ret_val.extend(
self._check_dimensionless_vertical_coordinates(
ds,
deprecated_units,
self._check_dimensionless_vertical_coordinate_1_6,
dimless_vertical_coordinates_1_6,
)
)
return ret_val
def check_time_coordinate(self, ds):
"""
Check variables defining time are valid under CF
CF §4.4 Variables representing time must always explicitly include the
units attribute; there is no default value.
The units attribute takes a string value formatted as per the
recommendations in the Udunits package.
The acceptable units for time are listed in the udunits.dat file. The
most commonly used of these strings (and their abbreviations) includes
day (d), hour (hr, h), minute (min) and second (sec, s). Plural forms
are also acceptable. The reference time string (appearing after the
identifier since) may include date alone; date and time; or date, time,
and time zone. The reference time is required. A reference time in year
0 has a special meaning (see Section 7.4, "Climatological Statistics").
Recommend that the unit year be used with caution. It is not a calendar
year. For similar reasons the unit month should also be used with
caution.
A time coordinate is identifiable from its units string alone.
Optionally, the time coordinate may be indicated additionally by
providing the standard_name attribute with an appropriate value, and/or
the axis attribute with the value T.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
for name in cfutil.get_time_variables(ds):
variable = ds.variables[name]
# Has units
has_units = hasattr(variable, "units")
if not has_units:
result = Result(
BaseCheck.HIGH,
False,
self.section_titles["4.4"],
["%s does not have units" % name],
)
ret_val.append(result)
continue
# Correct and identifiable units
result = Result(BaseCheck.HIGH, True, self.section_titles["4.4"])
ret_val.append(result)
correct_units = util.units_temporal(variable.units)
reasoning = None
if not correct_units:
reasoning = ["%s does not have correct time units" % name]
result = Result(
BaseCheck.HIGH, correct_units, self.section_titles["4.4"], reasoning
)
ret_val.append(result)
return ret_val
def check_calendar(self, ds):
"""
Check the calendar attribute for variables defining time and ensure it
is a valid calendar prescribed by CF.
CF §4.4.1 In order to calculate a new date and time given a base date, base
time and a time increment one must know what calendar to use.
The values currently defined for calendar are:
- gregorian or standard
- proleptic_gregorian
- noleap or 365_day
- all_leap or 366_day
- 360_day
- julian
- none
The calendar attribute may be set to none in climate experiments that
simulate a fixed time of year.
The time of year is indicated by the date in the reference time of the
units attribute.
If none of the calendars defined above applies, a non-standard calendar
can be defined. The lengths of each month are explicitly defined with
the month_lengths attribute of the time axis.
If leap years are included, then two other attributes of the time axis
should also be defined:
leap_year, leap_month
The calendar attribute is not required when a non-standard calendar is
being used. It is sufficient to define the calendar using the
month_lengths attribute, along with leap_year, and leap_month as
appropriate. However, the calendar attribute is allowed to take
non-standard values and in that case defining the non-standard calendar
using the appropriate attributes is required.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
valid_calendars = [
"gregorian",
"standard",
"proleptic_gregorian",
"noleap",
"365_day",
"all_leap",
"366_day",
"360_day",
"julian",
"none",
]
ret_val = []
# if has a calendar, check that it is within the valid values
# otherwise no calendar is valid
for time_var in ds.get_variables_by_attributes(
calendar=lambda c: c is not None
):
reasoning = None
valid_calendar = time_var.calendar in valid_calendars
if not valid_calendar:
reasoning = [
"§4.4.1 Variable %s should have a valid calendar: '%s' is not a valid calendar"
% (time_var.name, time_var.calendar)
]
# passes if the calendar is valid, otherwise notify of invalid
# calendar
result = Result(
BaseCheck.LOW, valid_calendar, self.section_titles["4.4"], reasoning
)
ret_val.append(result)
return ret_val
###############################################################################
# Chapter 5: Coordinate Systems
###############################################################################
def check_aux_coordinates(self, ds):
"""
Chapter 5 paragraph 3
The dimensions of an auxiliary coordinate variable must be a subset of
the dimensions of the variable with which the coordinate is associated,
with two exceptions. First, string-valued coordinates (Section 6.1,
"Labels") have a dimension for maximum string length. Second, in the
ragged array representations of data (Chapter 9, Discrete Sampling
Geometries), special methods are needed to connect the data and
coordinates.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
# for contiguous ragged array/indexed ragged array representations,
# coordinates are not required to adhere to the same principles;
        # these representations can be identified by two attributes:
# required for contiguous
count_vars = ds.get_variables_by_attributes(
sample_dimension=lambda x: x is not None
)
# required for indexed
index_vars = ds.get_variables_by_attributes(
instance_dimension=lambda x: x is not None
)
# if these attributes exist, we don't need to test
# the coordinates
if count_vars or index_vars:
return ret_val
geophysical_variables = self._find_geophysical_vars(ds)
for name in geophysical_variables:
variable = ds.variables[name]
coordinates = getattr(variable, "coordinates", None)
# We use a set so we can assert
dim_set = set(variable.dimensions)
# No auxiliary coordinates, no check
if not isinstance(coordinates, str) or coordinates == "":
continue
valid_aux_coords = TestCtx(BaseCheck.HIGH, self.section_titles["5"])
for aux_coord in coordinates.split():
valid_aux_coords.assert_true(
aux_coord in ds.variables,
"{}'s auxiliary coordinate specified by the coordinates attribute, {}, "
"is not a variable in this dataset"
"".format(name, aux_coord),
)
if aux_coord not in ds.variables:
continue
# §6.1 Allows for "labels" to be referenced as coordinates
if (
hasattr(ds.variables[aux_coord].dtype, "char")
and ds.variables[aux_coord].dtype.char == "S"
):
continue
elif ds.variables[aux_coord].dtype == str:
continue
aux_coord_dims = set(ds.variables[aux_coord].dimensions)
valid_aux_coords.assert_true(
aux_coord_dims.issubset(dim_set),
"dimensions for auxiliary coordinate variable {} ({}) "
"are not a subset of dimensions for variable {} ({})"
"".format(
aux_coord, ", ".join(aux_coord_dims), name, ", ".join(dim_set)
),
)
ret_val.append(valid_aux_coords.to_result())
return ret_val
def check_duplicate_axis(self, ds):
"""
Checks that no variable contains two coordinates defining the same
axis.
Chapter 5 paragraph 6
If an axis attribute is attached to an auxiliary coordinate variable,
it can be used by applications in the same way the `axis` attribute
attached to a coordinate variable is used. However, it is not
permissible for a [geophysical variable] to have both a coordinate
variable and an auxiliary coordinate variable, or more than one of
either type of variable, having an `axis` attribute with any given
value e.g. there must be no more than one axis attribute for X for any
[geophysical variable].
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
:return: List of results
"""
ret_val = []
geophysical_variables = self._find_geophysical_vars(ds)
for name in geophysical_variables:
no_duplicates = TestCtx(BaseCheck.HIGH, self.section_titles["5"])
axis_map = cfutil.get_axis_map(ds, name)
axes = []
# For every coordinate associated with this variable, keep track of
# which coordinates define an axis and assert that there are no
# duplicate axis attributes defined in the set of associated
# coordinates. axis_map includes coordinates that don't actually have
# an axis attribute, so we need to ignore those here.
for axis, coords in axis_map.items():
coords = [c for c in coords if hasattr(ds.variables[c], "axis")]
no_duplicates.assert_true(
len(coords) <= 1,
"'{}' has duplicate axis {} defined by [{}]".format(
name, axis, ", ".join(sorted(coords))
),
)
ret_val.append(no_duplicates.to_result())
return ret_val
def check_multi_dimensional_coords(self, ds):
"""
Checks that no multidimensional coordinate shares a name with its
dimensions.
Chapter 5 paragraph 4
We recommend that the name of a [multidimensional coordinate] should
not match the name of any of its dimensions.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
# This can only apply to auxiliary coordinate variables
for coord in self._find_aux_coord_vars(ds):
variable = ds.variables[coord]
if variable.ndim < 2:
continue
not_matching = TestCtx(BaseCheck.MEDIUM, self.section_titles["5"])
not_matching.assert_true(
coord not in variable.dimensions,
"{} shares the same name as one of its dimensions" "".format(coord),
)
ret_val.append(not_matching.to_result())
return ret_val
# NOTE **********
# IS THIS EVEN NEEDED ANYMORE?
# ***************
def check_grid_coordinates(self, ds):
# def _check_grid_coordinates(self, ds):
"""
5.6 When the coordinate variables for a horizontal grid are not
longitude and latitude, it is required that the true latitude and
longitude coordinates be supplied via the coordinates attribute.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
latitudes = cfutil.get_true_latitude_variables(ds)
longitudes = cfutil.get_true_longitude_variables(ds)
check_features = [
"2d-regular-grid",
"2d-static-grid",
"3d-regular-grid",
"3d-static-grid",
"mapped-grid",
"reduced-grid",
]
# This one is tricky because there's a very subtle difference between
# latitude as defined in Chapter 4 and "true" latitude as defined in
# chapter 5.
# For each geophysical variable that defines a grid, assert it is
# associated with a true latitude or longitude coordinate.
for variable in self._find_geophysical_vars(ds):
# We use a set so we can do set-wise comparisons with coordinate
# dimensions
dimensions = set(ds.variables[variable].dimensions)
# If it's not a grid, skip it
if cfutil.guess_feature_type(ds, variable) not in check_features:
continue
has_coords = TestCtx(BaseCheck.HIGH, self.section_titles["5.6"])
# axis_map is a defaultdict(list) mapping the axis to a list of
# coordinate names. For example:
# {'X': ['lon'], 'Y':['lat'], 'Z':['lev']}
# The mapping comes from the dimensions of the variable and the
# contents of the `coordinates` attribute only.
axis_map = cfutil.get_axis_map(ds, variable)
msg = (
'{}\'s coordinate variable "{}" is not one of the variables identifying true '
+ "latitude/longitude and its dimensions are not a subset of {}'s dimensions"
)
alt = (
"{} has no coordinate associated with a variable identified as true latitude/longitude; "
+ "its coordinate variable should also share a subset of {}'s dimensions"
)
# Make sure we can find latitude and its dimensions are a subset
_lat = None
found_lat = False
for lat in axis_map["Y"]:
_lat = lat
is_subset_dims = set(ds.variables[lat].dimensions).issubset(dimensions)
if is_subset_dims and lat in latitudes:
found_lat = True
break
if _lat:
has_coords.assert_true(found_lat, msg.format(variable, _lat, variable))
else:
has_coords.assert_true(found_lat, alt.format(variable, variable))
# Make sure we can find longitude and its dimensions are a subset
_lon = None
found_lon = False
for lon in axis_map["X"]:
_lon = lon
is_subset_dims = set(ds.variables[lon].dimensions).issubset(dimensions)
if is_subset_dims and lon in longitudes:
found_lon = True
break
if _lon:
has_coords.assert_true(found_lon, msg.format(variable, _lon, variable))
else:
has_coords.assert_true(found_lon, alt.format(variable, variable))
ret_val.append(has_coords.to_result())
return ret_val
def check_reduced_horizontal_grid(self, ds):
"""
5.3 A "reduced" longitude-latitude grid is one in which the points are
arranged along constant latitude lines with the number of points on a
latitude line decreasing toward the poles.
Recommend that this type of gridded data be stored using the compression
scheme described in Section 8.2, "Compression by Gathering". The
compressed latitude and longitude auxiliary coordinate variables are
identified by the coordinates attribute.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
# Create a set of coordinate variables defining `compress`
lats = set(cfutil.get_latitude_variables(ds))
lons = set(cfutil.get_longitude_variables(ds))
for name in self._find_geophysical_vars(ds):
coords = getattr(ds.variables[name], "coordinates", None)
axis_map = cfutil.get_axis_map(ds, name)
# If this variable has no coordinate that defines compression
if "C" not in axis_map:
continue
valid_rgrid = TestCtx(BaseCheck.HIGH, self.section_titles["5.3"])
# Make sure reduced grid features define coordinates
valid_rgrid.assert_true(
isinstance(coords, str) and coords,
"reduced grid feature {} must define coordinates attribute"
"".format(name),
)
# We can't check anything else if there are no defined coordinates
if not (isinstance(coords, str) and coords):
continue
coord_set = set(coords.split())
# Make sure it's associated with valid lat and valid lon
valid_rgrid.assert_true(
len(coord_set.intersection(lons)) > 0,
"{} must be associated with a valid longitude coordinate".format(name),
)
valid_rgrid.assert_true(
len(coord_set.intersection(lats)) > 0,
"{} must be associated with a valid latitude coordinate".format(name),
)
valid_rgrid.assert_true(
len(axis_map["C"]) == 1,
"{} can not be associated with more than one compressed coordinates: "
"({})".format(name, ", ".join(axis_map["C"])),
)
for compressed_coord in axis_map["C"]:
coord = ds.variables[compressed_coord]
compress = getattr(coord, "compress", None)
valid_rgrid.assert_true(
isinstance(compress, str) and compress,
"compress attribute for compression coordinate {} must be a non-empty string"
"".format(compressed_coord),
)
if not isinstance(compress, str):
continue
for dim in compress.split():
valid_rgrid.assert_true(
dim in ds.dimensions,
"dimension {} referenced by {}:compress must exist"
"".format(dim, compressed_coord),
)
ret_val.append(valid_rgrid.to_result())
return ret_val
def _check_grid_mapping_attr_condition(self, attr, attr_name):
"""
Evaluate a condition (or series of conditions) for a particular
attribute. Implementation for CF-1.6.
:param attr: attribute to test condition for
:param str attr_name: name of the attribute
:rtype tuple
:return two-tuple of (bool, str)
"""
if attr_name == "latitude_of_projection_origin":
return self._evaluate_latitude_of_projection_origin(attr)
elif attr_name == "longitude_of_projection_origin":
return self._evaluate_longitude_of_projection_origin(attr)
elif attr_name == "longitude_of_central_meridian":
return self._evaluate_longitude_of_central_meridian(attr)
elif attr_name == "longitude_of_prime_meridian":
return self._evaluate_longitude_of_prime_meridian(attr)
elif attr_name == "scale_factor_at_central_meridian":
return self._evaluate_scale_factor_at_central_meridian(attr)
elif attr_name == "scale_factor_at_projection_origin":
return self._evaluate_scale_factor_at_projection_origin(attr)
elif attr_name == "standard_parallel":
return self._evaluate_standard_parallel(attr)
elif attr_name == "straight_vertical_longitude_from_pole":
return self._evaluate_straight_vertical_longitude_from_pole(attr)
else:
raise NotImplementedError(
"Evaluation for {} not yet implemented".format(attr_name)
)
def _evaluate_latitude_of_projection_origin(self, val):
"""
Evaluate the condition for `latitude_of_projection_origin` attribute.
Return result. Value must be -90 <= x <= 90.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -90.0) and (val <= 90.0),
"latitude_of_projection_origin must satisfy (-90 <= x <= 90)",
)
def _evaluate_longitude_of_projection_origin(self, val):
"""
Evaluate the condition for `longitude_of_projection_origin` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -180.0) and (val <= 180.0),
"longitude_of_projection_origin must satisfy (-180 <= x <= 180)",
)
def _evaluate_longitude_of_central_meridian(self, val):
"""
Evaluate the condition for `longitude_of_central_meridian` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -180.0) and (val <= 180.0),
"longitude_of_central_meridian must satisfy (-180 <= x <= 180)",
)
def _evaluate_longitude_of_prime_meridian(self, val):
"""
Evaluate the condition for `longitude_of_prime_meridian` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -180.0) and (val <= 180.0),
"longitude_of_prime_meridian must satisfy (-180 <= x <= 180)",
)
def _evaluate_scale_factor_at_central_meridian(self, val):
"""
Evaluate the condition for `scale_factor_at_central_meridian` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (val > 0.0, "scale_factor_at_central_meridian must be > 0.0")
def _evaluate_scale_factor_at_projection_origin(self, val):
"""
Evaluate the condition for `scale_factor_at_projection_origin` attribute.
Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (val > 0.0, "scale_factor_at_projection_origin must be > 0.0")
def _evaluate_standard_parallel(self, val):
"""
Evaluate the condition for `standard_parallel` attribute. Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -90.0) and (val <= 90),
"standard_parallel must satisfy (-90 <= x <= 90)",
)
def _evaluate_straight_vertical_longitude_from_pole(self, val):
"""
Evaluate the condition for `straight_vertical_longitude_from_pole`
attribute. Return result.
:param val: value to be tested
:rtype tuple
:return two-tuple (bool, msg)
"""
return (
(val >= -180.0) and (val <= 180),
"straight_vertical_longitude_from_pole must satisfy (-180 <= x <= 180)",
)
###############################################################################
# Chapter 6: Labels and Alternative Coordinates
###############################################################################
def check_geographic_region(self, ds):
"""
6.1.1 When data is representative of geographic regions which can be identified by names but which have complex
boundaries that cannot practically be specified using longitude and latitude boundary coordinates, a labeled
axis should be used to identify the regions.
Recommend that the names be chosen from the list of standardized region names whenever possible. To indicate
that the label values are standardized the variable that contains the labels must be given the standard_name
attribute with the value region.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
region_list = (
[ # TODO maybe move this (and other info like it) into a config file?
"africa",
"antarctica",
"arabian_sea",
"aral_sea",
"arctic_ocean",
"asia",
"atlantic_ocean",
"australia",
"baltic_sea",
"barents_opening",
"barents_sea",
"beaufort_sea",
"bellingshausen_sea",
"bering_sea",
"bering_strait",
"black_sea",
"canadian_archipelago",
"caribbean_sea",
"caspian_sea",
"central_america",
"chukchi_sea",
"contiguous_united_states",
"denmark_strait",
"drake_passage",
"east_china_sea",
"english_channel",
"eurasia",
"europe",
"faroe_scotland_channel",
"florida_bahamas_strait",
"fram_strait",
"global",
"global_land",
"global_ocean",
"great_lakes",
"greenland",
"gulf_of_alaska",
"gulf_of_mexico",
"hudson_bay",
"iceland_faroe_channel",
"indian_ocean",
"indonesian_throughflow",
"indo_pacific_ocean",
"irish_sea",
"lake_baykal",
"lake_chad",
"lake_malawi",
"lake_tanganyika",
"lake_victoria",
"mediterranean_sea",
"mozambique_channel",
"north_america",
"north_sea",
"norwegian_sea",
"pacific_equatorial_undercurrent",
"pacific_ocean",
"persian_gulf",
"red_sea",
"ross_sea",
"sea_of_japan",
"sea_of_okhotsk",
"south_america",
"south_china_sea",
"southern_ocean",
"taiwan_luzon_straits",
"weddell_sea",
"windward_passage",
"yellow_sea",
]
)
for var in ds.get_variables_by_attributes(standard_name="region"):
valid_region = TestCtx(BaseCheck.MEDIUM, self.section_titles["6.1"])
region = var[:]
if np.ma.isMA(region):
region = region.data
valid_region.assert_true(
"".join(region.astype(str)).lower() in region_list,
"6.1.1 '{}' specified by '{}' is not a valid region".format(
"".join(region.astype(str)), var.name
),
)
ret_val.append(valid_region.to_result())
return ret_val
###############################################################################
# Chapter 7: Data Representative of Cells
###############################################################################
def check_cell_boundaries(self, ds):
"""
Checks the dimensions of cell boundary variables to ensure they are CF compliant.
7.1 To represent cells we add the attribute bounds to the appropriate coordinate variable(s). The value of bounds
is the name of the variable that contains the vertices of the cell boundaries. We refer to this type of variable as
a "boundary variable." A boundary variable will have one more dimension than its associated coordinate or auxiliary
coordinate variable. The additional dimension should be the most rapidly varying one, and its size is the maximum
number of cell vertices.
Applications that process cell boundary data often times need to determine whether or not adjacent cells share an
edge. In order to facilitate this type of processing the following restrictions are placed on the data in boundary
variables:
Bounds for 1-D coordinate variables
For a coordinate variable such as lat(lat) with associated boundary variable latbnd(x,2), the interval endpoints
must be ordered consistently with the associated coordinate, e.g., for an increasing coordinate, lat(1) > lat(0)
implies latbnd(i,1) >= latbnd(i,0) for all i
If adjacent intervals are contiguous, the shared endpoint must be represented identically in each instance where
it occurs in the boundary variable. For example, if the intervals that contain grid points lat(i) and lat(i+1) are
contiguous, then latbnd(i+1,0) = latbnd(i,1).
Bounds for 2-D coordinate variables with 4-sided cells
In the case where the horizontal grid is described by two-dimensional auxiliary coordinate variables in latitude
lat(n,m) and longitude lon(n,m), and the associated cells are four-sided, then the boundary variables are given
in the form latbnd(n,m,4) and lonbnd(n,m,4), where the trailing index runs over the four vertices of the cells.
Bounds for multi-dimensional coordinate variables with p-sided cells
In all other cases, the bounds should be dimensioned (...,n,p), where (...,n) are the dimensions of the auxiliary
coordinate variables, and p the number of vertices of the cells. The vertices must be traversed anticlockwise in the
lon-lat plane as viewed from above. The starting vertex is not specified.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
# Note that this test does not check monotonicity
ret_val = []
reasoning = []
for variable_name, boundary_variable_name in cfutil.get_cell_boundary_map(
ds
).items():
variable = ds.variables[variable_name]
valid = True
reasoning = []
if boundary_variable_name not in ds.variables:
valid = False
reasoning.append(
"Boundary variable {} referenced by {} not ".format(
boundary_variable_name, variable.name
)
+ "found in dataset variables"
)
else:
boundary_variable = ds.variables[boundary_variable_name]
# The number of dimensions in the bounds variable should always be
# the number of dimensions in the referring variable + 1
if boundary_variable.ndim < 2:
valid = False
reasoning.append(
"Boundary variable {} specified by {}".format(
boundary_variable.name, variable.name
)
+ " should have at least two dimensions to enclose the base "
+ "case of a one dimensionsal variable"
)
if boundary_variable.ndim != variable.ndim + 1:
valid = False
reasoning.append(
"The number of dimensions of the variable %s is %s, but the "
"number of dimensions of the boundary variable %s is %s. The boundary variable "
"should have %s dimensions"
% (
variable.name,
variable.ndim,
boundary_variable.name,
boundary_variable.ndim,
variable.ndim + 1,
)
)
if variable.dimensions[:] != boundary_variable.dimensions[: variable.ndim]:
valid = False
reasoning.append(
"Boundary variable coordinates (for {}) are in improper order: {}. Bounds-specific dimensions should be last"
"".format(variable.name, boundary_variable.dimensions)
)
# ensure the p vertices are enough to form a valid simplex/closed cell
# given the preceding (..., n) auxiliary coordinate dimensions
if (
ds.dimensions[boundary_variable.dimensions[-1]].size
< len(boundary_variable.dimensions[:-1]) + 1
):
valid = False
reasoning.append(
"Dimension {} of boundary variable (for {}) must have at least {} elements to form a simplex/closed cell with previous dimensions {}.".format(
boundary_variable.name,
variable.name,
len(variable.dimensions) + 1,
boundary_variable.dimensions[:-1],
)
)
result = Result(
BaseCheck.MEDIUM, valid, self.section_titles["7.1"], reasoning
)
ret_val.append(result)
return ret_val
def check_cell_measures(self, ds):
"""
7.2 To indicate extra information about the spatial properties of a
variable's grid cells, a cell_measures attribute may be defined for a
variable. This is a string attribute comprising a list of
blank-separated pairs of words of the form "measure: name". "area" and
"volume" are the only defined measures.
The "name" is the name of the variable containing the measure values,
which we refer to as a "measure variable". The dimensions of the
measure variable should be the same as or a subset of the dimensions of
the variable to which they are related, but their order is not
restricted.
The variable must have a units attribute and may have other attributes
such as a standard_name.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
reasoning = []
variables = ds.get_variables_by_attributes(
cell_measures=lambda c: c is not None
)
for var in variables:
search_str = r"^(?:area|volume): (\w+)$"
search_res = regex.search(search_str, var.cell_measures)
if not search_res:
valid = False
reasoning.append(
"The cell_measures attribute for variable {} "
"is formatted incorrectly. It should take the"
" form of either 'area: cell_var' or "
"'volume: cell_var' where cell_var is the "
"variable describing the cell measures".format(var.name)
)
else:
valid = True
cell_meas_var_name = search_res.groups()[0]
# TODO: cache previous results
if cell_meas_var_name not in ds.variables:
valid = False
reasoning.append(
"Cell measure variable {} referred to by "
"{} is not present in dataset variables".format(
cell_meas_var_name, var.name
)
)
else:
cell_meas_var = ds.variables[cell_meas_var_name]
if not hasattr(cell_meas_var, "units"):
valid = False
reasoning.append(
"Cell measure variable {} is required "
"to have units attribute defined.".format(
cell_meas_var_name
)
)
if not set(cell_meas_var.dimensions).issubset(var.dimensions):
valid = False
reasoning.append(
"Cell measure variable {} must have "
"dimensions which are a subset of "
"those defined in variable {}.".format(
cell_meas_var_name, var.name
)
)
result = Result(
BaseCheck.MEDIUM, valid, (self.section_titles["7.2"]), reasoning
)
ret_val.append(result)
return ret_val
def check_cell_methods(self, ds):
"""
7.3 To describe the characteristic of a field that is represented by cell values, we define the cell_methods attribute
of the variable. This is a string attribute comprising a list of blank-separated words of the form "name: method". Each
"name: method" pair indicates that for an axis identified by name, the cell values representing the field have been
determined or derived by the specified method.
name can be a dimension of the variable, a scalar coordinate variable, a valid standard name, or the word "area"
values of method should be selected from the list in Appendix E, Cell Methods, which includes point, sum, mean, maximum,
minimum, mid_range, standard_deviation, variance, mode, and median. Case is not significant in the method name. Some
methods (e.g., variance) imply a change of units of the variable, as is indicated in Appendix E, Cell Methods.
Because the default interpretation for an intensive quantity differs from that of an extensive quantity and because this
distinction may not be understood by some users of the data, it is recommended that every data variable include for each
of its dimensions and each of its scalar coordinate variables the cell_methods information of interest (unless this
information would not be meaningful). It is especially recommended that cell_methods be explicitly specified for each
spatio-temporal dimension and each spatio-temporal scalar coordinate variable.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
psep = regex.compile(
r"(?P<vars>\w+: )+(?P<method>\w+) ?(?P<where>where (?P<wtypevar>\w+) "
r"?(?P<over>over (?P<otypevar>\w+))?| ?)(?:\((?P<paren_contents>[^)]*)\))?"
)
for var in ds.get_variables_by_attributes(cell_methods=lambda x: x is not None):
if not getattr(var, "cell_methods", ""):
continue
method = getattr(var, "cell_methods", "")
valid_attribute = TestCtx(
BaseCheck.HIGH, self.section_titles["7.3"]
) # changed from 7.1 to 7.3
valid_attribute.assert_true(
regex.match(psep, method) is not None,
'"{}" is not a valid format for cell_methods attribute of "{}"'
"".format(method, var.name),
)
ret_val.append(valid_attribute.to_result())
valid_cell_names = TestCtx(BaseCheck.MEDIUM, self.section_titles["7.3"])
# check that the name is valid
for match in regex.finditer(psep, method):
# it is possible to have "var1: var2: ... varn: ...", so handle
# that case
for var_raw_str in match.captures("vars"):
# strip off the ' :' at the end of each match
var_str = var_raw_str[:-2]
if (
var_str in var.dimensions
or var_str == "area"
or var_str in getattr(var, "coordinates", "")
):
valid = True
else:
valid = False
valid_cell_names.assert_true(
valid,
"{}'s cell_methods name component {} does not match a dimension, "
"area or auxiliary coordinate".format(var.name, var_str),
)
ret_val.append(valid_cell_names.to_result())
# Checks if the method value of the 'name: method' pair is acceptable
valid_cell_methods = TestCtx(BaseCheck.MEDIUM, self.section_titles["7.3"])
for match in regex.finditer(psep, method):
# CF section 7.3 - "Case is not significant in the method name."
valid_cell_methods.assert_true(
match.group("method").lower() in self.cell_methods,
"{}:cell_methods contains an invalid method: {}"
"".format(var.name, match.group("method")),
)
ret_val.append(valid_cell_methods.to_result())
for match in regex.finditer(psep, method):
if match.group("paren_contents") is not None:
# split along spaces followed by words with a colon
# not sure what to do if a comment contains a colon!
ret_val.append(
self._check_cell_methods_paren_info(
match.group("paren_contents"), var
).to_result()
)
return ret_val
def _check_cell_methods_paren_info(self, paren_contents, var):
"""
Checks that the spacing and/or comment info contained inside the
parentheses in cell_methods is well-formed
"""
valid_info = TestCtx(BaseCheck.MEDIUM, self.section_titles["7.3"])
# if there are no colons, this is a simple comment
# TODO: are empty comments considered valid?
if ":" not in paren_contents:
valid_info.out_of += 1
valid_info.score += 1
return valid_info
# otherwise, split into k/v pairs
kv_pair_pat = r"(\S+:)\s+(.*(?=\s+\w+:)|[^:]+$)\s*"
# standardized "interval:" entries come first, followed by any
# non-standard "comment:" entry. Keep the match objects (rather than
# using findall) so we can later verify that the concatenated matches
# reproduce the original string exactly.
pmatches = [m for m in regex.finditer(kv_pair_pat, paren_contents)]
for i, pmatch in enumerate(pmatches):
keyword, val = pmatch.groups()
if keyword == "interval:":
valid_info.out_of += 2
interval_matches = regex.match(
r"^\s*(?P<interval_number>\S+)\s+(?P<interval_units>\S+)\s*$", val
)
# attempt to get the number for the interval
if not interval_matches:
valid_info.messages.append(
'§7.3.3 {}:cell_methods contains an interval specification that does not parse: "{}". Should be in format "interval: <number> <units>"'.format(
var.name, val
)
)
else:
try:
float(interval_matches.group("interval_number"))
except ValueError:
valid_info.messages.append(
'§7.3.3 {}:cell_methods contains an interval value that does not parse as a numeric value: "{}".'.format(
var.name, interval_matches.group("interval_number")
)
)
else:
valid_info.score += 1
# then the units
try:
Unit(interval_matches.group("interval_units"))
except ValueError:
valid_info.messages.append(
'§7.3.3 {}:cell_methods interval units "{}" is not parsable by UDUNITS.'.format(
var.name, interval_matches.group("interval_units")
)
)
else:
valid_info.score += 1
elif keyword == "comment:":
# comments can't really be invalid, except
# if they come first or aren't last, and
# maybe if they contain colons embedded in the
# comment string
valid_info.out_of += 1
if len(pmatches) == 1:
valid_info.messages.append(
"§7.3.3 If there is no standardized information, the keyword comment: should be omitted for variable {}".format(
var.name
)
)
# otherwise check that the comment is the last
# item in the parentheses
elif i != len(pmatches) - 1:
valid_info.messages.append(
'§7.3.3 The non-standard "comment:" element must come after any standard elements in cell_methods for variable {}'.format(
var.name
)
)
#
else:
valid_info.score += 1
else:
valid_info.out_of += 1
valid_info.messages.append(
'§7.3.3 Invalid cell_methods keyword "{}" for variable {}. Must be one of [interval, comment]'.format(
keyword, var.name
)
)
# Ensure concatenated reconstructed matches are the same as the
# original string. If they're not, there's likely a formatting error
valid_info.assert_true(
"".join(m.group(0) for m in pmatches) == paren_contents,
"§7.3.3 Parenthetical content inside {}:cell_methods is not well formed: {}".format(
var.name, paren_contents
),
)
return valid_info
def check_climatological_statistics(self, ds):
"""
7.4 A climatological time coordinate variable does not have a bounds attribute. Instead, it has a climatology
attribute, which names a variable with dimensions (n,2), n being the dimension of the climatological time axis.
Using the units and calendar of the time coordinate variable, element (i,0) of the climatology variable specifies
the beginning of the first subinterval and element (i,1) the end of the last subinterval used to evaluate the
climatological statistics with index i in the time dimension. The time coordinates should be values that are
representative of the climatological time intervals, such that an application which does not recognise climatological
time will nonetheless be able to make a reasonable interpretation.
A climatological axis may use different statistical methods to measure variation among years, within years, and within
days. The methods which can be specified are those listed in Appendix E, Cell Methods and each entry in the cell_methods
attribute may also contain non-standardised information in parentheses after the method. The value of the cell_method
attribute must be in one of the following forms:
- time: method1 within years time: method2 over years
- time: method1 within days time: method2 over days
- time: method1 within days time: method2 over days time: method3 over years
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
reasoning = []
ret_val = []
total_climate_count = 0
valid_climate_count = 0
all_clim_coord_var_names = []
methods = [
"point", # TODO change to appendix import once cf1.7 merged
"sum",
"mean",
"maximum",
"minimum",
"mid_range",
"standard_deviation",
"variance",
"mode",
"median",
]
# find any climatological time axis variables; any variables which contain
# climatological statistics will use these variables as coordinates
clim_time_coord_vars = ds.get_variables_by_attributes(
climatology=lambda s: s is not None
)
# first, to determine whether or not we have a valid climatological time
# coordinate variable, we need to make sure it has the attribute "climatology",
# but not the attribute "bounds"
for clim_coord_var in clim_time_coord_vars:
if hasattr(clim_coord_var, "bounds"):
reasoning.append(
"Variable {} has a climatology attribute and cannot also have a bounds attribute.".format(
clim_coord_var.name
)
)
result = Result(
BaseCheck.MEDIUM, False, (self.section_titles["7.4"]), reasoning
)
ret_val.append(result)
return ret_val
# make sure the climatology variable referenced actually exists
elif clim_coord_var.climatology not in ds.variables:
reasoning.append(
"Variable {} referenced in time's climatology attribute does not exist".format(
ds.variables["time"].climatology
)
)
result = Result(
BaseCheck.MEDIUM, False, (self.section_titles["7.4"]), reasoning
)
ret_val.append(result)
return ret_val
# check that coordinate bounds are in the proper order.
# make sure last elements are boundary variable specific dimensions
if (
clim_coord_var.dimensions[:]
!= ds.variables[clim_coord_var.climatology].dimensions[
: clim_coord_var.ndim
]
):
reasoning.append(
"Climatology variable coordinates are in improper order: {}. Bounds-specific dimensions should be last".format(
ds.variables[clim_coord_var.climatology].dimensions
)
)
return ret_val
elif (
ds.dimensions[
ds.variables[clim_coord_var.climatology].dimensions[-1]
].size
!= 2
):
reasoning.append(
"Climatology dimension {} should only contain two elements".format(
ds.variables[clim_coord_var.climatology].dimensions[-1]
)
)
# passed all these checks, so we can add this clim_coord_var to our total list
all_clim_coord_var_names.append(clim_coord_var.name)
# for any variables which use a climatology time coordinate variable as a coordinate,
# if they have a cell_methods attribute, it must comply with the form:
# time: method1 within years time: method2 over years
# time: method1 within days time: method2 over days
# time: method1 within days time: method2 over days time: method3 over years
# optionally followed by parentheses for explaining additional
# info, e.g.
# "time: method1 within years time: method2 over years (sidereal years)"
meth_regex = "(?:{})".format(
"|".join(methods)
) # "or" comparison for the methods
re_string = (
r"^time: {0} within (years|days)" # regex string to test
r" time: {0} over \1(?<=days)(?: time: {0} over years)?"
r"(?: \([^)]+\))?$".format(meth_regex)
)
# find any variables with a valid climatological cell_methods
for cell_method_var in ds.get_variables_by_attributes(
cell_methods=lambda s: s is not None
):
if any(
[dim in all_clim_coord_var_names for dim in cell_method_var.dimensions]
):
total_climate_count += 1
if not regex.search(re_string, cell_method_var.cell_methods):
reasoning.append(
'The "time: method within years/days over years/days" format is not correct in variable {}.'.format(
cell_method_var.name
)
)
else:
valid_climate_count += 1
result = Result(
BaseCheck.MEDIUM,
(valid_climate_count, total_climate_count),
(self.section_titles["7.4"]),
reasoning,
)
ret_val.append(result)
return ret_val
###############################################################################
# Chapter 8: Reduction of Dataset Size
###############################################################################
def check_packed_data(self, ds):
"""
8.1 Simple packing may be achieved through the use of the optional NUG defined attributes scale_factor and
add_offset. After the data values of a variable have been read, they are to be multiplied by the scale_factor,
and have add_offset added to them.
The units of a variable should be representative of the unpacked data.
If the scale_factor and add_offset attributes are of the same data type as the associated variable, the unpacked
data is assumed to be of the same data type as the packed data. However, if the scale_factor and add_offset
attributes are of a different data type from the variable (containing the packed data) then the unpacked data
should match the type of these attributes, which must both be of type float or both be of type double. An additional
restriction in this case is that the variable containing the packed data must be of type byte, short or int. It is
not advised to unpack an int into a float as there is a potential precision loss.
When data to be packed contains missing values the attributes that indicate missing values (_FillValue, valid_min,
valid_max, valid_range) must be of the same data type as the packed data.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
for name, var in ds.variables.items():
add_offset = getattr(var, "add_offset", None)
scale_factor = getattr(var, "scale_factor", None)
# use explicit None checks so that legitimate zero-valued attributes
# are not silently skipped
if add_offset is None and scale_factor is None:
continue
valid = True
reasoning = []
# if only one of these attributes is defined, assume they
# are the same type (value doesn't matter here)
if add_offset is None:
add_offset = scale_factor
if scale_factor is None:
scale_factor = add_offset
if type(add_offset) != type(scale_factor):
valid = False
reasoning.append(
"Attributes add_offset and scale_factor have different data type."
)
elif type(scale_factor) != var.dtype.type:
# Check both attributes are type float or double
if not isinstance(scale_factor, (float, np.floating)):
valid = False
reasoning.append(
"Attributes add_offset and scale_factor are not of type float or double."
)
else:
# Check variable type is byte, short or int
# np.int was removed from recent numpy releases; the fixed-width
# integer types below cover the byte/short/int cases
if var.dtype.type not in [
np.int8,
np.int16,
np.int32,
np.int64,
]:
valid = False
reasoning.append("Variable is not of type byte, short, or int.")
result = Result(
BaseCheck.MEDIUM, valid, self.section_titles["8.1"], reasoning
)
ret_val.append(result)
reasoning = []
valid = True
# test further with _FillValue , valid_min , valid_max , valid_range
if hasattr(var, "_FillValue"):
if var._FillValue.dtype.type != var.dtype.type:
valid = False
reasoning.append(
"Type of %s:_FillValue attribute (%s) does not match variable type (%s)"
% (name, var._FillValue.dtype.name, var.dtype.name)
)
if hasattr(var, "valid_min"):
if var.valid_min.dtype.type != var.dtype.type:
valid = False
reasoning.append(
"Type of %svalid_min attribute (%s) does not match variable type (%s)"
% (name, var.valid_min.dtype.name, var.dtype.name)
)
if hasattr(var, "valid_max"):
if var.valid_max.dtype.type != var.dtype.type:
valid = False
reasoning.append(
"Type of %s:valid_max attribute (%s) does not match variable type (%s)"
% (name, var.valid_max.dtype.name, var.dtype.name)
)
if hasattr(var, "valid_range"):
if var.valid_range.dtype.type != var.dtype.type:
valid = False
reasoning.append(
"Type of %s:valid_range attribute (%s) does not match variable type (%s)"
% (name, var.valid_range.dtype.name, var.dtype.name)
)
result = Result(
BaseCheck.MEDIUM, valid, self.section_titles["8.1"], reasoning
)
ret_val.append(result)
return ret_val
def check_compression_gathering(self, ds):
"""
8.2 Compression by gathering. To save space, points that are invariably
missing may be removed from a data array by gathering them along one or
more dimensions. The retained points are enumerated by a one-dimensional
"list" (compression) coordinate variable of integer type, whose compress
attribute is a blank-separated list naming, in the order of the CDL
declaration of the uncompressed array, the dimensions that were
compressed. Each value of the list variable is an index into the array
formed by flattening the compressed dimensions, with the last named
dimension varying fastest.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
for compress_var in ds.get_variables_by_attributes(
compress=lambda s: s is not None
):
valid = True
reasoning = []
# puts the referenced variable being compressed into a set
compress_set = set(compress_var.compress.split(" "))
if compress_var.ndim != 1:
valid = False
reasoning.append(
"Compression variable {} may only have one dimension".format(
compress_var.name
)
)
# ensure compression variable is a proper index, and thus is an
# signed or unsigned integer type of some sort
if (compress_var.dtype is str) or (
compress_var.dtype.kind not in {"i", "u"}
):
valid = False
reasoning.append(
"Compression variable {} must be an integer type to form a proper array index".format(
compress_var.name
)
)
# make sure all the dimensions named by the compress attribute exist
# in the dataset's dimensions
if not compress_set.issubset(ds.dimensions):
not_in_dims = sorted(compress_set.difference(ds.dimensions))
valid = False
reasoning.append(
"The following dimensions referenced by the compress attribute of variable {} do not exist: {}".format(
compress_var.name, not_in_dims
)
)
result = Result(
BaseCheck.MEDIUM, valid, self.section_titles["8.2"], reasoning
)
ret_val.append(result)
return ret_val
###############################################################################
# Chapter 9: Discrete Sampling Geometries
###############################################################################
def check_feature_type(self, ds):
"""
Check the global attribute featureType for valid CF featureTypes
9.4 A global attribute, featureType, is required for all Discrete Geometry representations except the orthogonal
multidimensional array representation, for which it is highly recommended.
The value assigned to the featureType attribute is case-insensitive.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
# Due to case insensitive requirement, we list the possible featuretypes
# in lower case and check using the .lower() method
feature_list = [
"point",
"timeseries",
"trajectory",
"profile",
"timeseriesprofile",
"trajectoryprofile",
]
feature_type = getattr(ds, "featureType", None)
valid_feature_type = TestCtx(
BaseCheck.HIGH, "§9.1 Dataset contains a valid featureType"
)
valid_feature_type.assert_true(
feature_type is None or feature_type.lower() in feature_list,
"{} is not a valid CF featureType. It must be one of {}"
"".format(feature_type, ", ".join(feature_list)),
)
return valid_feature_type.to_result()
def check_cf_role(self, ds):
"""
Check variables defining cf_role for legal cf_role values.
§9.5 The only acceptable values of cf_role for Discrete Geometry CF
data sets are timeseries_id, profile_id, and trajectory_id
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: compliance_checker.base.Result
"""
valid_roles = ["timeseries_id", "profile_id", "trajectory_id"]
variable_count = 0
for variable in ds.get_variables_by_attributes(cf_role=lambda x: x is not None):
variable_count += 1
name = variable.name
valid_cf_role = TestCtx(BaseCheck.HIGH, self.section_titles["9.5"])
cf_role = variable.cf_role
valid_cf_role.assert_true(
cf_role in valid_roles,
"{} is not a valid cf_role value. It must be one of {}"
"".format(cf_role, ", ".join(valid_roles)),
)
if variable_count > 0:
m = (
"§9.5 The only acceptable values of cf_role for Discrete Geometry CF"
+ " data sets are timeseries_id, profile_id, and trajectory_id"
)
valid_cf_role.assert_true(variable_count < 3, m)
return valid_cf_role.to_result()
def check_variable_features(self, ds):
"""
Checks the variable feature types match the dataset featureType attribute.
If more than one unique feature type is found, report this as an error.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
feature_types_found = defaultdict(list)
ret_val = []
feature_list = {
"point",
"timeseries",
"trajectory",
"profile",
"timeseriesprofile",
"trajectoryprofile",
}
# Don't bother checking the individual variables if the global
# featureType attribute is missing or is not a legal value
feature_type = getattr(ds, "featureType", "")
if feature_type.lower() not in feature_list:
return []
_feature = feature_type.lower()
for name in self._find_geophysical_vars(ds):
variable_feature = cfutil.guess_feature_type(ds, name)
# If we can't figure it out, don't check it.
if variable_feature is None:
continue
feature_types_found[variable_feature].append(name)
matching_feature = TestCtx(BaseCheck.MEDIUM,
self.section_titles["9.1"])
matching_feature.assert_true(
variable_feature.lower() == _feature,
"{} is not a {}, it is detected as a {}"
"".format(name, _feature, variable_feature),
)
ret_val.append(matching_feature.to_result())
# create explanation of all of the different featureTypes
# found in the dataset
feature_description = ", ".join(
[
"{} ({})".format(ftr, ", ".join(vrs))
for ftr, vrs in feature_types_found.items()
]
)
all_same_features = TestCtx(BaseCheck.HIGH, self.section_titles["9.1"])
all_same_features.assert_true(
len(feature_types_found) < 2,
"Different feature types discovered in this dataset: {}"
"".format(feature_description),
)
ret_val.append(all_same_features.to_result())
return ret_val
def check_hints(self, ds):
"""
Checks for potentially mislabeled metadata and makes suggestions for how to correct
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
ret_val.extend(self._check_hint_bounds(ds))
return ret_val
def _check_hint_bounds(self, ds):
"""
Checks for variables ending with _bounds, if they are not cell methods,
make the recommendation
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
boundary_variables = cfutil.get_cell_boundary_variables(ds)
for name in ds.variables:
if name.endswith("_bounds") and name not in boundary_variables:
msg = (
"{} might be a cell boundary variable but there are no variables that define it "
"as a boundary using the `bounds` attribute.".format(name)
)
result = Result(BaseCheck.LOW, True, self.section_titles["7.1"], [msg])
ret_val.append(result)
return ret_val
class CF1_7Check(CF1_6Check):
"""Implementation for CF v1.7. Inherits from CF1_6Check as most of the
checks are the same."""
# things that are specific to 1.7
_cc_spec_version = "1.7"
_cc_url = "http://cfconventions.org/Data/cf-conventions/cf-conventions-1.7/cf-conventions.html"
appendix_a = appendix_a_base.copy()
appendix_a.update(
{
"actual_range": {
"Type": "N",
"attr_loc": {"D", "C"},
"cf_section": "2.5.1",
},
"comment": {
"Type": "S",
"attr_loc": {"G", "D", "C"},
"cf_section": "2.6.2",
},
"external_variables": {
"Type": "S",
"attr_loc": {"G"},
"cf_section": "2.6.3",
},
"actual_range": {
"Type": "N",
"attr_loc": {"D", "C"},
"cf_section": "2.5.1",
},
"scale_factor": {"Type": "N", "attr_loc": {"D", "C"}, "cf_section": "8.1"},
}
)
def __init__(self, options=None):
super(CF1_7Check, self).__init__(options)
self.cell_methods = cell_methods17
self.grid_mapping_dict = grid_mapping_dict17
self.grid_mapping_attr_types = grid_mapping_attr_types17
def check_actual_range(self, ds):
"""Check the actual_range attribute of variables. As stated in
section 2.5.1 of version 1.7, this convention defines a two-element
vector attribute designed to describe the actual minimum and actual
maximum values of variables containing numeric data. Conditions:
- the first value of the two-element vector must be equal to the
minimum of the data, and the second element equal to the maximum
- if the data is packed, the elements of actual_range should have
the same data type as the *unpacked* data
- if valid_range is specified, both elements of actual_range should
be within valid_range
If a variable does not have an actual_range attribute, let it pass;
including this attribute is only suggested. However, if the user is
specifying the actual_range, the Result will be considered
high-priority."""
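# Illustrative CDL sketch (hypothetical names and values): actual_range
# holds [min(data), max(data)] and, when valid_range/valid_min/valid_max
# are present, must lie within them:
#   float temp(time) ;
#     temp:valid_range = -10.f, 50.f ;
#     temp:actual_range = -2.5f, 38.2f ;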
ret_val = []
for name, variable in ds.variables.items():
msgs = []
score = 0
out_of = 0
if not hasattr(variable, "actual_range"):
continue # having this attr is only suggested, no Result needed
else:
out_of += 1
try:
if (
len(variable.actual_range) != 2
): # TODO is the attr also a numpy array? if so, .size
msgs.append(
"actual_range of '{}' must be 2 elements".format(name)
)
ret_val.append(
Result( # putting result into list
BaseCheck.HIGH,
(score, out_of),
self.section_titles["2.5"],
msgs,
)
)
continue # no need to keep checking if already completely wrong
else:
score += 1
except TypeError: # in case it's just a single number
msgs.append("actual_range of '{}' must be 2 elements".format(name))
ret_val.append(
Result( # putting result into list
BaseCheck.HIGH,
(score, out_of),
self.section_titles["2.5"],
msgs,
)
)
continue
# check equality to existing min/max values
# NOTE this is a data check
# If every value is masked, a data check of actual_range isn't
# appropriate, so skip.
if not (hasattr(variable[:], "mask") and variable[:].mask.all()):
# if min/max values aren't close to actual_range bounds,
# fail.
out_of += 1
if not np.isclose(
variable.actual_range[0], variable[:].min()
) or not np.isclose(variable.actual_range[1], variable[:].max()):
msgs.append(
"actual_range elements of '{}' inconsistent with its min/max values".format(
name
)
)
else:
score += 1
# check that the actual range is within the valid range
if hasattr(variable, "valid_range"): # check within valid_range
out_of += 1
if (variable.actual_range[0] < variable.valid_range[0]) or (
variable.actual_range[1] > variable.valid_range[1]
):
msgs.append(
'"{}"\'s actual_range must be within valid_range'.format(
name
)
)
else:
score += 1
# check the elements of the actual range have the appropriate
# relationship to the valid_min and valid_max
if hasattr(variable, "valid_min"):
out_of += 1
if variable.actual_range[0] < variable.valid_min:
msgs.append(
'"{}"\'s actual_range first element must be >= valid_min ({})'.format(
name, variable.valid_min
)
)
else:
score += 1
if hasattr(variable, "valid_max"):
out_of += 1
if variable.actual_range[1] > variable.valid_max:
msgs.append(
'"{}"\'s actual_range second element must be <= valid_max ({})'.format(
name, variable.valid_max
)
)
else:
score += 1
ret_val.append(
Result( # putting result into list
BaseCheck.HIGH, (score, out_of), self.section_titles["2.5"], msgs
)
)
return ret_val
def check_cell_boundaries(self, ds):
"""
Checks the dimensions of cell boundary variables to ensure they are CF compliant
per section 7.1.
This method extends the CF1_6Check method; please see the original method for the
complete doc string.
If any variable contains both a formula_terms attribute *and* a bounding variable,
that bounds variable must also have a formula_terms attribute.
:param netCDF4.Dataset ds: An open netCDF dataset
:returns list: List of results
"""
# Note that this test does not check monotonicity
ret_val = []
reasoning = []
for variable_name, boundary_variable_name in cfutil.get_cell_boundary_map(
ds
).items():
variable = ds.variables[variable_name]
valid = True
reasoning = []
if boundary_variable_name not in ds.variables:
valid = False
reasoning.append(
"Boundary variable {} referenced by {} not ".format(
boundary_variable_name, variable.name
)
+ "found in dataset variables"
)
else:
boundary_variable = ds.variables[boundary_variable_name]
# The number of dimensions in the bounds variable should always be
# the number of dimensions in the referring variable + 1
if boundary_variable.ndim < 2:
valid = False
reasoning.append(
"Boundary variable {} specified by {}".format(
boundary_variable.name, variable.name
)
+ " should have at least two dimensions to enclose the base "
+ "case of a one dimensionsal variable"
)
if boundary_variable.ndim != variable.ndim + 1:
valid = False
reasoning.append(
"The number of dimensions of the variable %s is %s, but the "
"number of dimensions of the boundary variable %s is %s. The boundary variable "
"should have %s dimensions"
% (
variable.name,
variable.ndim,
boundary_variable.name,
boundary_variable.ndim,
variable.ndim + 1,
)
)
if variable.dimensions[:] != boundary_variable.dimensions[: variable.ndim]:
valid = False
reasoning.append(
"Boundary variable coordinates (for {}) are in improper order: {}. Bounds-specific dimensions should be last"
"".format(variable.name, boundary_variable.dimensions)
)
# ensure the p vertices are enough to form a valid simplex/closed cell
# given the preceding (..., n) auxiliary coordinate dimensions
if (
ds.dimensions[boundary_variable.dimensions[-1]].size
< len(boundary_variable.dimensions[:-1]) + 1
):
valid = False
reasoning.append(
"Dimension {} of boundary variable (for {}) must have at least {} elements to form a simplex/closed cell with previous dimensions {}.".format(
boundary_variable.name,
variable.name,
len(variable.dimensions) + 1,
boundary_variable.dimensions[:-1],
)
)
# check if formula_terms is present in the var; if so,
# the bounds variable must also have a formula_terms attr
if hasattr(variable, "formula_terms"):
if not hasattr(boundary_variable, "formula_terms"):
valid = False
reasoning.append(
"'{}' has 'formula_terms' attr, bounds variable '{}' must also have 'formula_terms'".format(
variable_name, boundary_variable_name
)
)
result = Result(
BaseCheck.MEDIUM, valid, self.section_titles["7.1"], reasoning
)
ret_val.append(result)
return ret_val
def check_cell_measures(self, ds):
"""
A method to over-ride the CF1_6Check method. In CF 1.7, it is specified
that variable referenced by cell_measures must be in the dataset OR
referenced by the global attribute "external_variables", which represent
all the variables used in the dataset but not found in the dataset.
7.2 To indicate extra information about the spatial properties of a
variable's grid cells, a cell_measures attribute may be defined for a
variable. This is a string attribute comprising a list of
blank-separated pairs of words of the form "measure: name". "area" and
"volume" are the only defined measures.
The "name" is the name of the variable containing the measure values,
which we refer to as a "measure variable". The dimensions of the
measure variable should be the same as or a subset of the dimensions of
the variable to which they are related, but their order is not
restricted.
The variable must have a units attribute and may have other attributes
such as a standard_name.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
reasoning = []
variables = ds.get_variables_by_attributes(
cell_measures=lambda c: c is not None
)
for var in variables:
search_str = r"^(?:area|volume): (\w+)$"
search_res = regex.search(search_str, var.cell_measures)
if not search_res:
valid = False
reasoning.append(
"The cell_measures attribute for variable {} "
"is formatted incorrectly. It should take the"
" form of either 'area: cell_var' or "
"'volume: cell_var' where cell_var is the "
"variable describing the cell measures".format(var.name)
)
else:
valid = True
cell_meas_var_name = search_res.groups()[0]
# TODO: cache previous results
# if the dataset has external_variables, get it
try:
# external_variables is a blank-separated string; split it so the
# membership test below matches whole variable names
external_variables = ds.getncattr("external_variables").split()
except AttributeError:
external_variables = []
if cell_meas_var_name not in ds.variables:
if cell_meas_var_name not in external_variables:
valid = False
reasoning.append(
"Cell measure variable {} referred to by {} is not present in dataset variables".format(
cell_meas_var_name, var.name
)
)
else:
valid = True
# make Result
result = Result(
BaseCheck.MEDIUM, valid, (self.section_titles["7.2"]), reasoning
)
ret_val.append(result)
continue # can't test anything on an external var
else:
cell_meas_var = ds.variables[cell_meas_var_name]
if not hasattr(cell_meas_var, "units"):
valid = False
reasoning.append(
"Cell measure variable {} is required "
"to have units attribute defined.".format(
cell_meas_var_name
)
)
if not set(cell_meas_var.dimensions).issubset(var.dimensions):
valid = False
reasoning.append(
"Cell measure variable {} must have "
"dimensions which are a subset of "
"those defined in variable {}.".format(
cell_meas_var_name, var.name
)
)
result = Result(
BaseCheck.MEDIUM, valid, (self.section_titles["7.2"]), reasoning
)
ret_val.append(result)
return ret_val
def _check_grid_mapping_attr_condition(self, attr, attr_name):
"""
Evaluate a condition (or series of conditions) for a particular
attribute. Implementation for CF-1.7.
:param attr: attribute to test condition for
:param str attr_name: name of the attribute
:rtype tuple
:return two-tuple of (bool, str)
"""
if attr_name == "geographic_crs_name":
return self._evaluate_geographic_crs_name(attr)
elif attr_name == "geoid_name":
return self._evaluate_geoid_name(attr)
elif attr_name == "geopotential_datum_name":
return self._evaluate_geopotential_datum_name(attr)
elif attr_name == "horizontal_datum_name":
return self._evaluate_horizontal_datum_name(attr)
elif attr_name == "prime_meridian_name":
return self._evaluate_prime_meridian_name(attr)
elif attr_name == "projected_crs_name":
return self._evaluate_projected_crs_name(attr)
elif attr_name == "reference_ellipsoid_name":
return self._evaluate_reference_ellipsoid_name(attr)
elif attr_name == "towgs84":
return self._evaluate_towgs84(attr)
else: # invoke method from 1.6, as these names are all still valid
return super(CF1_7Check, self)._check_grid_mapping_attr_condition(
attr, attr_name
)
def _check_gmattr_existence_condition_geoid_name_geoptl_datum_name(self, var):
"""
Check to see if both geoid_name and geopotential_datum_name exist as attributes
for `var`. They should not.
:param netCDF4.Variable var
:rtype tuple
:return two-tuple (bool, str)
"""
msg = "Both geoid_name and geopotential_datum_name cannot exist"
if ("geoid_name" in var.ncattrs()) and (
"geopotential_datum_name" in var.ncattrs()
):
return (False, msg)
else:
return (True, msg)
def _check_gmattr_existence_condition_ell_pmerid_hdatum(self, var):
"""
If one of reference_ellipsoid_name, prime_meridian_name, or
horizontal_datum_name are defined as grid_mapping attributes,
they must all be defined.
:param netCDF4.Variable var
:rtype tuple
:return two-tuple (bool, str)
"""
msg = (
"If any of reference_ellipsoid_name, prime_meridian_name, "
"or horizontal_datum_name are defined, all must be defined."
)
_ncattrs = set(var.ncattrs())
if any(
[
x in _ncattrs
for x in [
"reference_ellipsoid_name",
"prime_meridian_name",
"horizontal_datum_name",
]
]
) and (
not set(
[
"reference_ellipsoid_name",
"prime_meridian_name",
"horizontal_datum_name",
]
).issubset(_ncattrs)
):
return (False, msg)
else:
return (True, msg)
def _get_projdb_conn(self):
"""
Return a SQLite Connection to the PROJ database.
Returns:
sqlite3.Connection
"""
proj_db_path = os.path.join(pyproj.datadir.get_data_dir(), "proj.db")
return sqlite3.connect(proj_db_path)
def _exec_query_str_with_params(self, qstr, argtuple):
"""
Execute a query string in a database connection with the given argument
        tuple. Return a cursor over the result set.
        :param str qstr: desired query to be executed
        :param tuple argtuple: tuple of arguments to be supplied to query
        :rtype: sqlite3.Cursor
"""
conn = self._get_projdb_conn()
return conn.execute(qstr, argtuple)
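    # Illustrative sketch (not part of the check logic) of how the query
    # helpers above are combined; the CRS name "WGS 84" is just an example:
    #
    #   qstr = (
    #       "SELECT 1 FROM geodetic_crs WHERE name = ? "
    #       "UNION ALL "
    #       "SELECT 1 FROM alias_name WHERE alt_name = ? "
    #       "AND table_name = 'geodetic_crs' LIMIT 1"
    #   )
    #   cursor = self._exec_query_str_with_params(qstr, ("WGS 84", "WGS 84"))
    #   name_exists = len(cursor.fetchall()) > 0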
def _evaluate_geographic_crs_name(self, val):
"""
Evaluate the condition for the geographic_crs_name attribute.
:param val: value to be tested
:rtype tuple
:return two-tuple of (bool, str)
"""
query_str = (
"SELECT 1 FROM geodetic_crs WHERE name = ? "
"UNION ALL " # need union in case contained in other tables
"SELECT 1 FROM alias_name WHERE alt_name = ? "
"AND table_name = 'geodetic_crs' LIMIT 1"
)
# try to find the value in the database
res_set = self._exec_query_str_with_params(query_str, (val, val))
        # does it exist? if so, at least one row will be returned
return (
len(res_set.fetchall()) > 0,
"geographic_crs_name must correspond to a valid OGC WKT GEOGCS name",
)
def _evaluate_geoid_name(self, val):
"""
        Evaluate the condition for the geoid_name attribute.
:param val: value to be tested
:rtype tuple
:return two-tuple of (bool, str)
"""
query_str = (
"SELECT 1 FROM vertical_datum WHERE name = ? "
"UNION ALL "
"SELECT 1 FROM alias_name WHERE alt_name = ? "
"AND table_name = 'vertical_datum' LIMIT 1"
)
# try to find the value in the database
res_set = self._exec_query_str_with_params(query_str, (val, val))
return (
len(res_set.fetchall()) > 0,
"geoid_name must correspond to a valid OGC WKT VERT_DATUM name",
)
def _evaluate_geopotential_datum_name(self, val):
"""
        Evaluate the condition for the geopotential_datum_name attribute.
:param val: value to be tested
:rtype tuple
:return two-tuple of (bool, str)
"""
query_str = (
"SELECT 1 FROM vertical_datum WHERE name = ? "
"UNION ALL "
"SELECT 1 FROM alias_name WHERE alt_name = ? "
"AND table_name = 'vertical_datum' LIMIT 1"
)
# try to find the value in the database
res_set = self._exec_query_str_with_params(query_str, (val, val))
return (
len(res_set.fetchall()) > 0,
"geopotential_datum_name must correspond to a valid OGC WKT VERT_DATUM name",
)
def _evaluate_horizontal_datum_name(self, val):
"""
Evaluate the condition for the horizontal_datum_name attribute.
:param val: value to be tested
:rtype tuple
:return two-tuple of (bool, str)
"""
        return (
            val in horizontal_datum_names17,
            (
                "{} must be a valid Horizontal Datum Name; "
                "see https://github.com/cf-convention/cf-conventions/wiki/Mapping-from-CF-Grid-Mapping-Attributes-to-CRS-WKT-Elements."
            ).format(val),
        )
def _evaluate_prime_meridian_name(self, val):
"""
Evaluate the condition for the prime_meridian_name.
:param val: value to be tested
:rtype tuple
:return two-tuple of (bool, str)
"""
        return (
            val in prime_meridian_names17,
            (
                "{} must be a valid Prime Meridian name; "
                "see https://github.com/cf-convention/cf-conventions/wiki/csv/prime_meridian.csv."
            ).format(val),
        )
def _evaluate_projected_crs_name(self, val):
"""
        Evaluate the condition for the projected_crs_name attribute.
:param val: value to be tested
:rtype tuple
:return two-tuple of (bool, str)
"""
query_str = (
"SELECT 1 FROM projected_crs WHERE name = ? "
"UNION ALL "
"SELECT 1 FROM alias_name WHERE alt_name = ? "
"AND table_name = 'projected_crs' LIMIT 1"
)
# try to find the value in the database
res_set = self._exec_query_str_with_params(query_str, (val, val))
return (
len(res_set.fetchall()) > 0,
"projected_crs_name must correspond to a valid OGC WKT PROJCS name",
)
def _evaluate_reference_ellipsoid_name(self, val):
"""
Evaluate the condition for the reference_ellipsoid_name attribute.
:param val: value to be tested
:rtype tuple
:return two-tuple of (bool, str)
"""
        return (
            val in ellipsoid_names17,
            (
                "{} must be a valid Ellipsoid Name; "
                "see https://github.com/cf-convention/cf-conventions/wiki/csv/ellipsoid.csv."
            ).format(val),
        )
def _evaluate_towgs84(self, val):
"""
Evaluate the condition for the towgs84 attribute.
:param val: value to be tested
:rtype tuple
:return two-tuple of (bool, str)
"""
msg = (
"towgs84 must be an array of length 3, 6, or 7 of double-precision"
" and correspond to anm OGC WKT TOWGS84 node"
)
# if not numpy type, return false
if not getattr(val, "dtype", None):
return (False, msg)
# must be double-precision array
elif val.dtype != np.float64:
return (False, msg)
# must be of length 3, 6, or 7
elif not val.shape: # single value
return (False, msg)
elif not (val.size in (3, 6, 7)):
return (False, msg)
else:
return (True, msg)
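    # A short sketch of how the towgs84 rule above behaves (values hypothetical):
    #
    #   import numpy as np
    #   self._evaluate_towgs84(np.zeros(7, dtype=np.float64))  # passes: float64, length 7
    #   self._evaluate_towgs84(np.zeros(4, dtype=np.float64))  # fails: length not 3, 6 or 7
    #   self._evaluate_towgs84([0.0, 0.0, 0.0])                 # fails: not a numpy array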
def check_grid_mapping(self, ds):
__doc__ = super(CF1_7Check, self).check_grid_mapping.__doc__
prev_return = super(CF1_7Check, self).check_grid_mapping(ds)
ret_val = []
grid_mapping_variables = cfutil.get_grid_mapping_variables(ds)
for var_name in sorted(grid_mapping_variables):
var = ds.variables[var_name]
test_ctx = self.get_test_ctx(
BaseCheck.HIGH, self.section_titles["5.6"], var.name
)
# TODO: check cases where crs_wkt provides part of a necessary
# grid_mapping attribute, or where a grid_mapping attribute
# overrides what has been provided in crs_wkt.
# attempt to parse crs_wkt if it is present
if "crs_wkt" in var.ncattrs():
crs_wkt = var.crs_wkt
if not isinstance(crs_wkt, str):
test_ctx.messages.append("crs_wkt attribute must be a string")
test_ctx.out_of += 1
else:
try:
pyproj.CRS.from_wkt(crs_wkt)
except pyproj.exceptions.CRSError as crs_error:
test_ctx.messages.append(
"Cannot parse crs_wkt attribute to CRS using Proj4. Proj4 error: {}".format(
str(crs_error)
)
)
else:
test_ctx.score += 1
test_ctx.out_of += 1
# existence_conditions
exist_cond_1 = (
self._check_gmattr_existence_condition_geoid_name_geoptl_datum_name(var)
)
test_ctx.assert_true(exist_cond_1[0], exist_cond_1[1])
exist_cond_2 = self._check_gmattr_existence_condition_ell_pmerid_hdatum(var)
test_ctx.assert_true(exist_cond_2[0], exist_cond_2[1])
# handle vertical datum related grid_mapping attributes
vert_datum_attrs = {}
possible_vert_datum_attrs = {"geoid_name", "geopotential_datum_name"}
vert_datum_attrs = possible_vert_datum_attrs.intersection(var.ncattrs())
len_vdatum_name_attrs = len(vert_datum_attrs)
# check that geoid_name and geopotential_datum_name are not both
# present in the grid_mapping variable
if len_vdatum_name_attrs == 2:
test_ctx.out_of += 1
test_ctx.messages.append(
"Cannot have both 'geoid_name' and "
"'geopotential_datum_name' attributes in "
"grid mapping variable '{}'".format(var.name)
)
elif len_vdatum_name_attrs == 1:
# should be one or zero attrs
proj_db_path = os.path.join(pyproj.datadir.get_data_dir(), "proj.db")
try:
with sqlite3.connect(proj_db_path) as conn:
v_datum_attr = next(iter(vert_datum_attrs))
v_datum_value = getattr(var, v_datum_attr)
v_datum_str_valid = self._process_v_datum_str(
v_datum_value, conn
)
invalid_msg = (
"Vertical datum value '{}' for "
"attribute '{}' in grid mapping "
"variable '{}' is not valid".format(
v_datum_value, v_datum_attr, var.name
)
)
test_ctx.assert_true(v_datum_str_valid, invalid_msg)
except sqlite3.Error as e:
# if we hit an error, skip the check
warn(
"Error occurred while trying to query "
"Proj4 SQLite database at {}: {}".format(proj_db_path, str(e))
)
prev_return[var.name] = test_ctx.to_result()
return prev_return
def _process_v_datum_str(self, v_datum_str, conn):
vdatum_query = """SELECT 1 FROM alias_name WHERE
table_name = 'vertical_datum' AND
alt_name = ?
UNION ALL
SELECT 1 FROM vertical_datum WHERE
name = ?
LIMIT 1"""
res_set = conn.execute(vdatum_query, (v_datum_str, v_datum_str))
return len(res_set.fetchall()) > 0
def _check_dimensionless_vertical_coordinate_1_7(
self, ds, vname, deprecated_units, ret_val, dim_vert_coords_dict
):
"""
Check that a dimensionless vertical coordinate variable is valid under
CF-1.7.
:param netCDF4.Dataset ds: open netCDF4 dataset
        :param str vname: name of the variable to check
        :param list deprecated_units: list of deprecated unit strings
        :param list ret_val: list to append Results to
        :param dict dim_vert_coords_dict: mapping of dimensionless vertical
            coordinate standard_names to their expected units and
            computed_standard_names
        :rtype None
"""
variable = ds.variables[vname]
standard_name = getattr(variable, "standard_name", None)
units = getattr(variable, "units", None)
formula_terms = getattr(variable, "formula_terms", None)
# Skip the variable if it's dimensional
if formula_terms is None and standard_name not in dim_vert_coords_dict:
return
        # assert that the computed_standard_name maps to the standard_name correctly
correct_computed_std_name_ctx = TestCtx(
BaseCheck.MEDIUM, self.section_titles["4.3"]
)
_comp_std_name = dim_vert_coords_dict[standard_name][1]
correct_computed_std_name_ctx.assert_true(
getattr(variable, "computed_standard_name", None) in _comp_std_name,
"§4.3.3 The standard_name of `{}` must map to the correct computed_standard_name, `{}`".format(
vname, sorted(_comp_std_name)
),
)
ret_val.append(correct_computed_std_name_ctx.to_result())
def check_dimensionless_vertical_coordinates(self, ds):
"""
Check the validity of dimensionless coordinates under CF
CF §4.3.2 The units attribute is not required for dimensionless
coordinates.
The standard_name attribute associates a coordinate with its definition
from Appendix D, Dimensionless Vertical Coordinates. The definition
provides a mapping between the dimensionless coordinate values and
dimensional values that can positively and uniquely indicate the
location of the data.
A new attribute, formula_terms, is used to associate terms in the
definitions with variables in a netCDF file. To maintain backwards
compatibility with COARDS the use of these attributes is not required,
but is strongly recommended.
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
"""
ret_val = []
z_variables = cfutil.get_z_variables(ds)
deprecated_units = ["level", "layer", "sigma_level"]
# compose this function to use the results from the CF-1.6 check
# and then extend it using a CF-1.7 addition
ret_val.extend(
self._check_dimensionless_vertical_coordinates(
ds,
deprecated_units,
self._check_dimensionless_vertical_coordinate_1_6,
dimless_vertical_coordinates_1_7,
)
)
ret_val.extend(
self._check_dimensionless_vertical_coordinates(
ds,
deprecated_units,
self._check_dimensionless_vertical_coordinate_1_7,
dimless_vertical_coordinates_1_7,
)
)
return ret_val
class CFNCCheck(BaseNCCheck, CFBaseCheck):
@classmethod
def beliefs(cls): # @TODO
return {}
|
aodn/compliance-checker
|
compliance_checker/cf/cf.py
|
Python
|
apache-2.0
| 220,804
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live value resolution.
Live values are extracted from the known execution context.
Requires activity analysis annotations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import transformer
from tensorflow.contrib.py2tf.pyct.static_analysis.annos import NodeAnno
class LiveValueResolver(transformer.Base):
"""Annotates nodes with live values."""
def __init__(self, context, literals):
super(LiveValueResolver, self).__init__(context)
self.literals = literals
def visit_ClassDef(self, node):
self.generic_visit(node)
anno.setanno(node, 'live_val', self.context.namespace[node.name])
return node
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, gast.Load):
assert anno.hasanno(node, NodeAnno.IS_LOCAL), node
symbol_is_local = anno.getanno(node, NodeAnno.IS_LOCAL)
assert anno.hasanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY), node
symbol_is_modified = anno.getanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY)
assert anno.hasanno(node, NodeAnno.IS_PARAM), node
symbol_is_param = anno.getanno(node, NodeAnno.IS_PARAM)
if not symbol_is_local and not symbol_is_param:
if node.id in self.literals:
anno.setanno(node, 'live_val', self.literals[node.id])
elif node.id in self.context.namespace:
obj = self.context.namespace[node.id]
anno.setanno(node, 'live_val', obj)
if hasattr(obj, '__name__'):
# If the symbol value is for example a primitive, then it will not
# have a name.
anno.setanno(node, 'fqn', (obj.__name__,))
else:
pass
# TODO(mdan): Should we raise an error here?
# Can encounter this when:
# * a symbol truly lacks reference
# * a symbol is new, like the new name of a function we just renamed.
else:
pass
# TODO(mdan): Attempt to trace its value through the local chain.
# TODO(mdan): Use type annotations as fallback.
if not symbol_is_modified:
if node.id in self.context.arg_values:
obj = self.context.arg_values[node.id]
anno.setanno(node, 'live_val', obj)
anno.setanno(node, 'fqn', (obj.__class__.__name__,))
return node
def visit_Attribute(self, node):
self.generic_visit(node)
if anno.hasanno(node.value, 'live_val'):
assert anno.hasanno(node.value, 'fqn')
parent_object = anno.getanno(node.value, 'live_val')
if not hasattr(parent_object, node.attr):
raise AttributeError('%s has no attribute %s' % (parent_object,
node.attr))
anno.setanno(node, 'parent_type', type(parent_object))
anno.setanno(node, 'live_val', getattr(parent_object, node.attr))
anno.setanno(node, 'fqn', anno.getanno(node.value, 'fqn') + (node.attr,))
# TODO(mdan): Investigate the role built-in annotations can play here.
elif anno.hasanno(node.value, 'type'):
parent_type = anno.getanno(node.value, 'type')
if hasattr(parent_type, node.attr):
# This should hold for static members like methods.
# This would not hold for dynamic members like function attributes.
# For the dynamic case, we simply leave the node without an annotation,
# and let downstream consumers figure out what to do.
anno.setanno(node, 'parent_type', parent_type)
anno.setanno(node, 'live_val', getattr(parent_type, node.attr))
anno.setanno(node, 'fqn',
anno.getanno(node.value, 'type_fqn') + (node.attr,))
elif isinstance(node.value, gast.Name):
stem_name = node.value
# All nonlocal symbols should be fully resolved.
assert anno.hasanno(stem_name, NodeAnno.IS_LOCAL), stem_name
# TODO(mdan): Figure out what to do when calling attribute on local object
# Maybe just leave as-is?
return node
def resolve(node, context, literals):
return LiveValueResolver(context, literals).visit(node)
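# A rough usage sketch (the helper names here are assumptions about the
# surrounding py2tf pipeline, not part of this module): resolution runs after
# activity analysis has annotated the AST.
#
#   node = parser.parse_str(source_code)        # build a gast AST
#   node = activity.resolve(node, context)      # annotate IS_LOCAL, IS_PARAM, ...
#   node = resolve(node, context, literals={})  # this module: attach 'live_val'
#   # afterwards, anno.getanno(name_node, 'live_val') yields the bound object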
|
Xeralux/tensorflow
|
tensorflow/contrib/py2tf/pyct/static_analysis/live_values.py
|
Python
|
apache-2.0
| 4,891
|
class Error(object):
def __init__(self, code, msg=None, data=None):
self.code = code
self.msg = msg
self.data = data
def __str__(self):
err = self.to_dict()
return str(err)
def to_dict(self):
err = {}
err['err_code'] = self.code
if self.msg:
err['err_msg'] = self.msg
if self.data:
err['data'] = self.data
return err
def err_code(self):
return self.code
def err_msg(self):
return self.msg
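# A minimal usage sketch (example values are arbitrary):
#
#   err = Error(404, msg='not found', data={'id': 42})
#   err.err_code()  # -> 404
#   err.to_dict()   # -> {'err_code': 404, 'err_msg': 'not found', 'data': {'id': 42}}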
|
827992983/mylib
|
python/error.py
|
Python
|
apache-2.0
| 545
|
#!/bin/python3
# Testscript for template generation and deploying
from cloud_provider.amazon import Amazon
from template.template import CloudFormationTemplate
from pprint import pprint
if __name__ == "__main__":
# Amazon Settings
region = "eu-west-1"
stack_name = 'TestStack'
# Template settings
template_file = '/tmp/template.txt'
template_json_source_file = 'test-cluster.json'
# Create template
cfn_template = CloudFormationTemplate()
cfn_template.load_json_source(template_json_source_file)
cfn_template.save_template_file(template_file)
# pprint(cfn_template.source)
# Connect to Amazon CloudFormation
aws = Amazon(region)
# Deploy CloudFormation Template
aws.deploy_stack(stack_name, template_file=template_file)
    # Delete the stack if an error occurred
# aws.delete_stack(stack_name)
|
magreiner/orchestration-tools
|
template_testing.py
|
Python
|
apache-2.0
| 859
|
#
# Parse tree nodes
#
import cython
cython.declare(sys=object, os=object, time=object, copy=object,
Builtin=object, error=object, warning=object, Naming=object, PyrexTypes=object,
py_object_type=object, ModuleScope=object, LocalScope=object, ClosureScope=object, \
StructOrUnionScope=object, PyClassScope=object, CClassScope=object,
CppClassScope=object, UtilityCode=object, EncodedString=object,
absolute_path_length=cython.Py_ssize_t)
import sys, os, time, copy, textwrap
import Builtin
from Errors import error, warning, InternalError, CompileError
import Naming
import PyrexTypes
import TypeSlots
from PyrexTypes import py_object_type, error_type, CTypedefType, CFuncType, cython_memoryview_ptr_type
from Symtab import ModuleScope, LocalScope, ClosureScope, \
StructOrUnionScope, PyClassScope, CClassScope, CppClassScope
from Cython.Utils import open_new_file, replace_suffix
from Code import UtilityCode, ClosureTempAllocator
from StringEncoding import EncodedString, escape_byte_string, split_string_literal
import Options
import DebugFlags
from itertools import chain
absolute_path_length = 0
def relative_position(pos):
"""
We embed the relative filename in the generated C file, since we
    don't want to have to regenerate and compile all the source code
    whenever the Python install directory moves (which could happen,
    e.g., when distributing binaries.)
INPUT:
        a position tuple -- (absolute filename, line number, column position)
OUTPUT:
relative filename
line number
AUTHOR: William Stein
"""
global absolute_path_length
if absolute_path_length==0:
absolute_path_length = len(os.path.abspath(os.getcwd()))
return (pos[0].get_filenametable_entry()[absolute_path_length+1:], pos[1])
def embed_position(pos, docstring):
if not Options.embed_pos_in_docstring:
return docstring
pos_line = u'File: %s (starting at line %s)' % relative_position(pos)
if docstring is None:
# unicode string
return EncodedString(pos_line)
# make sure we can encode the filename in the docstring encoding
# otherwise make the docstring a unicode string
encoding = docstring.encoding
if encoding is not None:
try:
encoded_bytes = pos_line.encode(encoding)
except UnicodeEncodeError:
encoding = None
if not docstring:
# reuse the string encoding of the original docstring
doc = EncodedString(pos_line)
else:
doc = EncodedString(pos_line + u'\n' + docstring)
doc.encoding = encoding
return doc
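# Illustrative sketch of what embed_position produces when
# Options.embed_pos_in_docstring is enabled (file name, line number and
# docstring text are made up):
#
#   embed_position(pos, u"Frobnicate the widget.")
#   # -> u"File: spam.pyx (starting at line 12)\nFrobnicate the widget."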
from Code import CCodeWriter
from types import FunctionType
def write_func_call(func):
def f(*args, **kwds):
if len(args) > 1 and isinstance(args[1], CCodeWriter):
# here we annotate the code with this function call
# but only if new code is generated
node, code = args[:2]
marker = ' /* %s -> %s.%s %s */' % (
' ' * code.call_level,
node.__class__.__name__,
func.__name__,
node.pos[1:])
pristine = code.buffer.stream.tell()
code.putln(marker)
start = code.buffer.stream.tell()
code.call_level += 4
res = func(*args, **kwds)
code.call_level -= 4
if start == code.buffer.stream.tell():
code.buffer.stream.seek(pristine)
else:
marker = marker.replace('->', '<-')
code.putln(marker)
return res
else:
return func(*args, **kwds)
return f
class VerboseCodeWriter(type):
# Set this as a metaclass to trace function calls in code.
# This slows down code generation and makes much larger files.
def __new__(cls, name, bases, attrs):
attrs = dict(attrs)
for mname, m in attrs.items():
if isinstance(m, FunctionType):
attrs[mname] = write_func_call(m)
return super(VerboseCodeWriter, cls).__new__(cls, name, bases, attrs)
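# Rough sketch of the effect of the tracing wrapper above: with
# DebugFlags.debug_trace_code_generation set, each code-writing method brackets
# its output with markers in the generated C, roughly (node name and position
# are hypothetical):
#
#    /*  -> StatListNode.generate_execution_code (12, 4) */
#    ...C code emitted by the call...
#    /*  <- StatListNode.generate_execution_code (12, 4) */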
class Node(object):
# pos (string, int, int) Source file position
# is_name boolean Is a NameNode
# is_literal boolean Is a ConstNode
if DebugFlags.debug_trace_code_generation:
__metaclass__ = VerboseCodeWriter
is_name = 0
is_none = 0
is_literal = 0
is_terminator = 0
temps = None
    # All descendants should set child_attrs to a list of the attributes
# containing nodes considered "children" in the tree. Each such attribute
# can either contain a single node or a list of nodes. See Visitor.py.
child_attrs = None
cf_state = None
# This may be an additional (or 'actual') type that will be checked when
# this node is coerced to another type. This could be useful to set when
# the actual type to which it can coerce is known, but you want to leave
# the type a py_object_type
coercion_type = None
def __init__(self, pos, **kw):
self.pos = pos
self.__dict__.update(kw)
gil_message = "Operation"
nogil_check = None
def gil_error(self, env=None):
error(self.pos, "%s not allowed without gil" % self.gil_message)
cpp_message = "Operation"
def cpp_check(self, env):
if not env.is_cpp():
self.cpp_error()
def cpp_error(self):
error(self.pos, "%s only allowed in c++" % self.cpp_message)
def clone_node(self):
"""Clone the node. This is defined as a shallow copy, except for member lists
amongst the child attributes (from get_child_accessors) which are also
copied. Lists containing child nodes are thus seen as a way for the node
        to hold multiple children directly; the list is not treated as a separate
level in the tree."""
result = copy.copy(self)
for attrname in result.child_attrs:
value = getattr(result, attrname)
if isinstance(value, list):
setattr(result, attrname, [x for x in value])
return result
#
# There are 3 phases of parse tree processing, applied in order to
# all the statements in a given scope-block:
#
# (0) analyse_declarations
# Make symbol table entries for all declarations at the current
# level, both explicit (def, cdef, etc.) and implicit (assignment
# to an otherwise undeclared name).
#
# (1) analyse_expressions
# Determine the result types of expressions and fill in the
# 'type' attribute of each ExprNode. Insert coercion nodes into the
# tree where needed to convert to and from Python objects.
# Allocate temporary locals for intermediate results. Fill
# in the 'result_code' attribute of each ExprNode with a C code
# fragment.
#
# (2) generate_code
# Emit C code for all declarations, statements and expressions.
# Recursively applies the 3 processing phases to the bodies of
# functions.
#
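    # A schematic sketch of how a driver walks these phases over a module's
    # parse tree (names below are illustrative, not the actual entry points):
    #
    #   tree.analyse_declarations(module_scope)   # (0) populate symbol tables
    #   tree.analyse_expressions(module_scope)    # (1) infer types, insert coercions
    #   tree.generate_code(code_writer)           # (2) emit C code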
def analyse_declarations(self, env):
pass
def analyse_expressions(self, env):
raise InternalError("analyse_expressions not implemented for %s" % \
self.__class__.__name__)
def generate_code(self, code):
raise InternalError("generate_code not implemented for %s" % \
self.__class__.__name__)
def annotate(self, code):
# mro does the wrong thing
if isinstance(self, BlockNode):
self.body.annotate(code)
def end_pos(self):
try:
return self._end_pos
except AttributeError:
pos = self.pos
if not self.child_attrs:
self._end_pos = pos
return pos
for attr in self.child_attrs:
child = getattr(self, attr)
# Sometimes lists, sometimes nodes
if child is None:
pass
elif isinstance(child, list):
for c in child:
pos = max(pos, c.end_pos())
else:
pos = max(pos, child.end_pos())
self._end_pos = pos
return pos
def dump(self, level=0, filter_out=("pos",), cutoff=100, encountered=None):
if cutoff == 0:
return "<...nesting level cutoff...>"
if encountered is None:
encountered = set()
if id(self) in encountered:
return "<%s (0x%x) -- already output>" % (self.__class__.__name__, id(self))
encountered.add(id(self))
def dump_child(x, level):
if isinstance(x, Node):
return x.dump(level, filter_out, cutoff-1, encountered)
elif isinstance(x, list):
return "[%s]" % ", ".join([dump_child(item, level) for item in x])
else:
return repr(x)
attrs = [(key, value) for key, value in self.__dict__.items() if key not in filter_out]
if len(attrs) == 0:
return "<%s (0x%x)>" % (self.__class__.__name__, id(self))
else:
indent = " " * level
res = "<%s (0x%x)\n" % (self.__class__.__name__, id(self))
for key, value in attrs:
res += "%s %s: %s\n" % (indent, key, dump_child(value, level + 1))
res += "%s>" % indent
return res
class CompilerDirectivesNode(Node):
"""
Sets compiler directives for the children nodes
"""
# directives {string:value} A dictionary holding the right value for
# *all* possible directives.
# body Node
child_attrs = ["body"]
def analyse_declarations(self, env):
old = env.directives
env.directives = self.directives
self.body.analyse_declarations(env)
env.directives = old
def analyse_expressions(self, env):
old = env.directives
env.directives = self.directives
self.body.analyse_expressions(env)
env.directives = old
def generate_function_definitions(self, env, code):
env_old = env.directives
code_old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.generate_function_definitions(env, code)
env.directives = env_old
code.globalstate.directives = code_old
def generate_execution_code(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.generate_execution_code(code)
code.globalstate.directives = old
def annotate(self, code):
old = code.globalstate.directives
code.globalstate.directives = self.directives
self.body.annotate(code)
code.globalstate.directives = old
class BlockNode(object):
# Mixin class for nodes representing a declaration block.
def generate_cached_builtins_decls(self, env, code):
entries = env.global_scope().undeclared_cached_builtins
for entry in entries:
code.globalstate.add_cached_builtin_decl(entry)
del entries[:]
def generate_lambda_definitions(self, env, code):
for node in env.lambda_defs:
node.generate_function_definitions(env, code)
class StatListNode(Node):
# stats a list of StatNode
child_attrs = ["stats"]
def create_analysed(pos, env, *args, **kw):
node = StatListNode(pos, *args, **kw)
        return node  # No node-specific analysis necessary
create_analysed = staticmethod(create_analysed)
def analyse_declarations(self, env):
#print "StatListNode.analyse_declarations" ###
for stat in self.stats:
stat.analyse_declarations(env)
def analyse_expressions(self, env):
#print "StatListNode.analyse_expressions" ###
for stat in self.stats:
stat.analyse_expressions(env)
def generate_function_definitions(self, env, code):
#print "StatListNode.generate_function_definitions" ###
for stat in self.stats:
stat.generate_function_definitions(env, code)
def generate_execution_code(self, code):
#print "StatListNode.generate_execution_code" ###
for stat in self.stats:
code.mark_pos(stat.pos)
stat.generate_execution_code(code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
class StatNode(Node):
#
# Code generation for statements is split into the following subphases:
#
# (1) generate_function_definitions
# Emit C code for the definitions of any structs,
# unions, enums and functions defined in the current
# scope-block.
#
# (2) generate_execution_code
# Emit C code for executable statements.
#
def generate_function_definitions(self, env, code):
pass
def generate_execution_code(self, code):
raise InternalError("generate_execution_code not implemented for %s" % \
self.__class__.__name__)
class CDefExternNode(StatNode):
# include_file string or None
# body StatNode
child_attrs = ["body"]
def analyse_declarations(self, env):
if self.include_file:
env.add_include_file(self.include_file)
old_cinclude_flag = env.in_cinclude
env.in_cinclude = 1
self.body.analyse_declarations(env)
env.in_cinclude = old_cinclude_flag
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
class CDeclaratorNode(Node):
# Part of a C declaration.
#
# Processing during analyse_declarations phase:
#
# analyse
# Returns (name, type) pair where name is the
# CNameDeclaratorNode of the name being declared
# and type is the type it is being declared as.
#
# calling_convention string Calling convention of CFuncDeclaratorNode
# for which this is a base
child_attrs = []
calling_convention = ""
class CNameDeclaratorNode(CDeclaratorNode):
# name string The Cython name being declared
# cname string or None C name, if specified
# default ExprNode or None the value assigned on declaration
child_attrs = ['default']
default = None
def analyse(self, base_type, env, nonempty = 0):
if nonempty and self.name == '':
# May have mistaken the name for the type.
if base_type.is_ptr or base_type.is_array or base_type.is_buffer:
error(self.pos, "Missing argument name")
elif base_type.is_void:
error(self.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
else:
self.name = base_type.declaration_code("", for_display=1, pyrex=1)
base_type = py_object_type
self.type = base_type
return self, base_type
class CPtrDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty = 0):
if base_type.is_pyobject:
error(self.pos,
"Pointer base type cannot be a Python object")
ptr_type = PyrexTypes.c_ptr_type(base_type)
return self.base.analyse(ptr_type, env, nonempty = nonempty)
class CReferenceDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
child_attrs = ["base"]
def analyse(self, base_type, env, nonempty = 0):
if base_type.is_pyobject:
error(self.pos,
"Reference base type cannot be a Python object")
ref_type = PyrexTypes.c_ref_type(base_type)
return self.base.analyse(ref_type, env, nonempty = nonempty)
class CArrayDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
# dimension ExprNode
child_attrs = ["base", "dimension"]
def analyse(self, base_type, env, nonempty = 0):
if base_type.is_cpp_class:
from ExprNodes import TupleNode
if isinstance(self.dimension, TupleNode):
args = self.dimension.args
else:
args = self.dimension,
values = [v.analyse_as_type(env) for v in args]
if None in values:
ix = values.index(None)
error(args[ix].pos, "Template parameter not a type.")
return error_type
base_type = base_type.specialize_here(self.pos, values)
return self.base.analyse(base_type, env, nonempty = nonempty)
if self.dimension:
self.dimension.analyse_const_expression(env)
if not self.dimension.type.is_int:
error(self.dimension.pos, "Array dimension not integer")
size = self.dimension.get_constant_c_result_code()
if size is not None:
try:
size = int(size)
except ValueError:
# runtime constant?
pass
else:
size = None
if not base_type.is_complete():
error(self.pos,
"Array element type '%s' is incomplete" % base_type)
if base_type.is_pyobject:
error(self.pos,
"Array element cannot be a Python object")
if base_type.is_cfunction:
error(self.pos,
"Array element cannot be a function")
array_type = PyrexTypes.c_array_type(base_type, size)
return self.base.analyse(array_type, env, nonempty = nonempty)
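    # Illustrative examples (Cython syntax; names hypothetical) of array
    # declarators handled above:
    #
    #   cdef double data[100]        # fixed-size C array, integral dimension
    #   cdef double grid[10][20]     # nested CArrayDeclaratorNodes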
class CFuncDeclaratorNode(CDeclaratorNode):
# base CDeclaratorNode
# args [CArgDeclNode]
# has_varargs boolean
# exception_value ConstNode
# exception_check boolean True if PyErr_Occurred check needed
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
child_attrs = ["base", "args", "exception_value"]
overridable = 0
optional_arg_count = 0
def analyse(self, return_type, env, nonempty = 0, directive_locals = {}):
if nonempty:
nonempty -= 1
func_type_args = []
for i, arg_node in enumerate(self.args):
name_declarator, type = arg_node.analyse(env, nonempty = nonempty,
is_self_arg = (i == 0 and env.is_c_class_scope))
name = name_declarator.name
if name in directive_locals:
type_node = directive_locals[name]
other_type = type_node.analyse_as_type(env)
if other_type is None:
error(type_node.pos, "Not a type")
elif (type is not PyrexTypes.py_object_type
and not type.same_as(other_type)):
error(self.base.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
else:
type = other_type
if name_declarator.cname:
error(self.pos,
"Function argument cannot have C name specification")
if i==0 and env.is_c_class_scope and type.is_unspecified:
# fix the type of self
type = env.parent_type
# Turn *[] argument into **
if type.is_array:
type = PyrexTypes.c_ptr_type(type.base_type)
# Catch attempted C-style func(void) decl
if type.is_void:
error(arg_node.pos, "Use spam() rather than spam(void) to declare a function with no arguments.")
func_type_args.append(
PyrexTypes.CFuncTypeArg(name, type, arg_node.pos))
if arg_node.default:
self.optional_arg_count += 1
elif self.optional_arg_count:
error(self.pos, "Non-default argument follows default argument")
if self.optional_arg_count:
scope = StructOrUnionScope()
arg_count_member = '%sn' % Naming.pyrex_prefix
scope.declare_var(arg_count_member, PyrexTypes.c_int_type, self.pos)
for arg in func_type_args[len(func_type_args)-self.optional_arg_count:]:
scope.declare_var(arg.name, arg.type, arg.pos, allow_pyobject = 1)
struct_cname = env.mangle(Naming.opt_arg_prefix, self.base.name)
self.op_args_struct = env.global_scope().declare_struct_or_union(name = struct_cname,
kind = 'struct',
scope = scope,
typedef_flag = 0,
pos = self.pos,
cname = struct_cname)
self.op_args_struct.defined_in_pxd = 1
self.op_args_struct.used = 1
exc_val = None
exc_check = 0
if self.exception_check == '+':
env.add_include_file('ios') # for std::ios_base::failure
env.add_include_file('new') # for std::bad_alloc
env.add_include_file('stdexcept')
env.add_include_file('typeinfo') # for std::bad_cast
if return_type.is_pyobject \
and (self.exception_value or self.exception_check) \
and self.exception_check != '+':
error(self.pos,
"Exception clause not allowed for function returning Python object")
else:
if self.exception_value:
self.exception_value.analyse_const_expression(env)
if self.exception_check == '+':
self.exception_value.analyse_types(env)
exc_val_type = self.exception_value.type
if not exc_val_type.is_error and \
not exc_val_type.is_pyobject and \
not (exc_val_type.is_cfunction and not exc_val_type.return_type.is_pyobject and len(exc_val_type.args)==0):
error(self.exception_value.pos,
"Exception value must be a Python exception or cdef function with no arguments.")
exc_val = self.exception_value
else:
self.exception_value = self.exception_value.coerce_to(return_type, env)
if self.exception_value.analyse_const_expression(env):
exc_val = self.exception_value.get_constant_c_result_code()
if exc_val is None:
raise InternalError("get_constant_c_result_code not implemented for %s" %
self.exception_value.__class__.__name__)
if not return_type.assignable_from(self.exception_value.type):
error(self.exception_value.pos,
"Exception value incompatible with function return type")
exc_check = self.exception_check
if return_type.is_cfunction:
error(self.pos,
"Function cannot return a function")
func_type = PyrexTypes.CFuncType(
return_type, func_type_args, self.has_varargs,
optional_arg_count = self.optional_arg_count,
exception_value = exc_val, exception_check = exc_check,
calling_convention = self.base.calling_convention,
nogil = self.nogil, with_gil = self.with_gil, is_overridable = self.overridable)
if self.optional_arg_count:
func_type.op_arg_struct = PyrexTypes.c_ptr_type(self.op_args_struct.type)
callspec = env.directives['callspec']
if callspec:
current = func_type.calling_convention
if current and current != callspec:
error(self.pos, "cannot have both '%s' and '%s' "
"calling conventions" % (current, callspec))
func_type.calling_convention = callspec
return self.base.analyse(func_type, env)
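    # Illustrative examples (Cython syntax; function names hypothetical) of the
    # exception clauses analysed above:
    #
    #   cdef int f(int x) except -1    # exception_value = -1, no extra check
    #   cdef int g(int x) except? -1   # exception_value = -1, exception_check set
    #   cdef void h() except +         # exception_check == '+': translate C++ throws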
class CArgDeclNode(Node):
# Item in a function declaration argument list.
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# not_none boolean Tagged with 'not None'
# or_none boolean Tagged with 'or None'
# accept_none boolean Resolved boolean for not_none/or_none
# default ExprNode or None
# default_value PyObjectConst constant for default value
# annotation ExprNode or None Py3 function arg annotation
# is_self_arg boolean Is the "self" arg of an extension type method
# is_type_arg boolean Is the "class" arg of an extension type classmethod
# is_kw_only boolean Is a keyword-only argument
child_attrs = ["base_type", "declarator", "default"]
is_self_arg = 0
is_type_arg = 0
is_generic = 1
kw_only = 0
not_none = 0
or_none = 0
type = None
name_declarator = None
default_value = None
annotation = None
def analyse(self, env, nonempty = 0, is_self_arg = False):
if is_self_arg:
self.base_type.is_self_arg = self.is_self_arg = True
if self.type is None:
            # The parser may misinterpret names as types...
# We fix that here.
if isinstance(self.declarator, CNameDeclaratorNode) and self.declarator.name == '':
if nonempty:
self.declarator.name = self.base_type.name
self.base_type.name = None
self.base_type.is_basic_c_type = False
could_be_name = True
else:
could_be_name = False
base_type = self.base_type.analyse(env, could_be_name = could_be_name)
if hasattr(self.base_type, 'arg_name') and self.base_type.arg_name:
self.declarator.name = self.base_type.arg_name
# The parser is unable to resolve the ambiguity of [] as part of the
# type (e.g. in buffers) or empty declarator (as with arrays).
            # This only arises for empty multi-dimensional arrays.
if (base_type.is_array
and isinstance(self.base_type, TemplatedTypeNode)
and isinstance(self.declarator, CArrayDeclaratorNode)):
declarator = self.declarator
while isinstance(declarator.base, CArrayDeclaratorNode):
declarator = declarator.base
declarator.base = self.base_type.array_declarator
base_type = base_type.base_type
return self.declarator.analyse(base_type, env, nonempty = nonempty)
else:
return self.name_declarator, self.type
def calculate_default_value_code(self, code):
if self.default_value is None:
if self.default:
if self.default.is_literal:
# will not output any code, just assign the result_code
self.default.generate_evaluation_code(code)
return self.type.cast_code(self.default.result())
self.default_value = code.get_argument_default_const(self.type)
return self.default_value
def annotate(self, code):
if self.default:
self.default.annotate(code)
class CBaseTypeNode(Node):
# Abstract base class for C base type nodes.
#
# Processing during analyse_declarations phase:
#
# analyse
# Returns the type.
pass
def analyse_as_type(self, env):
return self.analyse(env)
class CAnalysedBaseTypeNode(Node):
# type type
child_attrs = []
def analyse(self, env, could_be_name = False):
return self.type
class CSimpleBaseTypeNode(CBaseTypeNode):
# name string
# module_path [string] Qualifying name components
# is_basic_c_type boolean
# signed boolean
# longness integer
# complex boolean
# is_self_arg boolean Is self argument of C method
# ##is_type_arg boolean Is type argument of class method
child_attrs = []
arg_name = None # in case the argument name was interpreted as a type
module_path = []
is_basic_c_type = False
complex = False
def analyse(self, env, could_be_name = False):
# Return type descriptor.
#print "CSimpleBaseTypeNode.analyse: is_self_arg =", self.is_self_arg ###
type = None
if self.is_basic_c_type:
type = PyrexTypes.simple_c_type(self.signed, self.longness, self.name)
if not type:
error(self.pos, "Unrecognised type modifier combination")
elif self.name == "object" and not self.module_path:
type = py_object_type
elif self.name is None:
if self.is_self_arg and env.is_c_class_scope:
#print "CSimpleBaseTypeNode.analyse: defaulting to parent type" ###
type = env.parent_type
## elif self.is_type_arg and env.is_c_class_scope:
## type = Builtin.type_type
else:
type = py_object_type
else:
if self.module_path:
scope = env.find_imported_module(self.module_path, self.pos)
else:
scope = env
if scope:
if scope.is_c_class_scope:
scope = scope.global_scope()
entry = scope.lookup(self.name)
if entry and entry.is_type:
type = entry.type
elif could_be_name:
if self.is_self_arg and env.is_c_class_scope:
type = env.parent_type
## elif self.is_type_arg and env.is_c_class_scope:
## type = Builtin.type_type
else:
type = py_object_type
self.arg_name = self.name
else:
if self.templates:
if not self.name in self.templates:
error(self.pos, "'%s' is not a type identifier" % self.name)
type = PyrexTypes.TemplatePlaceholderType(self.name)
else:
error(self.pos, "'%s' is not a type identifier" % self.name)
if self.complex:
if not type.is_numeric or type.is_complex:
error(self.pos, "can only complexify c numeric types")
type = PyrexTypes.CComplexType(type)
type.create_declaration_utility_code(env)
elif type is Builtin.complex_type:
# Special case: optimise builtin complex type into C's
# double complex. The parser cannot do this (as for the
# normal scalar types) as the user may have redeclared the
# 'complex' type. Testing for the exact type here works.
type = PyrexTypes.c_double_complex_type
type.create_declaration_utility_code(env)
self.complex = True
if type:
return type
else:
return PyrexTypes.error_type
class MemoryViewSliceTypeNode(CBaseTypeNode):
child_attrs = ['base_type_node', 'axes']
def analyse(self, env, could_be_name = False):
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
import MemoryView
try:
axes_specs = MemoryView.get_axes_specs(env, self.axes)
except CompileError, e:
error(e.position, e.message_only)
self.type = PyrexTypes.ErrorType()
return self.type
MemoryView.validate_memslice_dtype(self.pos, base_type)
self.type = PyrexTypes.MemoryViewSliceType(base_type, axes_specs)
self.use_memview_utilities(env)
return self.type
def use_memview_utilities(self, env):
import MemoryView
env.use_utility_code(MemoryView.view_utility_code)
class CNestedBaseTypeNode(CBaseTypeNode):
# For C++ classes that live inside other C++ classes.
# name string
# base_type CBaseTypeNode
child_attrs = ['base_type']
def analyse(self, env, could_be_name = None):
base_type = self.base_type.analyse(env)
if base_type is PyrexTypes.error_type:
return PyrexTypes.error_type
if not base_type.is_cpp_class:
error(self.pos, "'%s' is not a valid type scope" % base_type)
return PyrexTypes.error_type
type_entry = base_type.scope.lookup_here(self.name)
if not type_entry or not type_entry.is_type:
error(self.pos, "'%s.%s' is not a type identifier" % (base_type, self.name))
return PyrexTypes.error_type
return type_entry.type
class TemplatedTypeNode(CBaseTypeNode):
# After parsing:
# positional_args [ExprNode] List of positional arguments
# keyword_args DictNode Keyword arguments
# base_type_node CBaseTypeNode
# After analysis:
# type PyrexTypes.BufferType or PyrexTypes.CppClassType ...containing the right options
child_attrs = ["base_type_node", "positional_args",
"keyword_args", "dtype_node"]
dtype_node = None
name = None
def analyse(self, env, could_be_name = False, base_type = None):
if base_type is None:
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
if base_type.is_cpp_class:
# Templated class
if self.keyword_args and self.keyword_args.key_value_pairs:
error(self.pos, "c++ templates cannot take keyword arguments");
self.type = PyrexTypes.error_type
else:
template_types = []
for template_node in self.positional_args:
type = template_node.analyse_as_type(env)
if type is None:
error(template_node.pos, "unknown type in template argument")
return error_type
template_types.append(type)
self.type = base_type.specialize_here(self.pos, template_types)
elif base_type.is_pyobject:
# Buffer
import Buffer
options = Buffer.analyse_buffer_options(
self.pos,
env,
self.positional_args,
self.keyword_args,
base_type.buffer_defaults)
if sys.version_info[0] < 3:
# Py 2.x enforces byte strings as keyword arguments ...
options = dict([ (name.encode('ASCII'), value)
for name, value in options.items() ])
self.type = PyrexTypes.BufferType(base_type, **options)
else:
# Array
empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs:
error(self.pos, "invalid array declaration")
self.type = PyrexTypes.error_type
else:
# It would be nice to merge this class with CArrayDeclaratorNode,
# but arrays are part of the declaration, not the type...
if not self.positional_args:
dimension = None
else:
dimension = self.positional_args[0]
self.array_declarator = CArrayDeclaratorNode(self.pos,
base = empty_declarator,
dimension = dimension)
self.type = self.array_declarator.analyse(base_type, env)[1]
return self.type
class CComplexBaseTypeNode(CBaseTypeNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
child_attrs = ["base_type", "declarator"]
def analyse(self, env, could_be_name = False):
base = self.base_type.analyse(env, could_be_name)
_, type = self.declarator.analyse(base, env)
return type
class CVarDefNode(StatNode):
# C variable definition or forward/extern function declaration.
#
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarators [CDeclaratorNode]
# in_pxd boolean
# api boolean
# decorators [cython.locals(...)] or None
# directive_locals { string : NameNode } locals defined by cython.locals(...)
child_attrs = ["base_type", "declarators"]
decorators = None
directive_locals = None
def analyse_declarations(self, env, dest_scope = None):
if self.directive_locals is None:
self.directive_locals = {}
if not dest_scope:
dest_scope = env
self.dest_scope = dest_scope
base_type = self.base_type.analyse(env)
visibility = self.visibility
for declarator in self.declarators:
if isinstance(declarator, CFuncDeclaratorNode):
name_declarator, type = declarator.analyse(base_type, env, directive_locals=self.directive_locals)
else:
name_declarator, type = declarator.analyse(base_type, env)
if not type.is_complete():
if not (self.visibility == 'extern' and type.is_array or type.is_memoryviewslice):
error(declarator.pos,
"Variable type '%s' is incomplete" % type)
if self.visibility == 'extern' and type.is_pyobject:
error(declarator.pos,
"Python object cannot be declared extern")
name = name_declarator.name
cname = name_declarator.cname
if name == '':
error(declarator.pos, "Missing name in declaration.")
return
if type.is_cfunction:
entry = dest_scope.declare_cfunction(name, type, declarator.pos,
cname = cname, visibility = self.visibility,
in_pxd = self.in_pxd, api = self.api)
if entry is not None:
entry.directive_locals = copy.copy(self.directive_locals)
else:
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
entry = dest_scope.declare_var(name, type, declarator.pos,
cname = cname, visibility = visibility,
in_pxd = self.in_pxd, api = self.api, is_cdef = 1)
class CStructOrUnionDefNode(StatNode):
# name string
# cname string or None
# kind "struct" or "union"
# typedef_flag boolean
# visibility "public" or "private"
# api boolean
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# packed boolean
child_attrs = ["attributes"]
def declare(self, env, scope=None):
if self.visibility == 'extern' and self.packed and not scope:
error(self.pos, "Cannot declare extern struct as 'packed'")
self.entry = env.declare_struct_or_union(
self.name, self.kind, scope, self.typedef_flag, self.pos,
self.cname, visibility = self.visibility, api = self.api,
packed = self.packed)
def analyse_declarations(self, env):
scope = None
if self.attributes is not None:
scope = StructOrUnionScope(self.name)
self.declare(env, scope)
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
attr.analyse_declarations(env, scope)
if self.visibility != 'extern':
for attr in scope.var_entries:
type = attr.type
while type.is_array:
type = type.base_type
if type == self.entry.type:
error(attr.pos, "Struct cannot contain itself as a member.")
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
pass
class CppClassNode(CStructOrUnionDefNode):
# name string
# cname string or None
# visibility "extern"
# in_pxd boolean
# attributes [CVarDefNode] or None
# entry Entry
# base_classes [string]
# templates [string] or None
def declare(self, env):
if self.templates is None:
template_types = None
else:
template_types = [PyrexTypes.TemplatePlaceholderType(template_name) for template_name in self.templates]
self.entry = env.declare_cpp_class(
self.name, None, self.pos,
self.cname, base_classes = [], visibility = self.visibility, templates = template_types)
def analyse_declarations(self, env):
scope = None
if self.attributes is not None:
scope = CppClassScope(self.name, env)
base_class_types = []
for base_class_name in self.base_classes:
base_class_entry = env.lookup(base_class_name)
if base_class_entry is None:
error(self.pos, "'%s' not found" % base_class_name)
elif not base_class_entry.is_type or not base_class_entry.type.is_cpp_class:
error(self.pos, "'%s' is not a cpp class type" % base_class_name)
else:
base_class_types.append(base_class_entry.type)
if self.templates is None:
template_types = None
else:
template_types = [PyrexTypes.TemplatePlaceholderType(template_name) for template_name in self.templates]
self.entry = env.declare_cpp_class(
self.name, scope, self.pos,
self.cname, base_class_types, visibility = self.visibility, templates = template_types)
if self.entry is None:
return
self.entry.is_cpp_class = 1
if self.attributes is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for attr in self.attributes:
attr.analyse_declarations(scope)
class CEnumDefNode(StatNode):
# name string or None
# cname string or None
# items [CEnumDefItemNode]
# typedef_flag boolean
# visibility "public" or "private"
# api boolean
# in_pxd boolean
# entry Entry
child_attrs = ["items"]
def declare(self, env):
self.entry = env.declare_enum(self.name, self.pos,
cname = self.cname, typedef_flag = self.typedef_flag,
visibility = self.visibility, api = self.api)
def analyse_declarations(self, env):
if self.items is not None:
if self.in_pxd and not env.in_cinclude:
self.entry.defined_in_pxd = 1
for item in self.items:
item.analyse_declarations(env, self.entry)
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
if self.visibility == 'public' or self.api:
temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
for item in self.entry.enum_values:
code.putln("%s = PyInt_FromLong(%s); %s" % (
temp,
item.cname,
code.error_goto_if_null(temp, item.pos)))
code.put_gotref(temp)
code.putln('if (__Pyx_SetAttrString(%s, "%s", %s) < 0) %s' % (
Naming.module_cname,
item.name,
temp,
code.error_goto(item.pos)))
code.put_decref_clear(temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(temp)
class CEnumDefItemNode(StatNode):
# name string
# cname string or None
# value ExprNode or None
child_attrs = ["value"]
def analyse_declarations(self, env, enum_entry):
if self.value:
self.value.analyse_const_expression(env)
if not self.value.type.is_int:
self.value = self.value.coerce_to(PyrexTypes.c_int_type, env)
self.value.analyse_const_expression(env)
entry = env.declare_const(self.name, enum_entry.type,
self.value, self.pos, cname = self.cname,
visibility = enum_entry.visibility, api = enum_entry.api)
enum_entry.enum_values.append(entry)
class CTypeDefNode(StatNode):
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# visibility "public" or "private"
# api boolean
# in_pxd boolean
child_attrs = ["base_type", "declarator"]
def analyse_declarations(self, env):
base = self.base_type.analyse(env)
name_declarator, type = self.declarator.analyse(base, env)
name = name_declarator.name
cname = name_declarator.cname
entry = env.declare_typedef(name, type, self.pos,
cname = cname, visibility = self.visibility, api = self.api)
if self.in_pxd and not env.in_cinclude:
entry.defined_in_pxd = 1
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
pass
class FuncDefNode(StatNode, BlockNode):
# Base class for function definition nodes.
#
# return_type PyrexType
# #filename string C name of filename string const
# entry Symtab.Entry
# needs_closure boolean Whether or not this function has inner functions/classes/yield
# needs_outer_scope boolean Whether or not this function requires outer scope
# pymethdef_required boolean Force Python method struct generation
# directive_locals { string : NameNode } locals defined by cython.locals(...)
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
py_func = None
assmt = None
needs_closure = False
needs_outer_scope = False
pymethdef_required = False
is_generator = False
is_generator_body = False
modifiers = []
star_arg = None
starstar_arg = None
def analyse_default_values(self, env):
genv = env.global_scope()
default_seen = 0
for arg in self.args:
if arg.default:
default_seen = 1
if arg.is_generic:
arg.default.analyse_types(env)
arg.default = arg.default.coerce_to(arg.type, genv)
else:
error(arg.pos,
"This argument cannot have a default value")
arg.default = None
elif arg.kw_only:
default_seen = 1
elif default_seen:
error(arg.pos, "Non-default argument following default argument")
def align_argument_type(self, env, arg):
directive_locals = self.directive_locals
type = arg.type
if arg.name in directive_locals:
type_node = directive_locals[arg.name]
other_type = type_node.analyse_as_type(env)
if other_type is None:
error(type_node.pos, "Not a type")
elif (type is not PyrexTypes.py_object_type
and not type.same_as(other_type)):
error(arg.base_type.pos, "Signature does not agree with previous declaration")
error(type_node.pos, "Previous declaration here")
else:
arg.type = other_type
return arg
def need_gil_acquisition(self, lenv):
return 0
def create_local_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
if self.needs_closure:
lenv = ClosureScope(name=self.entry.name,
outer_scope = genv,
parent_scope = env,
scope_name=self.entry.cname)
else:
lenv = LocalScope(name=self.entry.name,
outer_scope=genv,
parent_scope=env)
lenv.return_type = self.return_type
type = self.entry.type
if type.is_cfunction:
lenv.nogil = type.nogil and not type.with_gil
self.local_scope = lenv
lenv.directives = env.directives
return lenv
def generate_function_body(self, env, code):
self.body.generate_execution_code(code)
def generate_function_definitions(self, env, code):
import Buffer, MemoryView
lenv = self.local_scope
if lenv.is_closure_scope and not lenv.is_passthrough:
outer_scope_cname = "%s->%s" % (Naming.cur_scope_cname,
Naming.outer_scope_cname)
else:
outer_scope_cname = Naming.outer_scope_cname
lenv.mangle_closure_cnames(outer_scope_cname)
# Generate closure function definitions
self.body.generate_function_definitions(lenv, code)
# generate lambda function definitions
self.generate_lambda_definitions(lenv, code)
is_getbuffer_slot = (self.entry.name == "__getbuffer__" and
self.entry.scope.is_c_class_scope)
is_releasebuffer_slot = (self.entry.name == "__releasebuffer__" and
self.entry.scope.is_c_class_scope)
is_buffer_slot = is_getbuffer_slot or is_releasebuffer_slot
if is_buffer_slot:
if 'cython_unused' not in self.modifiers:
self.modifiers = self.modifiers + ['cython_unused']
preprocessor_guard = None
if self.entry.is_special and not is_buffer_slot:
slot = TypeSlots.method_name_to_slot.get(self.entry.name)
if slot:
preprocessor_guard = slot.preprocessor_guard_code()
if (self.entry.name == '__long__' and
not self.entry.scope.lookup_here('__int__')):
preprocessor_guard = None
profile = code.globalstate.directives['profile']
if profile and lenv.nogil:
warning(self.pos, "Cannot profile nogil function.", 1)
profile = False
if profile:
code.globalstate.use_utility_code(profile_utility_code)
# Generate C code for header and body of function
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
# ----- Function header
code.putln("")
if preprocessor_guard:
code.putln(preprocessor_guard)
with_pymethdef = (self.needs_assignment_synthesis(env, code) or
self.pymethdef_required)
if self.py_func:
self.py_func.generate_function_header(code,
with_pymethdef = with_pymethdef,
proto_only=True)
self.generate_function_header(code,
with_pymethdef = with_pymethdef)
# ----- Local variable declarations
# Find function scope
cenv = env
while cenv.is_py_class_scope or cenv.is_c_class_scope:
cenv = cenv.outer_scope
if self.needs_closure:
code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
code.putln(";")
elif self.needs_outer_scope:
if lenv.is_passthrough:
code.put(lenv.scope_class.type.declaration_code(Naming.cur_scope_cname))
code.putln(";")
code.put(cenv.scope_class.type.declaration_code(Naming.outer_scope_cname))
code.putln(";")
self.generate_argument_declarations(lenv, code)
for entry in lenv.var_entries:
if not entry.in_closure:
code.put_var_declaration(entry)
# Initialize the return variable __pyx_r
init = ""
if not self.return_type.is_void:
if self.return_type.is_pyobject:
init = " = NULL"
elif self.return_type.is_memoryviewslice:
init = "= {0, 0}"
code.putln(
"%s%s;" %
(self.return_type.declaration_code(Naming.retval_cname),
init))
tempvardecl_code = code.insertion_point()
self.generate_keyword_list(code)
if profile:
code.put_trace_declarations()
# ----- Extern library function declarations
lenv.generate_library_function_declarations(code)
# ----- GIL acquisition
acquire_gil = self.acquire_gil
        # See if we need to acquire the GIL only for the variable declaration
        # section (i.e. a nogil function that contains a 'with gil' block).
acquire_gil_for_var_decls_only = (lenv.nogil and
lenv.has_with_gil_block)
use_refnanny = not lenv.nogil or acquire_gil_for_var_decls_only
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_ensure_gil()
# ----- set up refnanny
if use_refnanny:
tempvardecl_code.put_declare_refcount_context()
code.put_setup_refcount_context(self.entry.name)
# ----- Automatic lead-ins for certain special functions
if is_getbuffer_slot:
self.getbuffer_init(code)
# ----- Create closure scope object
if self.needs_closure:
code.putln("%s = (%s)%s->tp_new(%s, %s, NULL);" % (
Naming.cur_scope_cname,
lenv.scope_class.type.declaration_code(''),
lenv.scope_class.type.typeptr_cname,
lenv.scope_class.type.typeptr_cname,
Naming.empty_tuple))
code.putln("if (unlikely(!%s)) {" % Naming.cur_scope_cname)
if is_getbuffer_slot:
self.getbuffer_error_cleanup(code)
if use_refnanny:
code.put_finish_refcount_context()
if acquire_gil_for_var_decls_only:
code.put_release_ensured_gil()
# FIXME: what if the error return value is a Python value?
code.putln("return %s;" % self.error_value())
code.putln("}")
code.put_gotref(Naming.cur_scope_cname)
# Note that it is unsafe to decref the scope at this point.
if self.needs_outer_scope:
code.putln("%s = (%s)%s;" % (
outer_scope_cname,
cenv.scope_class.type.declaration_code(''),
Naming.self_cname))
if lenv.is_passthrough:
code.putln("%s = %s;" % (Naming.cur_scope_cname, outer_scope_cname));
elif self.needs_closure:
# inner closures own a reference to their outer parent
code.put_incref(outer_scope_cname, cenv.scope_class.type)
code.put_giveref(outer_scope_cname)
# ----- Trace function call
if profile:
# this looks a bit late, but if we don't get here due to a
            # fatal error beforehand, it's not really worth tracing
code.put_trace_call(self.entry.name, self.pos)
# ----- Fetch arguments
self.generate_argument_parsing_code(env, code)
# If an argument is assigned to in the body, we must
# incref it to properly keep track of refcounts.
is_cdef = isinstance(self, CFuncDefNode)
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if (acquire_gil or entry.assignments) and not entry.in_closure:
code.put_var_incref(entry)
                # Note: defaults are always incref'd. For def functions, we
                #       acquire arguments from object conversion, so we have
                #       new references. If we are a cdef function, we need to
                #       incref our arguments.
if is_cdef and entry.type.is_memoryviewslice:
code.put_incref_memoryviewslice(entry.cname,
have_gil=not lenv.nogil)
# ----- Initialise local buffer auxiliary variables
for entry in lenv.var_entries + lenv.arg_entries:
if entry.type.is_buffer and entry.buffer_aux.buflocal_nd_var.used:
Buffer.put_init_vars(entry, code)
# ----- Initialise local memoryviewslices
        for entry in lenv.var_entries:
            if entry.visibility == "private" and not entry.used:
                continue
            if entry.type.is_memoryviewslice:
                # Give the slice an empty initial state (assumption: mirrors
                # the handling of memoryviewslice return values below).
                MemoryView.put_init_entry(entry.cname, code)
# ----- Check and convert arguments
self.generate_argument_type_tests(code)
# ----- Acquire buffer arguments
for entry in lenv.arg_entries:
if entry.type.is_buffer:
Buffer.put_acquire_arg_buffer(entry, code, self.pos)
if acquire_gil_for_var_decls_only:
code.put_release_ensured_gil()
# -------------------------
# ----- Function body -----
# -------------------------
self.generate_function_body(env, code)
# ----- Default return value
code.putln("")
if self.return_type.is_pyobject:
#if self.return_type.is_extension_type:
# lhs = "(PyObject *)%s" % Naming.retval_cname
#else:
lhs = Naming.retval_cname
code.put_init_to_py_none(lhs, self.return_type)
else:
val = self.return_type.default_value
if val:
code.putln("%s = %s;" % (Naming.retval_cname, val))
# ----- Error cleanup
if code.error_label in code.labels_used:
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type, have_gil=not lenv.nogil)
            # Clean up buffers -- this calls a Python function,
            # so we need to save and restore the error state
buffers_present = len(lenv.buffer_entries) > 0
memslice_entries = [e for e in lenv.entries.itervalues()
if e.type.is_memoryviewslice]
if buffers_present:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln("{ PyObject *__pyx_type, *__pyx_value, *__pyx_tb;")
code.putln("__Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb);")
for entry in lenv.buffer_entries:
Buffer.put_release_buffer_code(code, entry)
#code.putln("%s = 0;" % entry.cname)
code.putln("__Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);}")
if self.return_type.is_memoryviewslice:
MemoryView.put_init_entry(Naming.retval_cname, code)
err_val = Naming.retval_cname
else:
err_val = self.error_value()
exc_check = self.caller_will_check_exceptions()
if err_val is not None or exc_check:
# TODO: Fix exception tracing (though currently unused by cProfile).
# code.globalstate.use_utility_code(get_exception_tuple_utility_code)
# code.put_trace_exception()
if lenv.nogil:
code.putln("{")
code.put_ensure_gil()
code.put_add_traceback(self.entry.qualified_name)
if lenv.nogil:
code.put_release_ensured_gil()
code.putln("}")
else:
warning(self.entry.pos, "Unraisable exception in function '%s'." \
% self.entry.qualified_name, 0)
format_tuple = (
self.entry.qualified_name,
Naming.clineno_cname,
Naming.lineno_cname,
Naming.filename_cname,
)
code.putln(
'__Pyx_WriteUnraisable("%s", %s, %s, %s);' % format_tuple)
env.use_utility_code(unraisable_exception_utility_code)
env.use_utility_code(restore_exception_utility_code)
default_retval = self.return_type.default_value
if err_val is None and default_retval:
err_val = default_retval
if err_val is not None:
code.putln("%s = %s;" % (Naming.retval_cname, err_val))
if is_getbuffer_slot:
self.getbuffer_error_cleanup(code)
# If we are using the non-error cleanup section we should
            # jump past it if we have an error. The if-test below determines
# whether this section is used.
if buffers_present or is_getbuffer_slot:
code.put_goto(code.return_from_error_cleanup_label)
# ----- Non-error return cleanup
code.put_label(code.return_label)
for entry in lenv.buffer_entries:
if entry.used:
Buffer.put_release_buffer_code(code, entry)
if is_getbuffer_slot:
self.getbuffer_normal_cleanup(code)
if self.return_type.is_memoryviewslice:
# See if our return value is uninitialized on non-error return
# import MemoryView
# MemoryView.err_if_nogil_initialized_check(self.pos, env)
cond = code.unlikely(self.return_type.error_condition(
Naming.retval_cname))
code.putln(
'if (%s) {' % cond)
if env.nogil:
code.put_ensure_gil()
code.putln(
'PyErr_SetString('
'PyExc_TypeError,'
'"Memoryview return value is not initialized");')
if env.nogil:
code.put_release_ensured_gil()
code.putln(
'}')
# ----- Return cleanup for both error and no-error return
code.put_label(code.return_from_error_cleanup_label)
for entry in lenv.var_entries:
if not entry.used or entry.in_closure:
continue
if entry.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(entry.cname,
have_gil=not lenv.nogil)
elif entry.type.is_pyobject:
code.put_var_decref(entry)
# Decref any increfed args
for entry in lenv.arg_entries:
if entry.type.is_pyobject:
if (acquire_gil or entry.assignments) and not entry.in_closure:
code.put_var_decref(entry)
if entry.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(entry.cname,
have_gil=not lenv.nogil)
if self.needs_closure:
code.put_decref(Naming.cur_scope_cname, lenv.scope_class.type)
# ----- Return
# This code is duplicated in ModuleNode.generate_module_init_func
if not lenv.nogil:
default_retval = self.return_type.default_value
err_val = self.error_value()
if err_val is None and default_retval:
err_val = default_retval
if self.return_type.is_pyobject:
code.put_xgiveref(self.return_type.as_pyobject(Naming.retval_cname))
if self.entry.is_special and self.entry.name == "__hash__":
                # Returning -1 for __hash__ is supposed to signal an error.
                # We do the same as Python instances and coerce -1 into -2.
code.putln("if (unlikely(%s == -1) && !PyErr_Occurred()) %s = -2;" % (
Naming.retval_cname, Naming.retval_cname))
if profile:
if self.return_type.is_pyobject:
code.put_trace_return(Naming.retval_cname)
else:
code.put_trace_return("Py_None")
if not lenv.nogil:
            # GIL-holding function
code.put_finish_refcount_context()
if acquire_gil or acquire_gil_for_var_decls_only:
code.put_release_ensured_gil()
if not self.return_type.is_void:
code.putln("return %s;" % Naming.retval_cname)
code.putln("}")
if preprocessor_guard:
code.putln("#endif /*!(%s)*/" % preprocessor_guard)
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
if code.funcstate.should_declare_error_indicator:
# Initialize these variables to silence compiler warnings
tempvardecl_code.putln("int %s = 0;" % Naming.lineno_cname)
tempvardecl_code.putln("const char *%s = NULL;" %
Naming.filename_cname)
if code.c_line_in_traceback:
tempvardecl_code.putln("int %s = 0;" % Naming.clineno_cname)
# ----- Python version
code.exit_cfunc_scope()
if self.py_func:
self.py_func.generate_function_definitions(env, code)
self.generate_wrapper_functions(code)
def declare_argument(self, env, arg):
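        # Declare a single argument as a variable in the function scope,
        # rejecting 'void' and incomplete argument types.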
if arg.type.is_void:
error(arg.pos, "Invalid use of 'void'")
elif not arg.type.is_complete() and not (arg.type.is_array or arg.type.is_memoryviewslice):
error(arg.pos,
"Argument type '%s' is incomplete" % arg.type)
return env.declare_arg(arg.name, arg.type, arg.pos)
def generate_arg_type_test(self, arg, code):
# Generate type test for one argument.
if arg.type.typeobj_is_available():
code.globalstate.use_utility_code(arg_type_test_utility_code)
typeptr_cname = arg.type.typeptr_cname
arg_code = "((PyObject *)%s)" % arg.entry.cname
code.putln(
'if (unlikely(!__Pyx_ArgTypeTest(%s, %s, %d, "%s", %s))) %s' % (
arg_code,
typeptr_cname,
arg.accept_none,
arg.name,
arg.type.is_builtin_type,
code.error_goto(arg.pos)))
else:
error(arg.pos, "Cannot test type of extern C class "
"without type object name specification")
def generate_arg_none_check(self, arg, code):
# Generate None check for one argument.
code.putln('if (unlikely(((PyObject *)%s) == Py_None)) {' % arg.entry.cname)
code.putln('''PyErr_Format(PyExc_TypeError, "Argument '%s' must not be None"); %s''' % (
arg.name,
code.error_goto(arg.pos)))
code.putln('}')
def generate_wrapper_functions(self, code):
pass
def generate_execution_code(self, code):
# Evaluate and store argument default values
for arg in self.args:
default = arg.default
if default:
if not default.is_literal:
default.generate_evaluation_code(code)
default.make_owned_reference(code)
result = default.result_as(arg.type)
code.putln(
"%s = %s;" % (
arg.calculate_default_value_code(code),
result))
if arg.type.is_pyobject:
code.put_giveref(default.result())
default.generate_post_assignment_code(code)
default.free_temps(code)
# For Python class methods, create and store function object
if self.assmt:
self.assmt.generate_execution_code(code)
#
# Special code for the __getbuffer__ function
#
def getbuffer_init(self, code):
info = self.local_scope.arg_entries[1].cname
# Python 3.0 betas have a bug in memoryview which makes it call
# getbuffer with a NULL parameter. For now we work around this;
# the following block should be removed when this bug is fixed.
code.putln("if (%s != NULL) {" % info)
code.putln("%s->obj = Py_None; __Pyx_INCREF(Py_None);" % info)
code.put_giveref("%s->obj" % info) # Do not refnanny object within structs
code.putln("}")
def getbuffer_error_cleanup(self, code):
info = self.local_scope.arg_entries[1].cname
code.putln("if (%s != NULL && %s->obj != NULL) {"
% (info, info))
code.put_gotref("%s->obj" % info)
code.putln("__Pyx_DECREF(%s->obj); %s->obj = NULL;"
% (info, info))
code.putln("}")
def getbuffer_normal_cleanup(self, code):
info = self.local_scope.arg_entries[1].cname
code.putln("if (%s != NULL && %s->obj == Py_None) {" % (info, info))
code.put_gotref("Py_None")
code.putln("__Pyx_DECREF(Py_None); %s->obj = NULL;" % info)
code.putln("}")
class CFuncDefNode(FuncDefNode):
# C function definition.
#
# modifiers ['inline']
# visibility 'private' or 'public' or 'extern'
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# body StatListNode
# api boolean
# decorators [DecoratorNode] list of decorators
#
# with_gil boolean Acquire GIL around body
# type CFuncType
# py_func wrapper for calling from Python
# overridable whether or not this is a cpdef function
# inline_in_pxd whether this is an inline function in a pxd file
child_attrs = ["base_type", "declarator", "body", "py_func"]
inline_in_pxd = False
decorators = None
directive_locals = None
override = None
def unqualified_name(self):
return self.entry.name
def analyse_declarations(self, env):
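        # Analyse the declared C signature and register the cfunction entry;
        # for overridable (cpdef) functions, also synthesize a Python def
        # wrapper and an override check around the body.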
if self.directive_locals is None:
self.directive_locals = {}
self.directive_locals.update(env.directives['locals'])
base_type = self.base_type.analyse(env)
# The 2 here is because we need both function and argument names.
if isinstance(self.declarator, CFuncDeclaratorNode):
name_declarator, type = self.declarator.analyse(base_type, env,
nonempty = 2 * (self.body is not None),
directive_locals = self.directive_locals)
else:
name_declarator, type = self.declarator.analyse(base_type, env, nonempty = 2 * (self.body is not None))
if not type.is_cfunction:
error(self.pos,
"Suite attached to non-function declaration")
# Remember the actual type according to the function header
# written here, because the type in the symbol table entry
# may be different if we're overriding a C method inherited
# from the base type of an extension type.
self.type = type
type.is_overridable = self.overridable
declarator = self.declarator
while not hasattr(declarator, 'args'):
declarator = declarator.base
self.args = declarator.args
for formal_arg, type_arg in zip(self.args, type.args):
self.align_argument_type(env, type_arg)
formal_arg.type = type_arg.type
formal_arg.name = type_arg.name
formal_arg.cname = type_arg.cname
if type_arg.type.is_buffer and 'inline' in self.modifiers:
warning(formal_arg.pos, "Buffer unpacking not optimized away.", 1)
name = name_declarator.name
cname = name_declarator.cname
self.entry = env.declare_cfunction(
name, type, self.pos,
cname = cname, visibility = self.visibility, api = self.api,
defining = self.body is not None, modifiers = self.modifiers)
self.entry.inline_func_in_pxd = self.inline_in_pxd
self.return_type = type.return_type
if self.return_type.is_array and self.visibility != 'extern':
error(self.pos,
"Function cannot return an array")
if self.overridable and not env.is_module_scope:
if len(self.args) < 1 or not self.args[0].type.is_pyobject:
# An error will be produced in the cdef function
self.overridable = False
if self.overridable:
import ExprNodes
py_func_body = self.call_self_node(is_module_scope = env.is_module_scope)
self.py_func = DefNode(pos = self.pos,
name = self.entry.name,
args = self.args,
star_arg = None,
starstar_arg = None,
doc = self.doc,
body = py_func_body,
is_wrapper = 1)
self.py_func.is_module_scope = env.is_module_scope
self.py_func.analyse_declarations(env)
self.entry.as_variable = self.py_func.entry
            # Reset the scope entry for this name back to the cfunction declared above.
env.entries[name] = self.entry
if (not self.entry.is_final_cmethod and
(not env.is_module_scope or Options.lookup_module_cpdef)):
self.override = OverrideCheckNode(self.pos, py_func = self.py_func)
self.body = StatListNode(self.pos, stats=[self.override, self.body])
self.create_local_scope(env)
def call_self_node(self, omit_optional_args=0, is_module_scope=0):
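        # Build the body of the cpdef Python wrapper: a return statement that
        # calls the underlying C function (through 'self' for methods, or by
        # name at module scope).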
import ExprNodes
args = self.type.args
if omit_optional_args:
args = args[:len(args) - self.type.optional_arg_count]
arg_names = [arg.name for arg in args]
if is_module_scope:
cfunc = ExprNodes.NameNode(self.pos, name=self.entry.name)
else:
self_arg = ExprNodes.NameNode(self.pos, name=arg_names[0])
cfunc = ExprNodes.AttributeNode(self.pos, obj=self_arg, attribute=self.entry.name)
skip_dispatch = not is_module_scope or Options.lookup_module_cpdef
        c_call = ExprNodes.SimpleCallNode(
            self.pos,
            function=cfunc,
            args=[ExprNodes.NameNode(self.pos, name=n)
                  for n in arg_names[1 - is_module_scope:]],
            wrapper_call=skip_dispatch)
return ReturnStatNode(pos=self.pos, return_type=PyrexTypes.py_object_type, value=c_call)
def declare_arguments(self, env):
for arg in self.type.args:
if not arg.name:
error(arg.pos, "Missing argument name")
self.declare_argument(env, arg)
def need_gil_acquisition(self, lenv):
return self.type.with_gil
def nogil_check(self, env):
type = self.type
with_gil = type.with_gil
if type.nogil and not with_gil:
if type.return_type.is_pyobject:
error(self.pos,
"Function with Python return type cannot be declared nogil")
for entry in self.local_scope.var_entries:
if entry.type.is_pyobject and not entry.in_with_gil_block:
error(self.pos, "Function declared nogil has Python locals or temporaries")
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
if self.py_func is not None:
# this will also analyse the default values
self.py_func.analyse_expressions(env)
else:
self.analyse_default_values(env)
self.acquire_gil = self.need_gil_acquisition(self.local_scope)
def needs_assignment_synthesis(self, env, code=None):
return False
def generate_function_header(self, code, with_pymethdef, with_opt_args = 1, with_dispatch = 1, cname = None):
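        # Emit the C function header: storage class, modifiers, return type
        # and argument declarations, plus the skip-dispatch flag for cpdef
        # functions and the packed struct for optional arguments.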
scope = self.local_scope
arg_decls = []
type = self.type
for arg in type.args[:len(type.args)-type.optional_arg_count]:
arg_decl = arg.declaration_code()
entry = scope.lookup(arg.name)
if not entry.cf_used:
arg_decl = 'CYTHON_UNUSED %s' % arg_decl
arg_decls.append(arg_decl)
if with_dispatch and self.overridable:
dispatch_arg = PyrexTypes.c_int_type.declaration_code(
Naming.skip_dispatch_cname)
if self.override:
arg_decls.append(dispatch_arg)
else:
arg_decls.append('CYTHON_UNUSED %s' % dispatch_arg)
if type.optional_arg_count and with_opt_args:
arg_decls.append(type.op_arg_struct.declaration_code(Naming.optional_args_cname))
if type.has_varargs:
arg_decls.append("...")
if not arg_decls:
arg_decls = ["void"]
if cname is None:
cname = self.entry.func_cname
entity = type.function_header_code(cname, ', '.join(arg_decls))
if self.entry.visibility == 'private':
storage_class = "static "
else:
storage_class = ""
dll_linkage = None
modifiers = ""
if 'inline' in self.modifiers:
self.modifiers[self.modifiers.index('inline')] = 'cython_inline'
if self.modifiers:
modifiers = "%s " % ' '.join(self.modifiers).upper()
header = self.return_type.declaration_code(entity, dll_linkage=dll_linkage)
#print (storage_class, modifiers, header)
code.putln("%s%s%s {" % (storage_class, modifiers, header))
def generate_argument_declarations(self, env, code):
scope = self.local_scope
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
result = arg.calculate_default_value_code(code)
code.putln('%s = %s;' % (
arg.type.declaration_code(arg.cname), result))
def generate_keyword_list(self, code):
pass
def generate_argument_parsing_code(self, env, code):
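        # Unpack optional C arguments that the caller actually passed from the
        # optional-args struct, overriding the defaults assigned in
        # generate_argument_declarations().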
i = 0
used = 0
if self.type.optional_arg_count:
scope = self.local_scope
code.putln('if (%s) {' % Naming.optional_args_cname)
for arg in self.args:
if arg.default:
entry = scope.lookup(arg.name)
if self.override or entry.cf_used:
code.putln('if (%s->%sn > %s) {' %
(Naming.optional_args_cname,
Naming.pyrex_prefix, i))
declarator = arg.declarator
while not hasattr(declarator, 'name'):
declarator = declarator.base
code.putln('%s = %s->%s;' %
(arg.cname, Naming.optional_args_cname,
self.type.opt_arg_cname(declarator.name)))
used += 1
i += 1
for _ in range(used):
code.putln('}')
code.putln('}')
def generate_argument_conversion_code(self, code):
pass
def generate_argument_type_tests(self, code):
# Generate type tests for args whose type in a parent
# class is a supertype of the declared type.
for arg in self.type.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
elif arg.type.is_pyobject and not arg.accept_none:
self.generate_arg_none_check(arg, code)
def error_value(self):
if self.return_type.is_pyobject:
return "0"
else:
#return None
return self.entry.type.exception_value
def caller_will_check_exceptions(self):
return self.entry.type.exception_check
def generate_wrapper_functions(self, code):
# If the C signature of a function has changed, we need to generate
# wrappers to put in the slots here.
k = 0
entry = self.entry
func_type = entry.type
while entry.prev_entry is not None:
k += 1
entry = entry.prev_entry
entry.func_cname = "%s%swrap_%s" % (self.entry.func_cname, Naming.pyrex_prefix, k)
code.putln()
self.generate_function_header(code,
0,
with_dispatch = entry.type.is_overridable,
with_opt_args = entry.type.optional_arg_count,
cname = entry.func_cname)
if not self.return_type.is_void:
code.put('return ')
args = self.type.args
arglist = [arg.cname for arg in args[:len(args)-self.type.optional_arg_count]]
if entry.type.is_overridable:
arglist.append(Naming.skip_dispatch_cname)
elif func_type.is_overridable:
arglist.append('0')
if entry.type.optional_arg_count:
arglist.append(Naming.optional_args_cname)
elif func_type.optional_arg_count:
arglist.append('NULL')
code.putln('%s(%s);' % (self.entry.func_cname, ', '.join(arglist)))
code.putln('}')
class PyArgDeclNode(Node):
# Argument which must be a Python object (used
# for * and ** arguments).
#
# name string
# entry Symtab.Entry
# annotation ExprNode or None Py3 argument annotation
child_attrs = []
def generate_function_definitions(self, env, code):
self.entry.generate_function_definitions(env, code)
class DecoratorNode(Node):
# A decorator
#
# decorator NameNode or CallNode or AttributeNode
child_attrs = ['decorator']
class DefNode(FuncDefNode):
# A Python function definition.
#
# name string the Python name of the function
# lambda_name string the internal name of a lambda 'function'
# decorators [DecoratorNode] list of decorators
# args [CArgDeclNode] formal arguments
# doc EncodedString or None
# body StatListNode
# return_type_annotation
# ExprNode or None the Py3 return type annotation
#
# The following subnode is constructed internally
# when the def statement is inside a Python class definition.
#
# assmt AssignmentNode Function construction/assignment
# py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign
child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators"]
lambda_name = None
assmt = None
num_kwonly_args = 0
num_required_kw_args = 0
reqd_kw_flags_cname = "0"
is_wrapper = 0
no_assignment_synthesis = 0
decorators = None
return_type_annotation = None
entry = None
acquire_gil = 0
self_in_stararg = 0
py_cfunc_node = None
doc = None
def __init__(self, pos, **kwds):
FuncDefNode.__init__(self, pos, **kwds)
k = rk = r = 0
for arg in self.args:
if arg.kw_only:
k += 1
if not arg.default:
rk += 1
if not arg.default:
r += 1
self.num_kwonly_args = k
self.num_required_kw_args = rk
self.num_required_args = r
def as_cfunction(self, cfunc=None, scope=None, overridable=True):
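        # Build an equivalent CFuncDefNode for this def function, either with
        # an all-object signature or by matching it against a previously
        # declared C function type.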
if self.star_arg:
error(self.star_arg.pos, "cdef function cannot have star argument")
if self.starstar_arg:
error(self.starstar_arg.pos, "cdef function cannot have starstar argument")
if cfunc is None:
cfunc_args = []
for formal_arg in self.args:
name_declarator, type = formal_arg.analyse(scope, nonempty=1)
cfunc_args.append(PyrexTypes.CFuncTypeArg(name = name_declarator.name,
cname = None,
type = py_object_type,
pos = formal_arg.pos))
cfunc_type = PyrexTypes.CFuncType(return_type = py_object_type,
args = cfunc_args,
has_varargs = False,
exception_value = None,
exception_check = False,
nogil = False,
with_gil = False,
is_overridable = overridable)
cfunc = CVarDefNode(self.pos, type=cfunc_type)
else:
if scope is None:
scope = cfunc.scope
cfunc_type = cfunc.type
if len(self.args) != len(cfunc_type.args) or cfunc_type.has_varargs:
error(self.pos, "wrong number of arguments")
error(cfunc.pos, "previous declaration here")
for i, (formal_arg, type_arg) in enumerate(zip(self.args, cfunc_type.args)):
name_declarator, type = formal_arg.analyse(scope, nonempty=1,
is_self_arg = (i == 0 and scope.is_c_class_scope))
if type is None or type is PyrexTypes.py_object_type:
formal_arg.type = type_arg.type
formal_arg.name_declarator = name_declarator
import ExprNodes
if cfunc_type.exception_value is None:
exception_value = None
else:
exception_value = ExprNodes.ConstNode(self.pos, value=cfunc_type.exception_value, type=cfunc_type.return_type)
declarator = CFuncDeclaratorNode(self.pos,
base = CNameDeclaratorNode(self.pos, name=self.name, cname=None),
args = self.args,
has_varargs = False,
exception_check = cfunc_type.exception_check,
exception_value = exception_value,
with_gil = cfunc_type.with_gil,
nogil = cfunc_type.nogil)
return CFuncDefNode(self.pos,
modifiers = [],
base_type = CAnalysedBaseTypeNode(self.pos, type=cfunc_type.return_type),
declarator = declarator,
body = self.body,
doc = self.doc,
overridable = cfunc_type.is_overridable,
type = cfunc_type,
with_gil = cfunc_type.with_gil,
nogil = cfunc_type.nogil,
visibility = 'private',
api = False,
directive_locals = getattr(cfunc, 'directive_locals', {}))
def is_cdef_func_compatible(self):
"""Determines if the function's signature is compatible with a
cdef function. This can be used before calling
.as_cfunction() to see if that will be successful.
"""
if self.needs_closure:
return False
if self.star_arg or self.starstar_arg:
return False
return True
def analyse_declarations(self, env):
self.is_classmethod = self.is_staticmethod = False
if self.decorators:
for decorator in self.decorators:
func = decorator.decorator
if func.is_name:
self.is_classmethod |= func.name == 'classmethod'
self.is_staticmethod |= func.name == 'staticmethod'
if self.is_classmethod and env.lookup_here('classmethod'):
# classmethod() was overridden - not much we can do here ...
self.is_classmethod = False
if self.is_staticmethod and env.lookup_here('staticmethod'):
# staticmethod() was overridden - not much we can do here ...
self.is_staticmethod = False
if self.name == '__new__' and env.is_py_class_scope:
self.is_staticmethod = 1
self.analyse_argument_types(env)
if self.name == '<lambda>':
self.declare_lambda_function(env)
else:
self.declare_pyfunction(env)
self.analyse_signature(env)
self.return_type = self.entry.signature.return_type()
self.create_local_scope(env)
def analyse_argument_types(self, env):
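        # Resolve each argument's declared type and decide whether it accepts
        # None, based on 'or None'/'not None' and the
        # allow_none_for_extension_args directive.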
directive_locals = self.directive_locals = env.directives['locals']
allow_none_for_extension_args = env.directives['allow_none_for_extension_args']
for arg in self.args:
if hasattr(arg, 'name'):
name_declarator = None
else:
base_type = arg.base_type.analyse(env)
name_declarator, type = \
arg.declarator.analyse(base_type, env)
arg.name = name_declarator.name
arg.type = type
self.align_argument_type(env, arg)
if name_declarator and name_declarator.cname:
error(self.pos,
"Python function argument cannot have C name specification")
arg.type = arg.type.as_argument_type()
arg.hdr_type = None
arg.needs_conversion = 0
arg.needs_type_test = 0
arg.is_generic = 1
if arg.type.is_pyobject:
if arg.or_none:
arg.accept_none = True
elif arg.not_none:
arg.accept_none = False
elif arg.type.is_extension_type or arg.type.is_builtin_type:
if arg.default and arg.default.constant_result is None:
# special case: def func(MyType obj = None)
arg.accept_none = True
else:
# default depends on compiler directive
arg.accept_none = allow_none_for_extension_args
else:
# probably just a plain 'object'
arg.accept_none = True
else:
arg.accept_none = True # won't be used, but must be there
if arg.not_none:
error(arg.pos, "Only Python type arguments can have 'not None'")
if arg.or_none:
error(arg.pos, "Only Python type arguments can have 'or None'")
def analyse_signature(self, env):
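        # Match the argument list against the entry's calling signature:
        # choose the cheaper no-arg/one-arg signatures where possible, mark
        # self/type arguments, and flag arguments that need type tests or
        # conversions.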
if self.entry.is_special:
if self.decorators:
error(self.pos, "special functions of cdef classes cannot have decorators")
self.entry.trivial_signature = len(self.args) == 1 and not (self.star_arg or self.starstar_arg)
elif not env.directives['always_allow_keywords'] and not (self.star_arg or self.starstar_arg):
# Use the simpler calling signature for zero- and one-argument functions.
if self.entry.signature is TypeSlots.pyfunction_signature:
if len(self.args) == 0:
self.entry.signature = TypeSlots.pyfunction_noargs
elif len(self.args) == 1:
if self.args[0].default is None and not self.args[0].kw_only:
self.entry.signature = TypeSlots.pyfunction_onearg
elif self.entry.signature is TypeSlots.pymethod_signature:
if len(self.args) == 1:
self.entry.signature = TypeSlots.unaryfunc
elif len(self.args) == 2:
if self.args[1].default is None and not self.args[1].kw_only:
self.entry.signature = TypeSlots.ibinaryfunc
sig = self.entry.signature
nfixed = sig.num_fixed_args()
if sig is TypeSlots.pymethod_signature and nfixed == 1 \
and len(self.args) == 0 and self.star_arg:
# this is the only case where a diverging number of
# arguments is not an error - when we have no explicit
# 'self' parameter as in method(*args)
sig = self.entry.signature = TypeSlots.pyfunction_signature # self is not 'really' used
self.self_in_stararg = 1
nfixed = 0
for i in range(min(nfixed, len(self.args))):
arg = self.args[i]
arg.is_generic = 0
if sig.is_self_arg(i) and not self.is_staticmethod:
if self.is_classmethod:
arg.is_type_arg = 1
arg.hdr_type = arg.type = Builtin.type_type
else:
arg.is_self_arg = 1
arg.hdr_type = arg.type = env.parent_type
arg.needs_conversion = 0
else:
arg.hdr_type = sig.fixed_arg_type(i)
if not arg.type.same_as(arg.hdr_type):
if arg.hdr_type.is_pyobject and arg.type.is_pyobject:
arg.needs_type_test = 1
else:
arg.needs_conversion = 1
if arg.needs_conversion:
arg.hdr_cname = Naming.arg_prefix + arg.name
else:
arg.hdr_cname = Naming.var_prefix + arg.name
if nfixed > len(self.args):
self.bad_signature()
return
elif nfixed < len(self.args):
if not sig.has_generic_args:
self.bad_signature()
for arg in self.args:
if arg.is_generic and \
(arg.type.is_extension_type or arg.type.is_builtin_type):
arg.needs_type_test = 1
def bad_signature(self):
sig = self.entry.signature
expected_str = "%d" % sig.num_fixed_args()
if sig.has_generic_args:
expected_str = expected_str + " or more"
name = self.name
if name.startswith("__") and name.endswith("__"):
desc = "Special method"
else:
desc = "Method"
error(self.pos,
"%s %s has wrong number of arguments "
"(%d declared, %s expected)" % (
desc, self.name, len(self.args), expected_str))
def signature_has_nongeneric_args(self):
argcount = len(self.args)
if argcount == 0 or (
argcount == 1 and (self.args[0].is_self_arg or
self.args[0].is_type_arg)):
return 0
return 1
def signature_has_generic_args(self):
return self.entry.signature.has_generic_args
def declare_pyfunction(self, env):
#print "DefNode.declare_pyfunction:", self.name, "in", env ###
name = self.name
entry = env.lookup_here(name)
if entry:
if entry.is_final_cmethod and not env.parent_type.is_final_type:
error(self.pos, "Only final types can have final Python (def/cpdef) methods")
if (entry.type.is_cfunction and not entry.is_builtin_cmethod
and not self.is_wrapper):
warning(self.pos, "Overriding cdef method with def method.", 5)
entry = env.declare_pyfunction(name, self.pos, allow_redefine=not self.is_wrapper)
self.entry = entry
prefix = env.next_id(env.scope_prefix)
entry.func_cname = Naming.pyfunc_prefix + prefix + name
entry.pymethdef_cname = Naming.pymethdef_prefix + prefix + name
if Options.docstrings:
entry.doc = embed_position(self.pos, self.doc)
entry.doc_cname = Naming.funcdoc_prefix + prefix + name
if entry.is_special:
if entry.name in TypeSlots.invisible or not entry.doc or (entry.name in '__getattr__' and env.directives['fast_getattr']):
entry.wrapperbase_cname = None
else:
entry.wrapperbase_cname = Naming.wrapperbase_prefix + prefix + name
else:
entry.doc = None
def declare_lambda_function(self, env):
entry = env.declare_lambda_function(self.lambda_name, self.pos)
entry.doc = None
self.entry = entry
def declare_arguments(self, env):
for arg in self.args:
if not arg.name:
error(arg.pos, "Missing argument name")
if arg.needs_conversion:
arg.entry = env.declare_var(arg.name, arg.type, arg.pos)
if arg.type.is_pyobject:
arg.entry.init = "0"
else:
arg.entry = self.declare_argument(env, arg)
arg.entry.is_arg = 1
arg.entry.used = 1
arg.entry.is_self_arg = arg.is_self_arg
if arg.hdr_type:
if arg.is_self_arg or arg.is_type_arg or \
(arg.type.is_extension_type and not arg.hdr_type.is_extension_type):
arg.entry.is_declared_generic = 1
self.declare_python_arg(env, self.star_arg)
self.declare_python_arg(env, self.starstar_arg)
def declare_python_arg(self, env, arg):
if arg:
if env.directives['infer_types'] != False:
type = PyrexTypes.unspecified_type
else:
type = py_object_type
entry = env.declare_var(arg.name, type, arg.pos)
entry.is_arg = 1
entry.used = 1
entry.init = "0"
entry.xdecref_cleanup = 1
arg.entry = entry
def analyse_expressions(self, env):
self.local_scope.directives = env.directives
self.analyse_default_values(env)
if self.needs_assignment_synthesis(env):
# Shouldn't we be doing this at the module level too?
self.synthesize_assignment_node(env)
elif self.decorators:
for decorator in self.decorators[::-1]:
decorator.decorator.analyse_expressions(env)
def needs_assignment_synthesis(self, env, code=None):
if self.no_assignment_synthesis:
return False
# Should enable for module level as well, that will require more testing...
if self.entry.is_anonymous:
return True
if env.is_module_scope:
if code is None:
return env.directives['binding']
else:
return code.globalstate.directives['binding']
return env.is_py_class_scope or env.is_closure_scope
def synthesize_assignment_node(self, env):
import ExprNodes
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
if genv.is_closure_scope:
rhs = self.py_cfunc_node = ExprNodes.InnerFunctionNode(
self.pos, pymethdef_cname = self.entry.pymethdef_cname,
code_object = ExprNodes.CodeObjectNode(self))
else:
rhs = self.py_cfunc_node = ExprNodes.PyCFunctionNode(
self.pos, pymethdef_cname = self.entry.pymethdef_cname,
binding = env.directives['binding'],
code_object = ExprNodes.CodeObjectNode(self))
if env.is_py_class_scope:
if not self.is_staticmethod and not self.is_classmethod:
rhs.binding = True
else:
rhs.binding = False
if self.decorators:
for decorator in self.decorators[::-1]:
rhs = ExprNodes.SimpleCallNode(
decorator.pos,
function = decorator.decorator,
args = [rhs])
self.assmt = SingleAssignmentNode(self.pos,
lhs = ExprNodes.NameNode(self.pos, name = self.name),
rhs = rhs)
self.assmt.analyse_declarations(env)
self.assmt.analyse_expressions(env)
def generate_function_header(self, code, with_pymethdef, proto_only=0):
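        # Emit the static C prototype of the Python-callable wrapper and,
        # unless proto_only is set, its docstring, wrapperbase/PyMethodDef
        # declarations and the opening line of the definition.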
arg_code_list = []
sig = self.entry.signature
if sig.has_dummy_arg or self.self_in_stararg:
arg_code_list.append(
"PyObject *%s" % Naming.self_cname)
for arg in self.args:
if not arg.is_generic:
if arg.is_self_arg or arg.is_type_arg:
arg_code_list.append("PyObject *%s" % arg.hdr_cname)
else:
decl = arg.hdr_type.declaration_code(arg.hdr_cname)
entry = self.local_scope.lookup(arg.name)
if not entry.cf_used:
arg_code_list.append('CYTHON_UNUSED ' + decl)
else:
arg_code_list.append(decl)
if not self.entry.is_special and sig.method_flags() == [TypeSlots.method_noargs]:
arg_code_list.append("CYTHON_UNUSED PyObject *unused")
if (self.entry.scope.is_c_class_scope and self.entry.name == "__ipow__"):
arg_code_list.append("CYTHON_UNUSED PyObject *unused")
if sig.has_generic_args:
arg_code_list.append(
"PyObject *%s, PyObject *%s"
% (Naming.args_cname, Naming.kwds_cname))
arg_code = ", ".join(arg_code_list)
dc = self.return_type.declaration_code(self.entry.func_cname)
mf = " ".join(self.modifiers).upper()
if mf: mf += " "
header = "static %s%s(%s)" % (mf, dc, arg_code)
code.putln("%s; /*proto*/" % header)
if proto_only:
return
if (Options.docstrings and self.entry.doc and
not self.entry.scope.is_property_scope and
(not self.entry.is_special or self.entry.wrapperbase_cname)):
docstr = self.entry.doc
if docstr.is_unicode:
docstr = docstr.utf8encode()
code.putln(
'static char %s[] = "%s";' % (
self.entry.doc_cname,
split_string_literal(escape_byte_string(docstr))))
if self.entry.is_special:
code.putln(
"struct wrapperbase %s;" % self.entry.wrapperbase_cname)
if with_pymethdef:
code.put(
"static PyMethodDef %s = " %
self.entry.pymethdef_cname)
code.put_pymethoddef(self.entry, ";", allow_skip=False)
code.putln("%s {" % header)
def generate_argument_declarations(self, env, code):
for arg in self.args:
if arg.is_generic: # or arg.needs_conversion:
if arg.needs_conversion:
code.putln("PyObject *%s = 0;" % arg.hdr_cname)
elif not arg.entry.in_closure:
code.put_var_declaration(arg.entry)
def generate_keyword_list(self, code):
if self.signature_has_generic_args() and \
self.signature_has_nongeneric_args():
code.put(
"static PyObject **%s[] = {" %
Naming.pykwdlist_cname)
for arg in self.args:
if arg.is_generic:
pystring_cname = code.intern_identifier(arg.name)
code.put('&%s,' % pystring_cname)
code.putln("0};")
def generate_argument_parsing_code(self, env, code):
# Generate fast equivalent of PyArg_ParseTuple call for
# generic arguments, if any, including args/kwargs
if self.entry.signature.has_dummy_arg and not self.self_in_stararg:
# get rid of unused argument warning
code.putln("%s = %s;" % (Naming.self_cname, Naming.self_cname))
old_error_label = code.new_error_label()
our_error_label = code.error_label
end_label = code.new_label("argument_unpacking_done")
has_kwonly_args = self.num_kwonly_args > 0
has_star_or_kw_args = self.star_arg is not None \
or self.starstar_arg is not None or has_kwonly_args
for arg in self.args:
if not arg.type.is_pyobject:
if not arg.type.create_from_py_utility_code(env):
pass # will fail later
elif arg.is_self_arg and arg.entry.in_closure:
# must store 'self' in the closure explicitly for extension types
self.generate_arg_assignment(arg, arg.hdr_cname, code)
if not self.signature_has_generic_args():
if has_star_or_kw_args:
error(self.pos, "This method cannot have * or keyword arguments")
self.generate_argument_conversion_code(code)
elif not self.signature_has_nongeneric_args():
# func(*args) or func(**kw) or func(*args, **kw)
self.generate_stararg_copy_code(code)
else:
positional_args = []
kw_only_args = []
for arg in self.args:
arg_entry = arg.entry
if arg.is_generic:
if arg.default:
if not arg.is_self_arg and not arg.is_type_arg:
if arg.kw_only:
kw_only_args.append(arg)
else:
positional_args.append(arg)
elif arg.kw_only:
kw_only_args.append(arg)
elif not arg.is_self_arg and not arg.is_type_arg:
positional_args.append(arg)
self.generate_tuple_and_keyword_parsing_code(
positional_args, kw_only_args, end_label, code)
code.error_label = old_error_label
if code.label_used(our_error_label):
if not code.label_used(end_label):
code.put_goto(end_label)
code.put_label(our_error_label)
if has_star_or_kw_args:
self.generate_arg_decref(self.star_arg, code)
if self.starstar_arg:
if self.starstar_arg.entry.xdecref_cleanup:
code.put_var_xdecref_clear(self.starstar_arg.entry)
else:
code.put_var_decref_clear(self.starstar_arg.entry)
code.put_add_traceback(self.entry.qualified_name)
# The arguments are put into the closure one after the
# other, so when type errors are found, all references in
# the closure instance must be properly ref-counted to
# facilitate generic closure instance deallocation. In
# the case of an argument type error, it's best to just
# DECREF+clear the already handled references, as this
# frees their references as early as possible.
for arg in self.args:
if arg.type.is_pyobject and arg.entry.in_closure:
code.put_var_xdecref_clear(arg.entry)
if self.needs_closure:
code.put_decref(Naming.cur_scope_cname, self.local_scope.scope_class.type)
code.put_finish_refcount_context()
code.putln("return %s;" % self.error_value())
if code.label_used(end_label):
code.put_label(end_label)
# fix refnanny view on closure variables here, instead of
# doing it separately for each arg parsing special case
if self.star_arg and self.star_arg.entry.in_closure:
code.put_var_giveref(self.star_arg.entry)
if self.starstar_arg and self.starstar_arg.entry.in_closure:
code.put_var_giveref(self.starstar_arg.entry)
for arg in self.args:
if arg.type.is_pyobject and arg.entry.in_closure:
code.put_var_giveref(arg.entry)
def generate_arg_assignment(self, arg, item, code, incref_closure=True):
if arg.type.is_pyobject:
if arg.is_generic:
item = PyrexTypes.typecast(arg.type, PyrexTypes.py_object_type, item)
entry = arg.entry
if incref_closure and entry.in_closure:
code.put_incref(item, PyrexTypes.py_object_type)
code.putln("%s = %s;" % (entry.cname, item))
else:
func = arg.type.from_py_function
if func:
code.putln("%s = %s(%s); %s" % (
arg.entry.cname,
func,
item,
code.error_goto_if(arg.type.error_condition(arg.entry.cname), arg.pos)))
else:
error(arg.pos, "Cannot convert Python object argument to type '%s'" % arg.type)
def generate_arg_xdecref(self, arg, code):
if arg:
code.put_var_xdecref_clear(arg.entry)
def generate_arg_decref(self, arg, code):
if arg:
code.put_var_decref_clear(arg.entry)
def generate_stararg_copy_code(self, code):
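        # Fast path when only */** arguments are generic: validate the
        # incoming args tuple and keyword dict, copy **kwargs, and bind *args
        # (inserting 'self' first when self_in_stararg is set).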
if not self.star_arg:
code.globalstate.use_utility_code(raise_argtuple_invalid_utility_code)
code.putln("if (unlikely(PyTuple_GET_SIZE(%s) > 0)) {" %
Naming.args_cname)
code.put('__Pyx_RaiseArgtupleInvalid("%s", 1, 0, 0, PyTuple_GET_SIZE(%s)); return %s;' % (
self.name, Naming.args_cname, self.error_value()))
code.putln("}")
if self.starstar_arg:
if self.star_arg:
kwarg_check = "unlikely(%s)" % Naming.kwds_cname
else:
kwarg_check = "%s" % Naming.kwds_cname
else:
kwarg_check = "unlikely(%s) && unlikely(PyDict_Size(%s) > 0)" % (
Naming.kwds_cname, Naming.kwds_cname)
code.globalstate.use_utility_code(keyword_string_check_utility_code)
code.putln(
"if (%s && unlikely(!__Pyx_CheckKeywordStrings(%s, \"%s\", %d))) return %s;" % (
kwarg_check, Naming.kwds_cname, self.name,
bool(self.starstar_arg), self.error_value()))
if self.starstar_arg:
code.putln("%s = (%s) ? PyDict_Copy(%s) : PyDict_New();" % (
self.starstar_arg.entry.cname,
Naming.kwds_cname,
Naming.kwds_cname))
code.putln("if (unlikely(!%s)) return %s;" % (
self.starstar_arg.entry.cname, self.error_value()))
self.starstar_arg.entry.xdecref_cleanup = 0
code.put_gotref(self.starstar_arg.entry.cname)
if self.self_in_stararg:
# need to create a new tuple with 'self' inserted as first item
code.put("%s = PyTuple_New(PyTuple_GET_SIZE(%s)+1); if (unlikely(!%s)) " % (
self.star_arg.entry.cname,
Naming.args_cname,
self.star_arg.entry.cname))
if self.starstar_arg:
code.putln("{")
code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
code.putln("return %s;" % self.error_value())
code.putln("}")
else:
code.putln("return %s;" % self.error_value())
code.put_gotref(self.star_arg.entry.cname)
code.put_incref(Naming.self_cname, py_object_type)
code.put_giveref(Naming.self_cname)
code.putln("PyTuple_SET_ITEM(%s, 0, %s);" % (
self.star_arg.entry.cname, Naming.self_cname))
temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln("for (%s=0; %s < PyTuple_GET_SIZE(%s); %s++) {" % (
temp, temp, Naming.args_cname, temp))
code.putln("PyObject* item = PyTuple_GET_ITEM(%s, %s);" % (
Naming.args_cname, temp))
code.put_incref("item", py_object_type)
code.put_giveref("item")
code.putln("PyTuple_SET_ITEM(%s, %s+1, item);" % (
self.star_arg.entry.cname, temp))
code.putln("}")
code.funcstate.release_temp(temp)
self.star_arg.entry.xdecref_cleanup = 0
elif self.star_arg:
code.put_incref(Naming.args_cname, py_object_type)
code.putln("%s = %s;" % (
self.star_arg.entry.cname,
Naming.args_cname))
self.star_arg.entry.xdecref_cleanup = 0
def generate_tuple_and_keyword_parsing_code(self, positional_args,
kw_only_args, success_label, code):
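        # Unpack generic positional and keyword arguments: collect borrowed
        # references into a local 'values' array (with separate branches for
        # the kwargs and no-kwargs cases), then convert each value to its
        # final type and assign it to the target variable.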
argtuple_error_label = code.new_label("argtuple_error")
min_positional_args = self.num_required_args - self.num_required_kw_args
if len(self.args) > 0 and (self.args[0].is_self_arg or self.args[0].is_type_arg):
min_positional_args -= 1
max_positional_args = len(positional_args)
has_fixed_positional_count = not self.star_arg and \
min_positional_args == max_positional_args
has_kw_only_args = bool(kw_only_args)
if self.num_required_kw_args:
code.globalstate.use_utility_code(raise_keyword_required_utility_code)
if self.starstar_arg or self.star_arg:
self.generate_stararg_init_code(max_positional_args, code)
# Before being converted and assigned to the target variables,
# borrowed references to all unpacked argument values are
        # collected into a local PyObject* array, regardless of whether they
# were taken from default arguments, positional arguments or
# keyword arguments.
code.putln('{')
all_args = tuple(positional_args) + tuple(kw_only_args)
self.generate_argument_values_setup_code(
all_args, max_positional_args, argtuple_error_label, code)
# --- optimised code when we receive keyword arguments
code.putln("if (%s(%s)) {" % (
(self.num_required_kw_args > 0) and "likely" or "unlikely",
Naming.kwds_cname))
self.generate_keyword_unpacking_code(
min_positional_args, max_positional_args,
has_fixed_positional_count, has_kw_only_args,
all_args, argtuple_error_label, code)
# --- optimised code when we do not receive any keyword arguments
if (self.num_required_kw_args and min_positional_args > 0) or min_positional_args == max_positional_args:
# Python raises arg tuple related errors first, so we must
# check the length here
if min_positional_args == max_positional_args and not self.star_arg:
compare = '!='
else:
compare = '<'
code.putln('} else if (PyTuple_GET_SIZE(%s) %s %d) {' % (
Naming.args_cname, compare, min_positional_args))
code.put_goto(argtuple_error_label)
if self.num_required_kw_args:
# pure error case: keywords required but not passed
if max_positional_args > min_positional_args and not self.star_arg:
code.putln('} else if (PyTuple_GET_SIZE(%s) > %d) {' % (
Naming.args_cname, max_positional_args))
code.put_goto(argtuple_error_label)
code.putln('} else {')
for i, arg in enumerate(kw_only_args):
if not arg.default:
pystring_cname = code.intern_identifier(arg.name)
# required keyword-only argument missing
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' % (
self.name,
pystring_cname))
code.putln(code.error_goto(self.pos))
break
else:
# optimised tuple unpacking code
code.putln('} else {')
if min_positional_args == max_positional_args:
# parse the exact number of positional arguments from
# the args tuple
for i, arg in enumerate(positional_args):
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
else:
# parse the positional arguments from the variable length
# args tuple and reject illegal argument tuple sizes
code.putln('switch (PyTuple_GET_SIZE(%s)) {' % Naming.args_cname)
if self.star_arg:
code.putln('default:')
reversed_args = list(enumerate(positional_args))[::-1]
for i, arg in reversed_args:
if i >= min_positional_args-1:
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (i, Naming.args_cname, i))
if min_positional_args == 0:
code.put('case 0: ')
code.putln('break;')
if self.star_arg:
if min_positional_args:
for i in range(min_positional_args-1, -1, -1):
code.putln('case %2d:' % i)
code.put_goto(argtuple_error_label)
else:
code.put('default: ')
code.put_goto(argtuple_error_label)
code.putln('}')
code.putln('}')
# convert arg values to their final type and assign them
for i, arg in enumerate(all_args):
if arg.default and not arg.type.is_pyobject:
code.putln("if (values[%d]) {" % i)
self.generate_arg_assignment(arg, "values[%d]" % i, code)
if arg.default and not arg.type.is_pyobject:
code.putln('} else {')
code.putln(
"%s = %s;" % (
arg.entry.cname,
arg.calculate_default_value_code(code)))
if arg.entry.type.is_memoryviewslice:
code.put_incref_memoryviewslice(arg.entry.cname,
have_gil=True)
code.putln('}')
code.putln('}')
if code.label_used(argtuple_error_label):
code.put_goto(success_label)
code.put_label(argtuple_error_label)
code.globalstate.use_utility_code(raise_argtuple_invalid_utility_code)
code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, PyTuple_GET_SIZE(%s)); ' % (
self.name, has_fixed_positional_count,
min_positional_args, max_positional_args,
Naming.args_cname))
code.putln(code.error_goto(self.pos))
def generate_arg_default_assignments(self, code):
for arg in self.args:
if arg.is_generic and arg.default:
code.putln(
"%s = %s;" % (
arg.entry.cname,
arg.calculate_default_value_code(code)))
if arg.type.is_memoryviewslice:
code.put_incref_memoryviewslice(arg.entry.cname,
have_gil=True)
def generate_stararg_init_code(self, max_positional_args, code):
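        # Initialise *args/**kwargs: create a fresh dict for **kwargs and
        # slice any surplus positional arguments into *args (or bind the
        # empty tuple when there are none).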
if self.starstar_arg:
self.starstar_arg.entry.xdecref_cleanup = 0
code.putln('%s = PyDict_New(); if (unlikely(!%s)) return %s;' % (
self.starstar_arg.entry.cname,
self.starstar_arg.entry.cname,
self.error_value()))
code.put_gotref(self.starstar_arg.entry.cname)
if self.star_arg:
self.star_arg.entry.xdecref_cleanup = 0
code.putln('if (PyTuple_GET_SIZE(%s) > %d) {' % (
Naming.args_cname,
max_positional_args))
code.putln('%s = PyTuple_GetSlice(%s, %d, PyTuple_GET_SIZE(%s));' % (
self.star_arg.entry.cname, Naming.args_cname,
max_positional_args, Naming.args_cname))
code.putln("if (unlikely(!%s)) {" % self.star_arg.entry.cname)
if self.starstar_arg:
code.put_decref_clear(self.starstar_arg.entry.cname, py_object_type)
if self.needs_closure:
code.put_decref(Naming.cur_scope_cname, self.local_scope.scope_class.type)
code.put_finish_refcount_context()
code.putln('return %s;' % self.error_value())
code.putln('}')
code.put_gotref(self.star_arg.entry.cname)
code.putln('} else {')
code.put("%s = %s; " % (self.star_arg.entry.cname, Naming.empty_tuple))
code.put_incref(Naming.empty_tuple, py_object_type)
code.putln('}')
def generate_argument_values_setup_code(self, args, max_positional_args, argtuple_error_label, code):
max_args = len(args)
# the 'values' array collects borrowed references to arguments
# before doing any type coercion etc.
code.putln("PyObject* values[%d] = {%s};" % (
max_args, ','.join('0'*max_args)))
# assign borrowed Python default values to the values array,
# so that they can be overwritten by received arguments below
for i, arg in enumerate(args):
if arg.default and arg.type.is_pyobject:
default_value = arg.calculate_default_value_code(code)
code.putln('values[%d] = %s;' % (i, arg.type.as_pyobject(default_value)))
def generate_keyword_unpacking_code(self, min_positional_args, max_positional_args,
has_fixed_positional_count, has_kw_only_args,
all_args, argtuple_error_label, code):
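        # Branch taken when a keyword dict was passed: copy positional values
        # from the args tuple, look up required and optional arguments by
        # interned name, and delegate any remaining keywords to
        # __Pyx_ParseOptionalKeywords().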
code.putln('Py_ssize_t kw_args;')
code.putln('const Py_ssize_t pos_args = PyTuple_GET_SIZE(%s);' % Naming.args_cname)
# copy the values from the args tuple and check that it's not too long
code.putln('switch (pos_args) {')
if self.star_arg:
code.putln('default:')
for i in range(max_positional_args-1, -1, -1):
code.put('case %2d: ' % (i+1))
code.putln("values[%d] = PyTuple_GET_ITEM(%s, %d);" % (
i, Naming.args_cname, i))
code.putln('case 0: break;')
if not self.star_arg:
code.put('default: ') # more arguments than allowed
code.put_goto(argtuple_error_label)
code.putln('}')
# The code above is very often (but not always) the same as
# the optimised non-kwargs tuple unpacking code, so we keep
# the code block above at the very top, before the following
# 'external' PyDict_Size() call, to make it easy for the C
# compiler to merge the two separate tuple unpacking
# implementations into one when they turn out to be identical.
# If we received kwargs, fill up the positional/required
# arguments with values from the kw dict
code.putln('kw_args = PyDict_Size(%s);' % Naming.kwds_cname)
if self.num_required_args or max_positional_args > 0:
last_required_arg = -1
for i, arg in enumerate(all_args):
if not arg.default:
last_required_arg = i
if last_required_arg < max_positional_args:
last_required_arg = max_positional_args-1
if max_positional_args > 0:
code.putln('switch (pos_args) {')
for i, arg in enumerate(all_args[:last_required_arg+1]):
if max_positional_args > 0 and i <= max_positional_args:
if self.star_arg and i == max_positional_args:
code.putln('default:')
else:
code.putln('case %2d:' % i)
pystring_cname = code.intern_identifier(arg.name)
if arg.default:
if arg.kw_only:
# handled separately below
continue
code.putln('if (kw_args > 0) {')
code.putln('PyObject* value = PyDict_GetItem(%s, %s);' % (
Naming.kwds_cname, pystring_cname))
code.putln('if (value) { values[%d] = value; kw_args--; }' % i)
code.putln('}')
else:
code.putln('values[%d] = PyDict_GetItem(%s, %s);' % (
i, Naming.kwds_cname, pystring_cname))
                    code.putln('if (likely(values[%d])) kw_args--;' % i)
if i < min_positional_args:
if i == 0:
# special case: we know arg 0 is missing
code.put('else ')
code.put_goto(argtuple_error_label)
else:
# print the correct number of values (args or
# kwargs) that were passed into positional
# arguments up to this point
code.putln('else {')
code.globalstate.use_utility_code(raise_argtuple_invalid_utility_code)
code.put('__Pyx_RaiseArgtupleInvalid("%s", %d, %d, %d, %d); ' % (
self.name, has_fixed_positional_count,
min_positional_args, max_positional_args, i))
code.putln(code.error_goto(self.pos))
code.putln('}')
elif arg.kw_only:
code.putln('else {')
code.put('__Pyx_RaiseKeywordRequired("%s", %s); ' %(
self.name, pystring_cname))
code.putln(code.error_goto(self.pos))
code.putln('}')
if max_positional_args > 0:
code.putln('}')
if has_kw_only_args and not self.starstar_arg:
# unpack optional keyword-only arguments
# checking for interned strings in a dict is faster than iterating
# but it's too likely that we must iterate if we expect **kwargs
optional_args = []
for i, arg in enumerate(all_args[max_positional_args:]):
if not arg.kw_only or not arg.default:
continue
optional_args.append((i+max_positional_args, arg))
if optional_args:
# this mimics an unrolled loop so that we can "break" out of it
code.putln('while (kw_args > 0) {')
code.putln('PyObject* value;')
for i, arg in optional_args:
pystring_cname = code.intern_identifier(arg.name)
code.putln(
'value = PyDict_GetItem(%s, %s);' % (
Naming.kwds_cname, pystring_cname))
code.putln(
'if (value) { values[%d] = value; if (!(--kw_args)) break; }' % i)
code.putln('break;')
code.putln('}')
code.putln('if (unlikely(kw_args > 0)) {')
# non-positional/-required kw args left in dict: default args,
# kw-only args, **kwargs or error
#
# This is sort of a catch-all: except for checking required
# arguments, this will always do the right thing for unpacking
# keyword arguments, so that we can concentrate on optimising
# common cases above.
if max_positional_args == 0:
pos_arg_count = "0"
elif self.star_arg:
code.putln("const Py_ssize_t used_pos_args = (pos_args < %d) ? pos_args : %d;" % (
max_positional_args, max_positional_args))
pos_arg_count = "used_pos_args"
else:
pos_arg_count = "pos_args"
code.globalstate.use_utility_code(parse_keywords_utility_code)
code.putln(
'if (unlikely(__Pyx_ParseOptionalKeywords(%s, %s, %s, values, %s, "%s") < 0)) %s' % (
Naming.kwds_cname,
Naming.pykwdlist_cname,
self.starstar_arg and self.starstar_arg.entry.cname or '0',
pos_arg_count,
self.name,
code.error_goto(self.pos)))
code.putln('}')
# convert arg values to their final type and assign them
for i, arg in enumerate(all_args):
if arg.default and not arg.type.is_pyobject:
code.putln("if (values[%d]) {" % i)
self.generate_arg_assignment(arg, "values[%d]" % i, code, incref_closure=False)
if arg.default and not arg.type.is_pyobject:
code.putln('} else {')
code.putln(
"%s = %s;" % (
arg.entry.cname,
arg.calculate_default_value_code(code)))
if arg.type.is_memoryviewslice:
code.put_incref_memoryviewslice(arg.entry.cname,
have_gil=True)
code.putln('}')
def generate_argument_conversion_code(self, code):
# Generate code to convert arguments from signature type to
# declared type, if needed. Also copies signature arguments
# into closure fields.
for arg in self.args:
if arg.needs_conversion:
self.generate_arg_conversion(arg, code)
elif not arg.is_self_arg and arg.entry.in_closure:
if arg.type.is_pyobject:
code.put_incref(arg.hdr_cname, py_object_type)
code.putln('%s = %s;' % (arg.entry.cname, arg.hdr_cname))
def generate_arg_conversion(self, arg, code):
# Generate conversion code for one argument.
old_type = arg.hdr_type
new_type = arg.type
if old_type.is_pyobject:
if arg.default:
code.putln("if (%s) {" % arg.hdr_cname)
else:
code.putln("assert(%s); {" % arg.hdr_cname)
self.generate_arg_conversion_from_pyobject(arg, code)
code.putln("}")
elif new_type.is_pyobject:
self.generate_arg_conversion_to_pyobject(arg, code)
else:
if new_type.assignable_from(old_type):
code.putln(
"%s = %s;" % (arg.entry.cname, arg.hdr_cname))
else:
error(arg.pos,
"Cannot convert 1 argument from '%s' to '%s'" %
(old_type, new_type))
def generate_arg_conversion_from_pyobject(self, arg, code):
new_type = arg.type
func = new_type.from_py_function
# copied from CoerceFromPyTypeNode
if func:
lhs = arg.entry.cname
rhs = "%s(%s)" % (func, arg.hdr_cname)
if new_type.is_enum:
rhs = PyrexTypes.typecast(new_type, PyrexTypes.c_long_type, rhs)
code.putln("%s = %s; %s" % (
lhs,
rhs,
code.error_goto_if(new_type.error_condition(arg.entry.cname), arg.pos)))
else:
error(arg.pos,
"Cannot convert Python object argument to type '%s'"
% new_type)
def generate_arg_conversion_to_pyobject(self, arg, code):
old_type = arg.hdr_type
func = old_type.to_py_function
if func:
code.putln("%s = %s(%s); %s" % (
arg.entry.cname,
func,
arg.hdr_cname,
code.error_goto_if_null(arg.entry.cname, arg.pos)))
code.put_var_gotref(arg.entry)
else:
error(arg.pos,
"Cannot convert argument of type '%s' to Python object"
% old_type)
def generate_argument_type_tests(self, code):
# Generate type tests for args whose signature
# type is PyObject * and whose declared type is
# a subtype thereof.
for arg in self.args:
if arg.needs_type_test:
self.generate_arg_type_test(arg, code)
elif not arg.accept_none and arg.type.is_pyobject:
self.generate_arg_none_check(arg, code)
def error_value(self):
return self.entry.signature.error_value
def caller_will_check_exceptions(self):
return 1
class GeneratorDefNode(DefNode):
# Generator DefNode.
#
# gbody GeneratorBodyDefNode
#
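    # Illustrative sketch (not from the original source): for a generator like
    #
    #     def countdown(n):
    #         while n > 0:
    #             yield n
    #             n -= 1
    #
    # this node emits the wrapper that initialises the generator object
    # (resume_label = 0, body pointer set to gbody) and returns it, while
    # GeneratorBodyDefNode below emits the resumable body with a switch over
    # resume_label that jumps back to each yield point.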
is_generator = True
needs_closure = True
child_attrs = DefNode.child_attrs + ["gbody"]
def __init__(self, **kwargs):
        # XXX: doesn't actually need a body
kwargs['body'] = StatListNode(kwargs['pos'], stats=[])
super(GeneratorDefNode, self).__init__(**kwargs)
def analyse_declarations(self, env):
super(GeneratorDefNode, self).analyse_declarations(env)
self.gbody.local_scope = self.local_scope
self.gbody.analyse_declarations(env)
def generate_function_body(self, env, code):
body_cname = self.gbody.entry.func_cname
generator_cname = '%s->%s' % (Naming.cur_scope_cname, Naming.obj_base_cname)
code.putln('%s.resume_label = 0;' % generator_cname)
code.putln('%s.body = (__pyx_generator_body_t) %s;' % (generator_cname, body_cname))
code.put_giveref(Naming.cur_scope_cname)
code.put_finish_refcount_context()
code.putln("return (PyObject *) %s;" % Naming.cur_scope_cname);
def generate_function_definitions(self, env, code):
from ExprNodes import generator_utility_code
env.use_utility_code(generator_utility_code)
self.gbody.generate_function_header(code, proto=True)
super(GeneratorDefNode, self).generate_function_definitions(env, code)
self.gbody.generate_function_definitions(env, code)
class GeneratorBodyDefNode(DefNode):
# Generator body DefNode.
#
is_generator_body = True
def __init__(self, pos=None, name=None, body=None):
super(GeneratorBodyDefNode, self).__init__(pos=pos, body=body, name=name, doc=None,
args=[],
star_arg=None, starstar_arg=None)
def declare_generator_body(self, env):
prefix = env.next_id(env.scope_prefix)
name = env.next_id('generator')
cname = Naming.genbody_prefix + prefix + name
entry = env.declare_var(None, py_object_type, self.pos,
cname=cname, visibility='private')
entry.func_cname = cname
entry.qualified_name = EncodedString(self.name)
self.entry = entry
def analyse_declarations(self, env):
self.analyse_argument_types(env)
self.declare_generator_body(env)
def generate_function_header(self, code, proto=False):
header = "static PyObject *%s(%s, PyObject *%s)" % (
self.entry.func_cname,
self.local_scope.scope_class.type.declaration_code(Naming.cur_scope_cname),
Naming.sent_value_cname)
if proto:
code.putln('%s; /* proto */' % header)
else:
            code.putln('%s /* generator body */\n{' % header)
def generate_function_definitions(self, env, code):
lenv = self.local_scope
# Generate closure function definitions
self.body.generate_function_definitions(lenv, code)
# Generate C code for header and body of function
code.enter_cfunc_scope()
code.return_from_error_cleanup_label = code.new_label()
# ----- Top-level constants used by this function
code.mark_pos(self.pos)
self.generate_cached_builtins_decls(lenv, code)
# ----- Function header
code.putln("")
self.generate_function_header(code)
# ----- Local variables
code.putln("PyObject *%s = NULL;" % Naming.retval_cname)
tempvardecl_code = code.insertion_point()
code.put_declare_refcount_context()
code.put_setup_refcount_context(self.entry.name)
# ----- Resume switch point.
code.funcstate.init_closure_temps(lenv.scope_class.type.scope)
resume_code = code.insertion_point()
first_run_label = code.new_label('first_run')
code.use_label(first_run_label)
code.put_label(first_run_label)
code.putln('%s' %
(code.error_goto_if_null(Naming.sent_value_cname, self.pos)))
# ----- Function body
self.generate_function_body(env, code)
code.putln('PyErr_SetNone(PyExc_StopIteration); %s' % code.error_goto(self.pos))
# ----- Error cleanup
if code.error_label in code.labels_used:
code.put_goto(code.return_label)
code.put_label(code.error_label)
for cname, type in code.funcstate.all_managed_temps():
code.put_xdecref(cname, type)
code.put_add_traceback(self.entry.qualified_name)
# ----- Non-error return cleanup
code.put_label(code.return_label)
code.put_xdecref(Naming.retval_cname, py_object_type)
code.putln('%s->%s.resume_label = -1;' % (Naming.cur_scope_cname, Naming.obj_base_cname))
code.put_finish_refcount_context()
        code.putln('return NULL;')
code.putln("}")
# ----- Go back and insert temp variable declarations
tempvardecl_code.put_temp_declarations(code.funcstate)
# ----- Generator resume code
resume_code.putln("switch (%s->%s.resume_label) {" % (Naming.cur_scope_cname, Naming.obj_base_cname));
resume_code.putln("case 0: goto %s;" % first_run_label)
from ParseTreeTransforms import YieldNodeCollector
collector = YieldNodeCollector()
collector.visitchildren(self)
for yield_expr in collector.yields:
resume_code.putln("case %d: goto %s;" % (yield_expr.label_num, yield_expr.label_name));
resume_code.putln("default: /* CPython raises the right error here */");
resume_code.put_finish_refcount_context()
resume_code.putln("return NULL;");
resume_code.putln("}");
code.exit_cfunc_scope()
class OverrideCheckNode(StatNode):
# A Node for dispatching to the def method if it
    # is overridden.
#
# py_func
#
# args
# func_temp
# body
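    # Illustrative example (assumption, not from the original source): for
    #
    #     cdef class A:
    #         cpdef f(self): ...
    #
    # a Python subclass may override 'f' with a plain def method, so C-level
    # callers must look the attribute up and dispatch to the Python version
    # whenever it is not the original PyCFunction wrapper.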
child_attrs = ['body']
body = None
def analyse_expressions(self, env):
self.args = env.arg_entries
if self.py_func.is_module_scope:
first_arg = 0
else:
first_arg = 1
import ExprNodes
self.func_node = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
call_tuple = ExprNodes.TupleNode(self.pos, args=[ExprNodes.NameNode(self.pos, name=arg.name) for arg in self.args[first_arg:]])
call_node = ExprNodes.SimpleCallNode(self.pos,
function=self.func_node,
args=[ExprNodes.NameNode(self.pos, name=arg.name) for arg in self.args[first_arg:]])
self.body = ReturnStatNode(self.pos, value=call_node)
self.body.analyse_expressions(env)
def generate_execution_code(self, code):
interned_attr_cname = code.intern_identifier(self.py_func.entry.name)
# Check to see if we are an extension type
if self.py_func.is_module_scope:
self_arg = "((PyObject *)%s)" % Naming.module_cname
else:
self_arg = "((PyObject *)%s)" % self.args[0].cname
code.putln("/* Check if called by wrapper */")
code.putln("if (unlikely(%s)) ;" % Naming.skip_dispatch_cname)
code.putln("/* Check if overriden in Python */")
if self.py_func.is_module_scope:
code.putln("else {")
else:
code.putln("else if (unlikely(Py_TYPE(%s)->tp_dictoffset != 0)) {" % self_arg)
func_node_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.func_node.set_cname(func_node_temp)
# need to get attribute manually--scope would return cdef method
err = code.error_goto_if_null(func_node_temp, self.pos)
code.putln("%s = PyObject_GetAttr(%s, %s); %s" % (
func_node_temp, self_arg, interned_attr_cname, err))
code.put_gotref(func_node_temp)
is_builtin_function_or_method = "PyCFunction_Check(%s)" % func_node_temp
is_overridden = "(PyCFunction_GET_FUNCTION(%s) != (void *)&%s)" % (
func_node_temp, self.py_func.entry.func_cname)
code.putln("if (!%s || %s) {" % (is_builtin_function_or_method, is_overridden))
self.body.generate_execution_code(code)
code.putln("}")
code.put_decref_clear(func_node_temp, PyrexTypes.py_object_type)
code.funcstate.release_temp(func_node_temp)
code.putln("}")
class ClassDefNode(StatNode, BlockNode):
pass
class PyClassDefNode(ClassDefNode):
# A Python class definition.
#
# name EncodedString Name of the class
# doc string or None
# body StatNode Attribute definition code
# entry Symtab.Entry
# scope PyClassScope
# decorators [DecoratorNode] list of decorators or None
#
# The following subnodes are constructed internally:
#
# dict DictNode Class dictionary or Py3 namespace
# classobj ClassNode Class object
# target NameNode Variable to assign class object to
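    # Illustrative example (not from the original source):
    #
    #     class Point(Base, metaclass=Meta):   # Py3-style: bases plus keyword args
    #         x = 0
    #
    # sets py3_style_class, pulls 'metaclass' out of the keyword arguments when
    # it is given literally, and builds the class dict/namespace before the
    # resulting class object is assigned to the NameNode target.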
child_attrs = ["body", "dict", "metaclass", "mkw", "bases", "class_result", "target"]
decorators = None
class_result = None
py3_style_class = False # Python3 style class (bases+kwargs)
def __init__(self, pos, name, bases, doc, body, decorators = None,
keyword_args = None, starstar_arg = None):
StatNode.__init__(self, pos)
self.name = name
self.doc = doc
self.body = body
self.decorators = decorators
import ExprNodes
if self.doc and Options.docstrings:
doc = embed_position(self.pos, self.doc)
doc_node = ExprNodes.StringNode(pos, value = doc)
else:
doc_node = None
if keyword_args or starstar_arg:
self.py3_style_class = True
self.bases = bases
self.metaclass = None
if keyword_args and not starstar_arg:
for i, item in list(enumerate(keyword_args.key_value_pairs))[::-1]:
if item.key.value == 'metaclass':
if self.metaclass is not None:
error(item.pos, "keyword argument 'metaclass' passed multiple times")
# special case: we already know the metaclass,
# so we don't need to do the "build kwargs,
# find metaclass" dance at runtime
self.metaclass = item.value
del keyword_args.key_value_pairs[i]
if starstar_arg:
self.mkw = ExprNodes.KeywordArgsNode(
pos, keyword_args = keyword_args and keyword_args.key_value_pairs or [],
starstar_arg = starstar_arg)
elif keyword_args and keyword_args.key_value_pairs:
self.mkw = keyword_args
else:
self.mkw = ExprNodes.NullNode(pos)
if self.metaclass is None:
self.metaclass = ExprNodes.PyClassMetaclassNode(
pos, mkw = self.mkw, bases = self.bases)
self.dict = ExprNodes.PyClassNamespaceNode(pos, name = name,
doc = doc_node, metaclass = self.metaclass, bases = self.bases,
mkw = self.mkw)
self.classobj = ExprNodes.Py3ClassNode(pos, name = name,
bases = self.bases, dict = self.dict, doc = doc_node,
metaclass = self.metaclass, mkw = self.mkw)
else:
self.dict = ExprNodes.DictNode(pos, key_value_pairs = [])
self.metaclass = None
self.mkw = None
self.bases = None
self.classobj = ExprNodes.ClassNode(pos, name = name,
bases = bases, dict = self.dict, doc = doc_node)
self.target = ExprNodes.NameNode(pos, name = name)
def as_cclass(self):
"""
Return this node as if it were declared as an extension class
"""
if self.py3_style_class:
error(self.classobj.pos, "Python3 style class could not be represented as C class")
return
bases = self.classobj.bases.args
if len(bases) == 0:
base_class_name = None
base_class_module = None
elif len(bases) == 1:
base = bases[0]
path = []
from ExprNodes import AttributeNode, NameNode
while isinstance(base, AttributeNode):
path.insert(0, base.attribute)
base = base.obj
if isinstance(base, NameNode):
path.insert(0, base.name)
base_class_name = path[-1]
if len(path) > 1:
base_class_module = u'.'.join(path[:-1])
else:
base_class_module = None
else:
error(self.classobj.bases.args.pos, "Invalid base class")
else:
error(self.classobj.bases.args.pos, "C class may only have one base class")
return None
return CClassDefNode(self.pos,
visibility = 'private',
module_name = None,
class_name = self.name,
base_class_module = base_class_module,
base_class_name = base_class_name,
decorators = self.decorators,
body = self.body,
in_pxd = False,
doc = self.doc)
def create_scope(self, env):
genv = env
while genv.is_py_class_scope or genv.is_c_class_scope:
genv = genv.outer_scope
cenv = self.scope = PyClassScope(name = self.name, outer_scope = genv)
return cenv
def analyse_declarations(self, env):
class_result = self.classobj
if self.decorators:
from ExprNodes import SimpleCallNode
for decorator in self.decorators[::-1]:
class_result = SimpleCallNode(
decorator.pos,
function = decorator.decorator,
args = [class_result])
self.class_result = class_result
self.class_result.analyse_declarations(env)
self.target.analyse_target_declaration(env)
cenv = self.create_scope(env)
cenv.directives = env.directives
cenv.class_obj_cname = self.target.entry.cname
self.body.analyse_declarations(cenv)
def analyse_expressions(self, env):
if self.py3_style_class:
self.bases.analyse_expressions(env)
self.metaclass.analyse_expressions(env)
self.mkw.analyse_expressions(env)
self.dict.analyse_expressions(env)
self.class_result.analyse_expressions(env)
genv = env.global_scope()
cenv = self.scope
self.body.analyse_expressions(cenv)
self.target.analyse_target_expression(env, self.classobj)
def generate_function_definitions(self, env, code):
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
code.pyclass_stack.append(self)
cenv = self.scope
if self.py3_style_class:
self.bases.generate_evaluation_code(code)
self.mkw.generate_evaluation_code(code)
self.metaclass.generate_evaluation_code(code)
self.dict.generate_evaluation_code(code)
cenv.namespace_cname = cenv.class_obj_cname = self.dict.result()
self.body.generate_execution_code(code)
self.class_result.generate_evaluation_code(code)
cenv.namespace_cname = cenv.class_obj_cname = self.classobj.result()
self.target.generate_assignment_code(self.class_result, code)
self.dict.generate_disposal_code(code)
self.dict.free_temps(code)
if self.py3_style_class:
self.mkw.generate_disposal_code(code)
self.mkw.free_temps(code)
self.metaclass.generate_disposal_code(code)
self.metaclass.free_temps(code)
self.bases.generate_disposal_code(code)
self.bases.free_temps(code)
code.pyclass_stack.pop()
class CClassDefNode(ClassDefNode):
# An extension type definition.
#
# visibility 'private' or 'public' or 'extern'
# typedef_flag boolean
# api boolean
# module_name string or None For import of extern type objects
# class_name string Unqualified name of class
# as_name string or None Name to declare as in this scope
# base_class_module string or None Module containing the base class
# base_class_name string or None Name of the base class
# objstruct_name string or None Specified C name of object struct
# typeobj_name string or None Specified C name of type object
# in_pxd boolean Is in a .pxd file
# decorators [DecoratorNode] list of decorators or None
# doc string or None
# body StatNode or None
# entry Symtab.Entry
# base_type PyExtensionType or None
# buffer_defaults_node DictNode or None Declares defaults for a buffer
# buffer_defaults_pos
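    # Illustrative example (not from the original source):
    #
    #     cdef class Rectangle(Shape):
    #         cdef double width, height
    #
    # declares an extension type; 'extern' visibility together with a
    # module_name instead describes a type object imported from another module.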
child_attrs = ["body"]
buffer_defaults_node = None
buffer_defaults_pos = None
typedef_flag = False
api = False
objstruct_name = None
typeobj_name = None
decorators = None
shadow = False
def buffer_defaults(self, env):
if not hasattr(self, '_buffer_defaults'):
import Buffer
if self.buffer_defaults_node:
self._buffer_defaults = Buffer.analyse_buffer_options(
self.buffer_defaults_pos,
env, [], self.buffer_defaults_node,
need_complete=False)
else:
self._buffer_defaults = None
return self._buffer_defaults
def declare(self, env):
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return None
else:
home_scope = env
self.entry = home_scope.declare_c_class(
name = self.class_name,
pos = self.pos,
defining = 0,
implementing = 0,
module_name = self.module_name,
base_type = None,
objstruct_cname = self.objstruct_name,
typeobj_cname = self.typeobj_name,
visibility = self.visibility,
typedef_flag = self.typedef_flag,
api = self.api,
buffer_defaults = self.buffer_defaults(env),
shadow = self.shadow)
def analyse_declarations(self, env):
#print "CClassDefNode.analyse_declarations:", self.class_name
#print "...visibility =", self.visibility
#print "...module_name =", self.module_name
if env.in_cinclude and not self.objstruct_name:
error(self.pos, "Object struct name specification required for "
"C class defined in 'extern from' block")
if self.decorators:
error(self.pos,
"Decorators not allowed on cdef classes (used on type '%s')" % self.class_name)
self.base_type = None
# Now that module imports are cached, we need to
# import the modules for extern classes.
if self.module_name:
self.module = None
for module in env.cimported_modules:
if module.name == self.module_name:
self.module = module
if self.module is None:
self.module = ModuleScope(self.module_name, None, env.context)
self.module.has_extern_class = 1
env.add_imported_module(self.module)
if self.base_class_name:
if self.base_class_module:
base_class_scope = env.find_module(self.base_class_module, self.pos)
else:
base_class_scope = env
if self.base_class_name == 'object':
# extension classes are special and don't need to inherit from object
if base_class_scope is None or base_class_scope.lookup('object') is None:
self.base_class_name = None
self.base_class_module = None
base_class_scope = None
if base_class_scope:
base_class_entry = base_class_scope.find(self.base_class_name, self.pos)
if base_class_entry:
if not base_class_entry.is_type:
error(self.pos, "'%s' is not a type name" % self.base_class_name)
elif not base_class_entry.type.is_extension_type and \
not (base_class_entry.type.is_builtin_type and \
base_class_entry.type.objstruct_cname):
error(self.pos, "'%s' is not an extension type" % self.base_class_name)
elif not base_class_entry.type.is_complete():
error(self.pos, "Base class '%s' of type '%s' is incomplete" % (
self.base_class_name, self.class_name))
elif base_class_entry.type.scope and base_class_entry.type.scope.directives and \
base_class_entry.type.is_final_type:
error(self.pos, "Base class '%s' of type '%s' is final" % (
self.base_class_name, self.class_name))
elif base_class_entry.type.is_builtin_type and \
base_class_entry.type.name in ('tuple', 'str', 'bytes'):
error(self.pos, "inheritance from PyVarObject types like '%s' is not currently supported"
% base_class_entry.type.name)
else:
self.base_type = base_class_entry.type
has_body = self.body is not None
if self.module_name and self.visibility != 'extern':
module_path = self.module_name.split(".")
home_scope = env.find_imported_module(module_path, self.pos)
if not home_scope:
return
else:
home_scope = env
if self.visibility == 'extern':
if (self.module_name == '__builtin__' and
self.class_name in Builtin.builtin_types and
env.qualified_name[:8] != 'cpython.'): # allow overloaded names for cimporting from cpython
warning(self.pos, "%s already a builtin Cython type" % self.class_name, 1)
self.entry = home_scope.declare_c_class(
name = self.class_name,
pos = self.pos,
defining = has_body and self.in_pxd,
implementing = has_body and not self.in_pxd,
module_name = self.module_name,
base_type = self.base_type,
objstruct_cname = self.objstruct_name,
typeobj_cname = self.typeobj_name,
visibility = self.visibility,
typedef_flag = self.typedef_flag,
api = self.api,
buffer_defaults = self.buffer_defaults(env),
shadow = self.shadow)
if self.shadow:
home_scope.lookup(self.class_name).as_variable = self.entry
if home_scope is not env and self.visibility == 'extern':
env.add_imported_entry(self.class_name, self.entry, self.pos)
self.scope = scope = self.entry.type.scope
if scope is not None:
scope.directives = env.directives
if self.doc and Options.docstrings:
scope.doc = embed_position(self.pos, self.doc)
if has_body:
self.body.analyse_declarations(scope)
if self.in_pxd:
scope.defined = 1
else:
scope.implemented = 1
env.allocate_vtable_names(self.entry)
def analyse_expressions(self, env):
if self.body:
scope = self.entry.type.scope
self.body.analyse_expressions(scope)
def generate_function_definitions(self, env, code):
if self.body:
self.generate_lambda_definitions(self.scope, code)
self.body.generate_function_definitions(self.scope, code)
def generate_execution_code(self, code):
# This is needed to generate evaluation code for
# default values of method arguments.
if self.body:
self.body.generate_execution_code(code)
def annotate(self, code):
if self.body:
self.body.annotate(code)
class PropertyNode(StatNode):
# Definition of a property in an extension type.
#
# name string
# doc EncodedString or None Doc string
# body StatListNode
child_attrs = ["body"]
def analyse_declarations(self, env):
entry = env.declare_property(self.name, self.doc, self.pos)
if entry:
entry.scope.directives = env.directives
self.body.analyse_declarations(entry.scope)
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
pass
def annotate(self, code):
self.body.annotate(code)
class GlobalNode(StatNode):
# Global variable declaration.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_global(name, self.pos)
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
pass
class NonlocalNode(StatNode):
# Nonlocal variable declaration via the 'nonlocal' keyword.
#
# names [string]
child_attrs = []
def analyse_declarations(self, env):
for name in self.names:
env.declare_nonlocal(name, self.pos)
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
pass
class ExprStatNode(StatNode):
# Expression used as a statement.
#
# expr ExprNode
child_attrs = ["expr"]
def analyse_declarations(self, env):
import ExprNodes
if isinstance(self.expr, ExprNodes.GeneralCallNode):
func = self.expr.function.as_cython_attribute()
if func == u'declare':
args, kwds = self.expr.explicit_args_kwds()
if len(args):
error(self.expr.pos, "Variable names must be specified.")
for var, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
error(type_node.pos, "Unknown type")
else:
env.declare_var(var.value, type, var.pos, is_cdef = True)
self.__class__ = PassStatNode
def analyse_expressions(self, env):
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr.analyse_expressions(env)
def nogil_check(self, env):
if self.expr.type.is_pyobject and self.expr.is_temp:
self.gil_error()
gil_message = "Discarding owned Python object"
def generate_execution_code(self, code):
self.expr.generate_evaluation_code(code)
if not self.expr.is_temp and self.expr.result():
code.putln("%s;" % self.expr.result())
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.expr.generate_function_definitions(env, code)
def annotate(self, code):
self.expr.annotate(code)
class AssignmentNode(StatNode):
# Abstract base class for assignment nodes.
#
# The analyse_expressions and generate_execution_code
# phases of assignments are split into two sub-phases
# each, to enable all the right hand sides of a
# parallel assignment to be evaluated before assigning
# to any of the left hand sides.
def analyse_expressions(self, env):
self.analyse_types(env)
# def analyse_expressions(self, env):
# self.analyse_expressions_1(env)
# self.analyse_expressions_2(env)
def generate_execution_code(self, code):
self.generate_rhs_evaluation_code(code)
self.generate_assignment_code(code)
class SingleAssignmentNode(AssignmentNode):
# The simplest case:
#
# a = b
#
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# first bool Is this guaranteed the first assignment to lhs?
child_attrs = ["lhs", "rhs"]
first = False
declaration_only = False
def analyse_declarations(self, env):
import ExprNodes
# handle declarations of the form x = cython.foo()
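        # Illustrative examples (not from the original source) of the
        # pure-Python-mode forms handled below:
        #     x = cython.declare(cython.int, 0)
        #     MyInt = cython.typedef(cython.int)
        #     Pair = cython.struct(first=cython.int, second=cython.double)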
if isinstance(self.rhs, ExprNodes.CallNode):
func_name = self.rhs.function.as_cython_attribute()
if func_name:
args, kwds = self.rhs.explicit_args_kwds()
if func_name in ['declare', 'typedef']:
if len(args) > 2 or kwds is not None:
error(self.rhs.pos, "Can only declare one type at a time.")
return
type = args[0].analyse_as_type(env)
if type is None:
error(args[0].pos, "Unknown type")
return
lhs = self.lhs
if func_name == 'declare':
if isinstance(lhs, ExprNodes.NameNode):
vars = [(lhs.name, lhs.pos)]
elif isinstance(lhs, ExprNodes.TupleNode):
vars = [(var.name, var.pos) for var in lhs.args]
else:
error(lhs.pos, "Invalid declaration")
return
for var, pos in vars:
env.declare_var(var, type, pos, is_cdef = True)
if len(args) == 2:
# we have a value
self.rhs = args[1]
else:
self.declaration_only = True
else:
self.declaration_only = True
if not isinstance(lhs, ExprNodes.NameNode):
error(lhs.pos, "Invalid declaration.")
env.declare_typedef(lhs.name, type, self.pos, visibility='private')
elif func_name in ['struct', 'union']:
self.declaration_only = True
if len(args) > 0 or kwds is None:
error(self.rhs.pos, "Struct or union members must be given by name.")
return
members = []
for member, type_node in kwds.key_value_pairs:
type = type_node.analyse_as_type(env)
if type is None:
error(type_node.pos, "Unknown type")
else:
members.append((member.value, type, member.pos))
if len(members) < len(kwds.key_value_pairs):
return
if not isinstance(self.lhs, ExprNodes.NameNode):
error(self.lhs.pos, "Invalid declaration.")
name = self.lhs.name
scope = StructOrUnionScope(name)
env.declare_struct_or_union(name, func_name, scope, False, self.rhs.pos)
for member, type, pos in members:
scope.declare_var(member, type, pos)
if self.declaration_only:
return
else:
self.lhs.analyse_target_declaration(env)
def analyse_types(self, env, use_temp = 0):
self.rhs.analyse_types(env)
self.lhs.analyse_target_types(env)
self.lhs.gil_assignment_check(env)
self.rhs = self.rhs.coerce_to(self.lhs.type, env)
if use_temp:
self.rhs = self.rhs.coerce_to_temp(env)
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
def generate_assignment_code(self, code):
self.lhs.generate_assignment_code(self.rhs, code)
def generate_function_definitions(self, env, code):
self.rhs.generate_function_definitions(env, code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
class CascadedAssignmentNode(AssignmentNode):
# An assignment with multiple left hand sides:
#
# a = b = c
#
# lhs_list [ExprNode] Left hand sides
# rhs ExprNode Right hand sides
#
# Used internally:
#
# coerced_rhs_list [ExprNode] RHS coerced to type of each LHS
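    # Illustrative example (not from the original source): in
    #
    #     cdef int i
    #     x = i = some_call()
    #
    # the RHS is evaluated once, then cloned and coerced separately for each
    # left hand side (Python object for 'x', C int for 'i').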
child_attrs = ["lhs_list", "rhs", "coerced_rhs_list"]
coerced_rhs_list = None
def analyse_declarations(self, env):
for lhs in self.lhs_list:
lhs.analyse_target_declaration(env)
def analyse_types(self, env, use_temp = 0):
self.rhs.analyse_types(env)
if not self.rhs.is_simple():
if use_temp:
self.rhs = self.rhs.coerce_to_temp(env)
else:
self.rhs = self.rhs.coerce_to_simple(env)
from ExprNodes import CloneNode
self.coerced_rhs_list = []
for lhs in self.lhs_list:
lhs.analyse_target_types(env)
lhs.gil_assignment_check(env)
rhs = CloneNode(self.rhs)
rhs = rhs.coerce_to(lhs.type, env)
self.coerced_rhs_list.append(rhs)
def generate_rhs_evaluation_code(self, code):
self.rhs.generate_evaluation_code(code)
def generate_assignment_code(self, code):
for i in range(len(self.lhs_list)):
lhs = self.lhs_list[i]
rhs = self.coerced_rhs_list[i]
rhs.generate_evaluation_code(code)
lhs.generate_assignment_code(rhs, code)
# Assignment has disposed of the cloned RHS
self.rhs.generate_disposal_code(code)
self.rhs.free_temps(code)
def generate_function_definitions(self, env, code):
self.rhs.generate_function_definitions(env, code)
def annotate(self, code):
for i in range(len(self.lhs_list)):
lhs = self.lhs_list[i].annotate(code)
rhs = self.coerced_rhs_list[i].annotate(code)
self.rhs.annotate(code)
class ParallelAssignmentNode(AssignmentNode):
# A combined packing/unpacking assignment:
#
# a, b, c = d, e, f
#
# This has been rearranged by the parser into
#
# a = d ; b = e ; c = f
#
# but we must evaluate all the right hand sides
# before assigning to any of the left hand sides.
#
# stats [AssignmentNode] The constituent assignments
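    # Illustrative example (not from the original source):
    #
    #     a, b = b, a
    #
    # both right hand sides are evaluated before either assignment happens, so
    # the swap behaves exactly as it does in Python.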
child_attrs = ["stats"]
def analyse_declarations(self, env):
for stat in self.stats:
stat.analyse_declarations(env)
def analyse_expressions(self, env):
for stat in self.stats:
stat.analyse_types(env, use_temp = 1)
# def analyse_expressions(self, env):
# for stat in self.stats:
# stat.analyse_expressions_1(env, use_temp = 1)
# for stat in self.stats:
# stat.analyse_expressions_2(env)
def generate_execution_code(self, code):
for stat in self.stats:
stat.generate_rhs_evaluation_code(code)
for stat in self.stats:
stat.generate_assignment_code(code)
def generate_function_definitions(self, env, code):
for stat in self.stats:
stat.generate_function_definitions(env, code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
class InPlaceAssignmentNode(AssignmentNode):
    # An in-place arithmetic assignment:
#
# a += b
# a -= b
# ...
#
# lhs ExprNode Left hand side
# rhs ExprNode Right hand side
# operator char one of "+-*/%^&|"
#
# This code is a bit tricky because in order to obey Python
# semantics the sub-expressions (e.g. indices) of the lhs must
# not be evaluated twice. So we must re-use the values calculated
# in evaluation phase for the assignment phase as well.
# Fortunately, the type of the lhs node is fairly constrained
# (it must be a NameNode, AttributeNode, or IndexNode).
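    # Illustrative example (not from the original source): in
    #
    #     d[expensive_key()] += 1
    #
    # the index expression is evaluated only once and its value is reused for
    # both the read and the write.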
child_attrs = ["lhs", "rhs"]
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
def analyse_types(self, env):
self.rhs.analyse_types(env)
self.lhs.analyse_target_types(env)
def generate_execution_code(self, code):
import ExprNodes
self.rhs.generate_evaluation_code(code)
self.lhs.generate_subexpr_evaluation_code(code)
c_op = self.operator
if c_op == "//":
c_op = "/"
elif c_op == "**":
error(self.pos, "No C inplace power operator")
if isinstance(self.lhs, ExprNodes.IndexNode) and self.lhs.is_buffer_access:
if self.lhs.type.is_pyobject:
error(self.pos, "In-place operators not allowed on object buffers in this release.")
if c_op in ('/', '%') and self.lhs.type.is_int and not code.directives['cdivision']:
error(self.pos, "In-place non-c divide operators not allowed on int buffers.")
self.lhs.generate_buffer_setitem_code(self.rhs, code, c_op)
else:
# C++
# TODO: make sure overload is declared
code.putln("%s %s= %s;" % (self.lhs.result(), c_op, self.rhs.result()))
self.lhs.generate_subexpr_disposal_code(code)
self.lhs.free_subexpr_temps(code)
self.rhs.generate_disposal_code(code)
self.rhs.free_temps(code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
def create_binop_node(self):
import ExprNodes
return ExprNodes.binop_node(self.pos, self.operator, self.lhs, self.rhs)
class PrintStatNode(StatNode):
# print statement
#
# arg_tuple TupleNode
# stream ExprNode or None (stdout)
# append_newline boolean
child_attrs = ["arg_tuple", "stream"]
def analyse_expressions(self, env):
if self.stream:
self.stream.analyse_expressions(env)
self.stream = self.stream.coerce_to_pyobject(env)
self.arg_tuple.analyse_expressions(env)
self.arg_tuple = self.arg_tuple.coerce_to_pyobject(env)
env.use_utility_code(printing_utility_code)
if len(self.arg_tuple.args) == 1 and self.append_newline:
env.use_utility_code(printing_one_utility_code)
nogil_check = Node.gil_error
gil_message = "Python print statement"
def generate_execution_code(self, code):
if self.stream:
self.stream.generate_evaluation_code(code)
stream_result = self.stream.py_result()
else:
stream_result = '0'
if len(self.arg_tuple.args) == 1 and self.append_newline:
arg = self.arg_tuple.args[0]
arg.generate_evaluation_code(code)
code.putln(
"if (__Pyx_PrintOne(%s, %s) < 0) %s" % (
stream_result,
arg.py_result(),
code.error_goto(self.pos)))
arg.generate_disposal_code(code)
arg.free_temps(code)
else:
self.arg_tuple.generate_evaluation_code(code)
code.putln(
"if (__Pyx_Print(%s, %s, %d) < 0) %s" % (
stream_result,
self.arg_tuple.py_result(),
self.append_newline,
code.error_goto(self.pos)))
self.arg_tuple.generate_disposal_code(code)
self.arg_tuple.free_temps(code)
if self.stream:
self.stream.generate_disposal_code(code)
self.stream.free_temps(code)
def generate_function_definitions(self, env, code):
if self.stream:
self.stream.generate_function_definitions(env, code)
self.arg_tuple.generate_function_definitions(env, code)
def annotate(self, code):
if self.stream:
self.stream.annotate(code)
self.arg_tuple.annotate(code)
class ExecStatNode(StatNode):
# exec statement
#
# args [ExprNode]
child_attrs = ["args"]
def analyse_expressions(self, env):
for i, arg in enumerate(self.args):
arg.analyse_expressions(env)
arg = arg.coerce_to_pyobject(env)
self.args[i] = arg
env.use_utility_code(Builtin.pyexec_utility_code)
nogil_check = Node.gil_error
gil_message = "Python exec statement"
def generate_execution_code(self, code):
args = []
for arg in self.args:
arg.generate_evaluation_code(code)
args.append( arg.py_result() )
args = tuple(args + ['0', '0'][:3-len(args)])
temp_result = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
code.putln("%s = __Pyx_PyRun(%s, %s, %s);" % (
(temp_result,) + args))
for arg in self.args:
arg.generate_disposal_code(code)
arg.free_temps(code)
code.putln(
code.error_goto_if_null(temp_result, self.pos))
code.put_gotref(temp_result)
code.put_decref_clear(temp_result, py_object_type)
code.funcstate.release_temp(temp_result)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
class DelStatNode(StatNode):
# del statement
#
# args [ExprNode]
child_attrs = ["args"]
def analyse_declarations(self, env):
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_expressions(self, env):
for arg in self.args:
arg.analyse_target_expression(env, None)
if arg.type.is_pyobject or (arg.is_name and
arg.type.is_memoryviewslice):
pass
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
self.cpp_check(env)
elif arg.type.is_cpp_class:
error(arg.pos, "Deletion of non-heap C++ object")
else:
error(arg.pos, "Deletion of non-Python, non-C++ object")
#arg.release_target_temp(env)
def nogil_check(self, env):
for arg in self.args:
if arg.type.is_pyobject:
self.gil_error()
gil_message = "Deleting Python object"
def generate_execution_code(self, code):
for arg in self.args:
if arg.type.is_pyobject or arg.type.is_memoryviewslice:
arg.generate_deletion_code(code)
elif arg.type.is_ptr and arg.type.base_type.is_cpp_class:
arg.generate_result_code(code)
code.putln("delete %s;" % arg.result())
# else error reported earlier
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
class PassStatNode(StatNode):
# pass statement
child_attrs = []
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
pass
class BreakStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
if not code.break_label:
error(self.pos, "break statement not inside loop")
else:
code.put_goto(code.break_label)
class ContinueStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
if code.funcstate.in_try_finally:
error(self.pos, "continue statement inside try of try...finally")
elif not code.continue_label:
error(self.pos, "continue statement not inside loop")
else:
code.put_goto(code.continue_label)
class ReturnStatNode(StatNode):
# return statement
#
# value ExprNode or None
# return_type PyrexType
child_attrs = ["value"]
is_terminator = True
# Whether we are in a parallel section
in_parallel = False
def analyse_expressions(self, env):
return_type = env.return_type
self.return_type = return_type
if not return_type:
error(self.pos, "Return not inside a function body")
return
if self.value:
self.value.analyse_types(env)
if return_type.is_void or return_type.is_returncode:
error(self.value.pos,
"Return with value in void function")
else:
self.value = self.value.coerce_to(env.return_type, env)
else:
if (not return_type.is_void
and not return_type.is_pyobject
and not return_type.is_returncode):
error(self.pos, "Return value required")
def nogil_check(self, env):
if self.return_type.is_pyobject:
self.gil_error()
gil_message = "Returning Python object"
def generate_execution_code(self, code):
code.mark_pos(self.pos)
if not self.return_type:
# error reported earlier
return
if self.return_type.is_pyobject:
code.put_xdecref(Naming.retval_cname,
self.return_type)
if self.value:
self.value.generate_evaluation_code(code)
if self.return_type.is_memoryviewslice:
import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=Naming.retval_cname,
lhs_type=self.return_type,
lhs_pos=self.value.pos,
rhs=self.value,
code=code,
incref_rhs=self.value.is_name,
have_gil=self.in_nogil_context)
else:
self.value.make_owned_reference(code)
code.putln(
"%s = %s;" % (
Naming.retval_cname,
self.value.result_as(self.return_type)))
self.value.generate_post_assignment_code(code)
self.value.free_temps(code)
else:
if self.return_type.is_pyobject:
code.put_init_to_py_none(Naming.retval_cname, self.return_type)
elif self.return_type.is_returncode:
self.put_return(code, self.return_type.default_value)
for cname, type in code.funcstate.temps_holding_reference():
code.put_decref_clear(cname, type)
code.put_goto(code.return_label)
def put_return(self, code, value):
if self.in_parallel:
code.putln_openmp("#pragma omp critical(__pyx_returning)")
code.putln("%s = %s;" % (Naming.retval_cname, value))
def generate_function_definitions(self, env, code):
if self.value is not None:
self.value.generate_function_definitions(env, code)
def annotate(self, code):
if self.value:
self.value.annotate(code)
class RaiseStatNode(StatNode):
# raise statement
#
# exc_type ExprNode or None
# exc_value ExprNode or None
# exc_tb ExprNode or None
# cause ExprNode or None
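    # Illustrative examples (not from the original source):
    #     raise ValueError(msg)                 # exc_type only
    #     raise ValueError, msg, tb             # Py2 three-argument form
    #     raise ValueError(msg) from original   # 'cause' (Py3 exception chaining)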
child_attrs = ["exc_type", "exc_value", "exc_tb", "cause"]
is_terminator = True
def analyse_expressions(self, env):
if self.exc_type:
self.exc_type.analyse_types(env)
self.exc_type = self.exc_type.coerce_to_pyobject(env)
if self.exc_value:
self.exc_value.analyse_types(env)
self.exc_value = self.exc_value.coerce_to_pyobject(env)
if self.exc_tb:
self.exc_tb.analyse_types(env)
self.exc_tb = self.exc_tb.coerce_to_pyobject(env)
if self.cause:
self.cause.analyse_types(env)
self.cause = self.cause.coerce_to_pyobject(env)
# special cases for builtin exceptions
self.builtin_exc_name = None
if self.exc_type and not self.exc_value and not self.exc_tb:
exc = self.exc_type
import ExprNodes
if (isinstance(exc, ExprNodes.SimpleCallNode) and
not (exc.args or (exc.arg_tuple is not None and
exc.arg_tuple.args))):
exc = exc.function # extract the exception type
if exc.is_name and exc.entry.is_builtin:
self.builtin_exc_name = exc.name
if self.builtin_exc_name == 'MemoryError':
self.exc_type = None # has a separate implementation
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
if self.builtin_exc_name == 'MemoryError':
code.putln('PyErr_NoMemory(); %s' % code.error_goto(self.pos))
return
if self.exc_type:
self.exc_type.generate_evaluation_code(code)
type_code = self.exc_type.py_result()
else:
type_code = "0"
if self.exc_value:
self.exc_value.generate_evaluation_code(code)
value_code = self.exc_value.py_result()
else:
value_code = "0"
if self.exc_tb:
self.exc_tb.generate_evaluation_code(code)
tb_code = self.exc_tb.py_result()
else:
tb_code = "0"
if self.cause:
self.cause.generate_evaluation_code(code)
cause_code = self.cause.py_result()
else:
cause_code = "0"
code.globalstate.use_utility_code(raise_utility_code)
code.putln(
"__Pyx_Raise(%s, %s, %s, %s);" % (
type_code,
value_code,
tb_code,
cause_code))
for obj in (self.exc_type, self.exc_value, self.exc_tb, self.cause):
if obj:
obj.generate_disposal_code(code)
obj.free_temps(code)
code.putln(
code.error_goto(self.pos))
def generate_function_definitions(self, env, code):
if self.exc_type is not None:
self.exc_type.generate_function_definitions(env, code)
if self.exc_value is not None:
self.exc_value.generate_function_definitions(env, code)
if self.exc_tb is not None:
self.exc_tb.generate_function_definitions(env, code)
if self.cause is not None:
self.cause.generate_function_definitions(env, code)
def annotate(self, code):
if self.exc_type:
self.exc_type.annotate(code)
if self.exc_value:
self.exc_value.annotate(code)
if self.exc_tb:
self.exc_tb.annotate(code)
if self.cause:
self.cause.annotate(code)
class ReraiseStatNode(StatNode):
child_attrs = []
is_terminator = True
def analyse_expressions(self, env):
env.use_utility_code(restore_exception_utility_code)
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
vars = code.funcstate.exc_vars
if vars:
for varname in vars:
code.put_giveref(varname)
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % tuple(vars))
for varname in vars:
code.put("%s = 0; " % varname)
code.putln()
code.putln(code.error_goto(self.pos))
else:
error(self.pos, "Reraise not inside except clause")
class AssertStatNode(StatNode):
# assert statement
#
# cond ExprNode
# value ExprNode or None
child_attrs = ["cond", "value"]
def analyse_expressions(self, env):
self.cond = self.cond.analyse_boolean_expression(env)
if self.value:
self.value.analyse_types(env)
self.value = self.value.coerce_to_pyobject(env)
nogil_check = Node.gil_error
gil_message = "Raising exception"
def generate_execution_code(self, code):
code.putln("#ifndef CYTHON_WITHOUT_ASSERTIONS")
self.cond.generate_evaluation_code(code)
code.putln(
"if (unlikely(!%s)) {" %
self.cond.result())
if self.value:
self.value.generate_evaluation_code(code)
code.putln(
"PyErr_SetObject(PyExc_AssertionError, %s);" %
self.value.py_result())
self.value.generate_disposal_code(code)
self.value.free_temps(code)
else:
code.putln(
"PyErr_SetNone(PyExc_AssertionError);")
code.putln(
code.error_goto(self.pos))
code.putln(
"}")
self.cond.generate_disposal_code(code)
self.cond.free_temps(code)
code.putln("#endif")
def generate_function_definitions(self, env, code):
self.cond.generate_function_definitions(env, code)
if self.value is not None:
self.value.generate_function_definitions(env, code)
def annotate(self, code):
self.cond.annotate(code)
if self.value:
self.value.annotate(code)
class IfStatNode(StatNode):
# if statement
#
# if_clauses [IfClauseNode]
# else_clause StatNode or None
child_attrs = ["if_clauses", "else_clause"]
def analyse_declarations(self, env):
for if_clause in self.if_clauses:
if_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
for if_clause in self.if_clauses:
if_clause.analyse_expressions(env)
if self.else_clause:
self.else_clause.analyse_expressions(env)
def generate_execution_code(self, code):
code.mark_pos(self.pos)
end_label = code.new_label()
for if_clause in self.if_clauses:
if_clause.generate_execution_code(code, end_label)
if self.else_clause:
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(end_label)
def generate_function_definitions(self, env, code):
for clause in self.if_clauses:
clause.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
for if_clause in self.if_clauses:
if_clause.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class IfClauseNode(Node):
# if or elif clause in an if statement
#
# condition ExprNode
# body StatNode
child_attrs = ["condition", "body"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.condition = \
self.condition.analyse_temp_boolean_expression(env)
self.body.analyse_expressions(env)
def get_constant_condition_result(self):
if self.condition.has_constant_result():
return bool(self.condition.constant_result)
else:
return None
def generate_execution_code(self, code, end_label):
self.condition.generate_evaluation_code(code)
code.putln(
"if (%s) {" %
self.condition.result())
self.condition.generate_disposal_code(code)
self.condition.free_temps(code)
self.body.generate_execution_code(code)
code.put_goto(end_label)
code.putln("}")
def generate_function_definitions(self, env, code):
self.condition.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
self.condition.annotate(code)
self.body.annotate(code)
class SwitchCaseNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# conditions [ExprNode]
# body StatNode
child_attrs = ['conditions', 'body']
def generate_execution_code(self, code):
for cond in self.conditions:
code.mark_pos(cond.pos)
cond.generate_evaluation_code(code)
code.putln("case %s:" % cond.result())
self.body.generate_execution_code(code)
code.putln("break;")
def generate_function_definitions(self, env, code):
for cond in self.conditions:
cond.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
for cond in self.conditions:
cond.annotate(code)
self.body.annotate(code)
class SwitchStatNode(StatNode):
# Generated in the optimization of an if-elif-else node
#
# test ExprNode
# cases [SwitchCaseNode]
# else_clause StatNode or None
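    # Illustrative example (not from the original source): an if/elif chain that
    # compares the same C integer against compile-time constants, e.g.
    #
    #     if op == 1: ...
    #     elif op == 2: ...
    #     else: ...
    #
    # may be rewritten by Optimize.py into a single C switch statement.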
child_attrs = ['test', 'cases', 'else_clause']
def generate_execution_code(self, code):
self.test.generate_evaluation_code(code)
code.putln("switch (%s) {" % self.test.result())
for case in self.cases:
case.generate_execution_code(code)
if self.else_clause is not None:
code.putln("default:")
self.else_clause.generate_execution_code(code)
code.putln("break;")
code.putln("}")
def generate_function_definitions(self, env, code):
self.test.generate_function_definitions(env, code)
for case in self.cases:
case.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.test.annotate(code)
for case in self.cases:
case.annotate(code)
if self.else_clause is not None:
self.else_clause.annotate(code)
class LoopNode(object):
pass
class WhileStatNode(LoopNode, StatNode):
# while statement
#
# condition ExprNode
# body StatNode
# else_clause StatNode
child_attrs = ["condition", "body", "else_clause"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
if self.condition:
self.condition = self.condition.analyse_temp_boolean_expression(env)
self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause.analyse_expressions(env)
def generate_execution_code(self, code):
old_loop_labels = code.new_loop_labels()
code.putln(
"while (1) {")
if self.condition:
self.condition.generate_evaluation_code(code)
self.condition.generate_disposal_code(code)
code.putln(
"if (!%s) break;" %
self.condition.result())
self.condition.free_temps(code)
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
code.putln("}")
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(break_label)
def generate_function_definitions(self, env, code):
if self.condition:
self.condition.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
if self.condition:
self.condition.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class DictIterationNextNode(Node):
# Helper node for calling PyDict_Next() inside of a WhileStatNode
# and checking the dictionary size for changes. Created in
# Optimize.py.
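    # Illustrative example (not from the original source): a loop such as
    #
    #     for key, value in d.iteritems(): ...
    #
    # can be rewritten into a while loop around this node, which calls
    # PyDict_Next() and raises RuntimeError if the dict changed size during
    # iteration.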
child_attrs = ['dict_obj', 'expected_size', 'pos_index_addr', 'key_addr', 'value_addr']
def __init__(self, dict_obj, expected_size, pos_index_addr, key_addr, value_addr):
Node.__init__(
self, dict_obj.pos,
dict_obj = dict_obj,
expected_size = expected_size,
pos_index_addr = pos_index_addr,
key_addr = key_addr,
value_addr = value_addr,
type = PyrexTypes.c_bint_type)
def analyse_expressions(self, env):
self.dict_obj.analyse_types(env)
self.expected_size.analyse_types(env)
self.pos_index_addr.analyse_types(env)
self.key_addr.analyse_types(env)
self.value_addr.analyse_types(env)
def generate_function_definitions(self, env, code):
self.dict_obj.generate_function_definitions(env, code)
def generate_execution_code(self, code):
self.dict_obj.generate_evaluation_code(code)
code.putln("if (unlikely(%s != PyDict_Size(%s))) {" % (
self.expected_size.result(),
self.dict_obj.py_result(),
))
code.putln('PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration"); %s' % (
code.error_goto(self.pos)))
code.putln("}")
self.pos_index_addr.generate_evaluation_code(code)
code.putln("if (!PyDict_Next(%s, %s, %s, %s)) break;" % (
self.dict_obj.py_result(),
self.pos_index_addr.result(),
self.key_addr.result(),
self.value_addr.result()))
def ForStatNode(pos, **kw):
if 'iterator' in kw:
return ForInStatNode(pos, **kw)
else:
return ForFromStatNode(pos, **kw)
class ForInStatNode(LoopNode, StatNode):
# for statement
#
# target ExprNode
# iterator IteratorNode
# body StatNode
# else_clause StatNode
# item NextNode used internally
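    # Illustrative example (not from the original source):
    #
    #     for x in seq:
    #         ...
    #     else:
    #         ...
    #
    # the else clause runs only when the loop finishes without break; the
    # generated code below also intercepts a 'continue' inside the else block
    # so the iterator can be disposed of before jumping to the outer loop.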
child_attrs = ["target", "iterator", "body", "else_clause"]
item = None
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
import ExprNodes
self.target.analyse_target_types(env)
self.iterator.analyse_expressions(env)
self.item = ExprNodes.NextNode(self.iterator)
if (self.iterator.type.is_ptr or self.iterator.type.is_array) and \
self.target.type.assignable_from(self.iterator.type):
# C array slice optimization.
pass
else:
self.item = self.item.coerce_to(self.target.type, env)
self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause.analyse_expressions(env)
def generate_execution_code(self, code):
old_loop_labels = code.new_loop_labels()
self.iterator.generate_evaluation_code(code)
code.putln("for (;;) {")
self.item.generate_evaluation_code(code)
self.target.generate_assignment_code(self.item, code)
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
code.putln("}")
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
# in nested loops, the 'else' block can contain a
# 'continue' statement for the outer loop, but we may need
# to generate cleanup code before taking that path, so we
# intercept it here
orig_continue_label = code.continue_label
code.continue_label = code.new_label('outer_continue')
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
if code.label_used(code.continue_label):
code.put_goto(break_label)
code.put_label(code.continue_label)
self.iterator.generate_disposal_code(code)
code.put_goto(orig_continue_label)
code.set_loop_labels(old_loop_labels)
if code.label_used(break_label):
code.put_label(break_label)
self.iterator.generate_disposal_code(code)
self.iterator.free_temps(code)
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
self.iterator.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.target.annotate(code)
self.iterator.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
self.item.annotate(code)
class ForFromStatNode(LoopNode, StatNode):
# for name from expr rel name rel expr
#
# target NameNode
# bound1 ExprNode
# relation1 string
# relation2 string
# bound2 ExprNode
# step ExprNode or None
# body StatNode
# else_clause StatNode or None
#
# Used internally:
#
# from_range bool
# is_py_target bool
# loopvar_node ExprNode (usually a NameNode or temp node)
# py_loopvar_node PyTempNode or None
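    # Illustrative examples (not from the original source) of the legacy syntax:
    #
    #     for i from 0 <= i < n: ...
    #     for i from n > i >= 0 by 2: ...
    #
    # 'from_range' is set when an ordinary range() loop has been rewritten into
    # this form by the optimizer.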
child_attrs = ["target", "bound1", "bound2", "step", "body", "else_clause"]
is_py_target = False
loopvar_node = None
py_loopvar_node = None
from_range = False
gil_message = "For-loop using object bounds or target"
def nogil_check(self, env):
for x in (self.target, self.bound1, self.bound2):
if x.type.is_pyobject:
self.gil_error()
def analyse_declarations(self, env):
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
def analyse_expressions(self, env):
import ExprNodes
self.target.analyse_target_types(env)
self.bound1.analyse_types(env)
self.bound2.analyse_types(env)
if self.step is not None:
if isinstance(self.step, ExprNodes.UnaryMinusNode):
                warning(self.step.pos, "Probable infinite loop in for-from-by statement. Consider switching the directions of the relations.", 2)
self.step.analyse_types(env)
target_type = self.target.type
if self.target.type.is_numeric:
loop_type = self.target.type
else:
loop_type = PyrexTypes.c_int_type
if not self.bound1.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound1.type)
if not self.bound2.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.bound2.type)
if self.step is not None and not self.step.type.is_pyobject:
loop_type = PyrexTypes.widest_numeric_type(loop_type, self.step.type)
self.bound1 = self.bound1.coerce_to(loop_type, env)
self.bound2 = self.bound2.coerce_to(loop_type, env)
if not self.bound2.is_literal:
self.bound2 = self.bound2.coerce_to_temp(env)
if self.step is not None:
self.step = self.step.coerce_to(loop_type, env)
if not self.step.is_literal:
self.step = self.step.coerce_to_temp(env)
target_type = self.target.type
if not (target_type.is_pyobject or target_type.is_numeric):
error(self.target.pos,
"for-from loop variable must be c numeric type or Python object")
if target_type.is_numeric:
self.is_py_target = False
if isinstance(self.target, ExprNodes.IndexNode) and self.target.is_buffer_access:
raise error(self.pos, "Buffer indexing not allowed as for loop target.")
self.loopvar_node = self.target
self.py_loopvar_node = None
else:
self.is_py_target = True
c_loopvar_node = ExprNodes.TempNode(self.pos, loop_type, env)
self.loopvar_node = c_loopvar_node
self.py_loopvar_node = \
ExprNodes.CloneNode(c_loopvar_node).coerce_to_pyobject(env)
self.body.analyse_expressions(env)
if self.else_clause:
self.else_clause.analyse_expressions(env)
def generate_execution_code(self, code):
old_loop_labels = code.new_loop_labels()
from_range = self.from_range
self.bound1.generate_evaluation_code(code)
self.bound2.generate_evaluation_code(code)
offset, incop = self.relation_table[self.relation1]
if self.step is not None:
self.step.generate_evaluation_code(code)
step = self.step.result()
incop = "%s=%s" % (incop[0], step)
import ExprNodes
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.allocate(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.allocate(code)
if from_range:
loopvar_name = code.funcstate.allocate_temp(self.target.type, False)
else:
loopvar_name = self.loopvar_node.result()
code.putln(
"for (%s = %s%s; %s %s %s; %s%s) {" % (
loopvar_name,
self.bound1.result(), offset,
loopvar_name, self.relation2, self.bound2.result(),
loopvar_name, incop))
if self.py_loopvar_node:
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
elif from_range:
code.putln("%s = %s;" % (
self.target.result(), loopvar_name))
self.body.generate_execution_code(code)
code.put_label(code.continue_label)
if self.py_loopvar_node:
# This mess is to make for..from loops with python targets behave
# exactly like those with C targets with regards to re-assignment
# of the loop variable.
import ExprNodes
if self.target.entry.is_pyglobal:
# We know target is a NameNode, this is the only ugly case.
target_node = ExprNodes.PyTempNode(self.target.pos, None)
target_node.allocate(code)
interned_cname = code.intern_identifier(self.target.entry.name)
code.globalstate.use_utility_code(ExprNodes.get_name_interned_utility_code)
code.putln("%s = __Pyx_GetName(%s, %s); %s" % (
target_node.result(),
Naming.module_cname,
interned_cname,
code.error_goto_if_null(target_node.result(), self.target.pos)))
code.put_gotref(target_node.result())
else:
target_node = self.target
from_py_node = ExprNodes.CoerceFromPyTypeNode(self.loopvar_node.type, target_node, None)
from_py_node.temp_code = loopvar_name
from_py_node.generate_result_code(code)
if self.target.entry.is_pyglobal:
code.put_decref(target_node.result(), target_node.type)
target_node.release(code)
code.putln("}")
if self.py_loopvar_node:
# This is potentially wasteful, but we don't want the semantics to
# depend on whether or not the loop is a python type.
self.py_loopvar_node.generate_evaluation_code(code)
self.target.generate_assignment_code(self.py_loopvar_node, code)
if from_range:
code.funcstate.release_temp(loopvar_name)
break_label = code.break_label
code.set_loop_labels(old_loop_labels)
if self.else_clause:
code.putln("/*else*/ {")
self.else_clause.generate_execution_code(code)
code.putln("}")
code.put_label(break_label)
self.bound1.generate_disposal_code(code)
self.bound1.free_temps(code)
self.bound2.generate_disposal_code(code)
self.bound2.free_temps(code)
if isinstance(self.loopvar_node, ExprNodes.TempNode):
self.loopvar_node.release(code)
if isinstance(self.py_loopvar_node, ExprNodes.TempNode):
self.py_loopvar_node.release(code)
if self.step is not None:
self.step.generate_disposal_code(code)
self.step.free_temps(code)
relation_table = {
# {relop : (initial offset, increment op)}
'<=': ("", "++"),
'<' : ("+1", "++"),
'>=': ("", "--"),
'>' : ("-1", "--")
}
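    # Illustrative mapping (a sketch, not verbatim compiler output): for a
    # loop such as
    #     for i from 0 <= i < n:
    # relation1 is '<=' and relation2 is '<', so the table yields offset ""
    # and increment "++", and generate_execution_code() emits roughly
    #     for (i = 0; i < n; i++) { ... }
    # With an explicit step, e.g. 'for i from 0 <= i < n by 2:', the
    # increment operator is rewritten to '+=2' (see the incop rewrite above).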
def generate_function_definitions(self, env, code):
self.target.generate_function_definitions(env, code)
self.bound1.generate_function_definitions(env, code)
self.bound2.generate_function_definitions(env, code)
if self.step is not None:
self.step.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.target.annotate(code)
self.bound1.annotate(code)
self.bound2.annotate(code)
if self.step:
self.step.annotate(code)
self.body.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class WithStatNode(StatNode):
"""
Represents a Python with statement.
Implemented by the WithTransform as follows:
MGR = EXPR
EXIT = MGR.__exit__
VALUE = MGR.__enter__()
EXC = True
try:
try:
TARGET = VALUE # optional
BODY
except:
EXC = False
if not EXIT(*EXCINFO):
raise
finally:
if EXC:
EXIT(None, None, None)
MGR = EXIT = VALUE = None
"""
# manager The with statement manager object
# target ExprNode the target lhs of the __enter__() call
# body StatNode
# enter_call ExprNode the call to the __enter__() method
child_attrs = ["manager", "target", "body", "enter_call"]
enter_call = None
has_target = False
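    # Note that only the manager setup and the __exit__ lookup are generated
    # here; the try/finally and exception-testing part of the expansion shown
    # in the docstring is produced by the WithTransform. A sketch of the C
    # emitted by generate_execution_code() below (illustrative, not verbatim):
    #     /*with:*/ {
    #         <evaluate the manager expression>
    #         exit = PyObject_GetAttr(manager, "__exit__");
    #         <call manager.__enter__() via enter_call>
    #         <body>
    #     }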
def analyse_declarations(self, env):
self.manager.analyse_declarations(env)
self.enter_call.analyse_declarations(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.manager.analyse_types(env)
self.enter_call.analyse_types(env)
self.body.analyse_expressions(env)
def generate_function_definitions(self, env, code):
self.manager.generate_function_definitions(env, code)
self.enter_call.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def generate_execution_code(self, code):
code.putln("/*with:*/ {")
self.manager.generate_evaluation_code(code)
self.exit_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.putln("%s = PyObject_GetAttr(%s, %s); %s" % (
self.exit_var,
self.manager.py_result(),
code.get_py_string_const(EncodedString('__exit__'), identifier=True),
code.error_goto_if_null(self.exit_var, self.pos),
))
code.put_gotref(self.exit_var)
# need to free exit_var in the face of exceptions during setup
old_error_label = code.new_error_label()
intermediate_error_label = code.error_label
self.enter_call.generate_evaluation_code(code)
if not self.target:
self.enter_call.generate_disposal_code(code)
self.enter_call.free_temps(code)
self.manager.generate_disposal_code(code)
self.manager.free_temps(code)
code.error_label = old_error_label
self.body.generate_execution_code(code)
if code.label_used(intermediate_error_label):
step_over_label = code.new_label()
code.put_goto(step_over_label)
code.put_label(intermediate_error_label)
code.put_decref_clear(self.exit_var, py_object_type)
code.put_goto(old_error_label)
code.put_label(step_over_label)
code.funcstate.release_temp(self.exit_var)
code.putln('}')
class WithTargetAssignmentStatNode(AssignmentNode):
# The target assignment of the 'with' statement value (return
# value of the __enter__() call).
#
# This is a special cased assignment that steals the RHS reference
# and frees its temp.
#
# lhs ExprNode the assignment target
# orig_rhs ExprNode the return value of the __enter__() call (not owned by this node!)
# rhs ResultRefNode a ResultRefNode for the orig_rhs (owned by this node)
child_attrs = ["lhs", "rhs"]
def analyse_declarations(self, env):
self.lhs.analyse_target_declaration(env)
def analyse_types(self, env):
self.rhs.analyse_types(env)
self.lhs.analyse_target_types(env)
self.lhs.gil_assignment_check(env)
self.rhs = self.rhs.coerce_to(self.lhs.type, env)
def generate_execution_code(self, code):
self.rhs.generate_evaluation_code(code)
self.lhs.generate_assignment_code(self.rhs, code)
self.orig_rhs.generate_disposal_code(code)
self.orig_rhs.free_temps(code)
def annotate(self, code):
self.lhs.annotate(code)
self.rhs.annotate(code)
class TryExceptStatNode(StatNode):
    # try ... except statement
#
# body StatNode
# except_clauses [ExceptClauseNode]
# else_clause StatNode or None
child_attrs = ["body", "except_clauses", "else_clause"]
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
for except_clause in self.except_clauses:
except_clause.analyse_declarations(env)
if self.else_clause:
self.else_clause.analyse_declarations(env)
env.use_utility_code(reset_exception_utility_code)
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
default_clause_seen = 0
for except_clause in self.except_clauses:
except_clause.analyse_expressions(env)
if default_clause_seen:
error(except_clause.pos, "default 'except:' must be last")
if not except_clause.pattern:
default_clause_seen = 1
self.has_default_clause = default_clause_seen
if self.else_clause:
self.else_clause.analyse_expressions(env)
nogil_check = Node.gil_error
gil_message = "Try-except statement"
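    # Rough shape of the C emitted by generate_execution_code() below (a
    # sketch for orientation only, not verbatim output):
    #     {
    #         __Pyx_ExceptionSave(&save0, &save1, &save2);
    #         /*try:*/ { <body> }
    #         /*else:*/ { <else clause, if present> }
    #         goto try_end;
    #     our_error_label:
    #         <except clauses; a matching clause jumps to exception_handled>
    #         __Pyx_ExceptionReset(save0, save1, save2);
    #         goto outer_error_label;            /* no clause matched */
    #     exception_handled:
    #         __Pyx_ExceptionReset(save0, save1, save2);
    #     try_end:;
    #     }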
def generate_execution_code(self, code):
old_return_label = code.return_label
old_break_label = code.break_label
old_continue_label = code.continue_label
old_error_label = code.new_error_label()
our_error_label = code.error_label
except_end_label = code.new_label('exception_handled')
except_error_label = code.new_label('except_error')
except_return_label = code.new_label('except_return')
try_return_label = code.new_label('try_return')
try_break_label = code.new_label('try_break')
try_continue_label = code.new_label('try_continue')
try_end_label = code.new_label('try_end')
exc_save_vars = [code.funcstate.allocate_temp(py_object_type, False)
for i in xrange(3)]
code.putln("{")
code.putln("__Pyx_ExceptionSave(%s);" %
', '.join(['&%s' % var for var in exc_save_vars]))
for var in exc_save_vars:
code.put_xgotref(var)
code.putln(
"/*try:*/ {")
code.return_label = try_return_label
code.break_label = try_break_label
code.continue_label = try_continue_label
self.body.generate_execution_code(code)
code.putln(
"}")
temps_to_clean_up = code.funcstate.all_free_managed_temps()
code.error_label = except_error_label
code.return_label = except_return_label
if self.else_clause:
code.putln(
"/*else:*/ {")
self.else_clause.generate_execution_code(code)
code.putln(
"}")
for var in exc_save_vars:
code.put_xdecref_clear(var, py_object_type)
code.put_goto(try_end_label)
if code.label_used(try_return_label):
code.put_label(try_return_label)
for var in exc_save_vars:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s);" %
', '.join(exc_save_vars))
code.put_goto(old_return_label)
code.put_label(our_error_label)
for temp_name, type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, type)
for except_clause in self.except_clauses:
except_clause.generate_handling_code(code, except_end_label)
error_label_used = code.label_used(except_error_label)
if error_label_used or not self.has_default_clause:
if error_label_used:
code.put_label(except_error_label)
for var in exc_save_vars:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s);" %
', '.join(exc_save_vars))
code.put_goto(old_error_label)
for exit_label, old_label in zip(
[try_break_label, try_continue_label, except_return_label],
[old_break_label, old_continue_label, old_return_label]):
if code.label_used(exit_label):
code.put_label(exit_label)
for var in exc_save_vars:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s);" %
', '.join(exc_save_vars))
code.put_goto(old_label)
if code.label_used(except_end_label):
code.put_label(except_end_label)
for var in exc_save_vars:
code.put_xgiveref(var)
code.putln("__Pyx_ExceptionReset(%s);" %
', '.join(exc_save_vars))
code.put_label(try_end_label)
code.putln("}")
for cname in exc_save_vars:
code.funcstate.release_temp(cname)
code.return_label = old_return_label
code.break_label = old_break_label
code.continue_label = old_continue_label
code.error_label = old_error_label
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
for except_clause in self.except_clauses:
except_clause.generate_function_definitions(env, code)
if self.else_clause is not None:
self.else_clause.generate_function_definitions(env, code)
def annotate(self, code):
self.body.annotate(code)
for except_node in self.except_clauses:
except_node.annotate(code)
if self.else_clause:
self.else_clause.annotate(code)
class ExceptClauseNode(Node):
# Part of try ... except statement.
#
# pattern [ExprNode]
# target ExprNode or None
# body StatNode
# excinfo_target ResultRefNode or None optional target for exception info
# match_flag string result of exception match
# exc_value ExcValueNode used internally
# function_name string qualified name of enclosing function
# exc_vars (string * 3) local exception variables
# excinfo_target is never set by the parser, but can be set by a transform
# in order to extract more extensive information about the exception as a
# sys.exc_info()-style tuple into a target variable
child_attrs = ["pattern", "target", "body", "exc_value", "excinfo_target"]
exc_value = None
excinfo_target = None
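    # Sketch of the handling code emitted for 'except SomeError, e:'
    # (illustrative only; names simplified, a single pattern assumed):
    #     if (PyErr_ExceptionMatches(<SomeError>)) {
    #         <add a traceback entry for the enclosing function>
    #         if (__Pyx_GetException(&type, &value, &tb) < 0) goto error;
    #         e = value;                      /* assignment to the target */
    #         <except clause body>
    #         Py_DECREF(type); Py_DECREF(value); Py_DECREF(tb);
    #         goto exception_handled;
    #     }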
def analyse_declarations(self, env):
if self.target:
self.target.analyse_target_declaration(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
import ExprNodes
genv = env.global_scope()
self.function_name = env.qualified_name
if self.pattern:
# normalise/unpack self.pattern into a list
for i, pattern in enumerate(self.pattern):
pattern.analyse_expressions(env)
self.pattern[i] = pattern.coerce_to_pyobject(env)
if self.target:
self.exc_value = ExprNodes.ExcValueNode(self.pos, env)
self.target.analyse_target_expression(env, self.exc_value)
if self.excinfo_target is not None:
import ExprNodes
self.excinfo_tuple = ExprNodes.TupleNode(pos=self.pos, args=[
ExprNodes.ExcValueNode(pos=self.pos, env=env) for x in range(3)])
self.excinfo_tuple.analyse_expressions(env)
self.body.analyse_expressions(env)
def generate_handling_code(self, code, end_label):
code.mark_pos(self.pos)
if self.pattern:
exc_tests = []
for pattern in self.pattern:
pattern.generate_evaluation_code(code)
exc_tests.append("PyErr_ExceptionMatches(%s)" % pattern.py_result())
match_flag = code.funcstate.allocate_temp(PyrexTypes.c_int_type, False)
code.putln(
"%s = %s;" % (match_flag, ' || '.join(exc_tests)))
for pattern in self.pattern:
pattern.generate_disposal_code(code)
pattern.free_temps(code)
code.putln(
"if (%s) {" %
match_flag)
code.funcstate.release_temp(match_flag)
else:
code.putln("/*except:*/ {")
if not getattr(self.body, 'stats', True) and \
self.excinfo_target is None and self.target is None:
# most simple case: no exception variable, empty body (pass)
# => reset the exception state, done
code.putln("PyErr_Restore(0,0,0);")
code.put_goto(end_label)
code.putln("}")
return
exc_vars = [code.funcstate.allocate_temp(py_object_type,
manage_ref=True)
for i in xrange(3)]
code.put_add_traceback(self.function_name)
# We always have to fetch the exception value even if
# there is no target, because this also normalises the
# exception and stores it in the thread state.
code.globalstate.use_utility_code(get_exception_utility_code)
exc_args = "&%s, &%s, &%s" % tuple(exc_vars)
code.putln("if (__Pyx_GetException(%s) < 0) %s" % (exc_args,
code.error_goto(self.pos)))
for x in exc_vars:
code.put_gotref(x)
if self.target:
self.exc_value.set_var(exc_vars[1])
self.exc_value.generate_evaluation_code(code)
self.target.generate_assignment_code(self.exc_value, code)
if self.excinfo_target is not None:
for tempvar, node in zip(exc_vars, self.excinfo_tuple.args):
node.set_var(tempvar)
self.excinfo_tuple.generate_evaluation_code(code)
self.excinfo_target.result_code = self.excinfo_tuple.result()
old_break_label, old_continue_label = code.break_label, code.continue_label
code.break_label = code.new_label('except_break')
code.continue_label = code.new_label('except_continue')
old_exc_vars = code.funcstate.exc_vars
code.funcstate.exc_vars = exc_vars
self.body.generate_execution_code(code)
code.funcstate.exc_vars = old_exc_vars
if self.excinfo_target is not None:
self.excinfo_tuple.generate_disposal_code(code)
for var in exc_vars:
code.put_decref_clear(var, py_object_type)
code.put_goto(end_label)
if code.label_used(code.break_label):
code.put_label(code.break_label)
if self.excinfo_target is not None:
self.excinfo_tuple.generate_disposal_code(code)
for var in exc_vars:
code.put_decref_clear(var, py_object_type)
code.put_goto(old_break_label)
code.break_label = old_break_label
if code.label_used(code.continue_label):
code.put_label(code.continue_label)
if self.excinfo_target is not None:
self.excinfo_tuple.generate_disposal_code(code)
for var in exc_vars:
code.put_decref_clear(var, py_object_type)
code.put_goto(old_continue_label)
code.continue_label = old_continue_label
if self.excinfo_target is not None:
self.excinfo_tuple.free_temps(code)
for temp in exc_vars:
code.funcstate.release_temp(temp)
code.putln(
"}")
def generate_function_definitions(self, env, code):
if self.target is not None:
self.target.generate_function_definitions(env, code)
self.body.generate_function_definitions(env, code)
def annotate(self, code):
if self.pattern:
for pattern in self.pattern:
pattern.annotate(code)
if self.target:
self.target.annotate(code)
self.body.annotate(code)
class TryFinallyStatNode(StatNode):
# try ... finally statement
#
# body StatNode
# finally_clause StatNode
#
# The plan is that we funnel all continue, break
# return and error gotos into the beginning of the
# finally block, setting a variable to remember which
# one we're doing. At the end of the finally block, we
# switch on the variable to figure out where to go.
# In addition, if we're doing an error, we save the
# exception on entry to the finally block and restore
# it on exit.
child_attrs = ["body", "finally_clause"]
preserve_exception = 1
# handle exception case, in addition to return/break/continue
handle_error_case = True
disallow_continue_in_try_finally = 0
# There doesn't seem to be any point in disallowing
# continue in the try block, since we have no problem
# handling it.
is_try_finally_in_nogil = False
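    # Rough shape of the generated C (a sketch, not verbatim output; label
    # numbering and the exception save/restore are omitted):
    #     /*try:*/ { <body> }       /* break/continue/return/error inside the
    #                                  body jump to the labels below */
    #     /*finally:*/ {
    #         int __pyx_why;
    #         __pyx_why = 0; goto catch;
    #     new_break_label:    __pyx_why = 1; goto catch;
    #     new_continue_label: __pyx_why = 2; goto catch;
    #         ...
    #     catch:
    #         <finally clause>
    #         switch (__pyx_why) {
    #             case 1: goto old_break_label;
    #             case 2: goto old_continue_label;
    #             ...
    #         }
    #     }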
def create_analysed(pos, env, body, finally_clause):
node = TryFinallyStatNode(pos, body=body, finally_clause=finally_clause)
return node
create_analysed = staticmethod(create_analysed)
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.finally_clause.analyse_declarations(env)
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
self.finally_clause.analyse_expressions(env)
nogil_check = Node.gil_error
gil_message = "Try-finally statement"
def generate_execution_code(self, code):
old_error_label = code.error_label
old_labels = code.all_new_labels()
new_labels = code.get_all_labels()
new_error_label = code.error_label
if not self.handle_error_case:
code.error_label = old_error_label
catch_label = code.new_label()
code.putln("/*try:*/ {")
if self.disallow_continue_in_try_finally:
was_in_try_finally = code.funcstate.in_try_finally
code.funcstate.in_try_finally = 1
self.body.generate_execution_code(code)
if self.disallow_continue_in_try_finally:
code.funcstate.in_try_finally = was_in_try_finally
code.putln("}")
temps_to_clean_up = code.funcstate.all_free_managed_temps()
code.mark_pos(self.finally_clause.pos)
code.putln("/*finally:*/ {")
cases_used = []
error_label_used = 0
for i, new_label in enumerate(new_labels):
if new_label in code.labels_used:
cases_used.append(i)
if new_label == new_error_label:
error_label_used = 1
error_label_case = i
if cases_used:
code.putln("int __pyx_why;")
if error_label_used and self.preserve_exception:
if self.is_try_finally_in_nogil:
code.declare_gilstate()
code.putln("PyObject *%s, *%s, *%s;" % Naming.exc_vars)
code.putln("int %s;" % Naming.exc_lineno_name)
exc_var_init_zero = ''.join(
["%s = 0; " % var for var in Naming.exc_vars])
exc_var_init_zero += '%s = 0;' % Naming.exc_lineno_name
code.putln(exc_var_init_zero)
else:
exc_var_init_zero = None
code.use_label(catch_label)
code.putln("__pyx_why = 0; goto %s;" % catch_label)
for i in cases_used:
new_label = new_labels[i]
#if new_label and new_label != "<try>":
if new_label == new_error_label and self.preserve_exception:
self.put_error_catcher(code,
new_error_label, i+1, catch_label, temps_to_clean_up)
else:
code.put('%s: ' % new_label)
if exc_var_init_zero:
code.putln(exc_var_init_zero)
code.putln("__pyx_why = %s; goto %s;" % (i+1, catch_label))
code.put_label(catch_label)
code.set_all_labels(old_labels)
if error_label_used:
code.new_error_label()
finally_error_label = code.error_label
self.finally_clause.generate_execution_code(code)
if error_label_used:
if finally_error_label in code.labels_used and self.preserve_exception:
over_label = code.new_label()
code.put_goto(over_label)
code.put_label(finally_error_label)
code.putln("if (__pyx_why == %d) {" % (error_label_case + 1))
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
for var in Naming.exc_vars:
code.putln("Py_XDECREF(%s);" % var)
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.putln("}")
code.put_goto(old_error_label)
code.put_label(over_label)
code.error_label = old_error_label
if cases_used:
code.putln(
"switch (__pyx_why) {")
for i in cases_used:
old_label = old_labels[i]
if old_label == old_error_label and self.preserve_exception:
self.put_error_uncatcher(code, i+1, old_error_label)
else:
code.use_label(old_label)
code.putln("case %s: goto %s;" % (i+1, old_label))
# End the switch
code.putln(
"}")
# End finally
code.putln(
"}")
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
self.finally_clause.generate_function_definitions(env, code)
def put_error_catcher(self, code, error_label, i, catch_label,
temps_to_clean_up):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln("%s: {" % error_label)
code.putln("__pyx_why = %s;" % i)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
for temp_name, type in temps_to_clean_up:
code.put_xdecref_clear(temp_name, type)
code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % Naming.exc_vars)
code.putln("%s = %s;" % (Naming.exc_lineno_name, Naming.lineno_cname))
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
code.put_goto(catch_label)
code.putln("}")
def put_error_uncatcher(self, code, i, error_label):
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln(
"case %s: {" % i)
if self.is_try_finally_in_nogil:
code.put_ensure_gil(declare_gilstate=False)
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % Naming.exc_vars)
code.putln("%s = %s;" % (Naming.lineno_cname, Naming.exc_lineno_name))
if self.is_try_finally_in_nogil:
code.put_release_ensured_gil()
for var in Naming.exc_vars:
code.putln(
"%s = 0;" % var)
code.put_goto(error_label)
code.putln(
"}")
def annotate(self, code):
self.body.annotate(code)
self.finally_clause.annotate(code)
class NogilTryFinallyStatNode(TryFinallyStatNode):
"""
A try/finally statement that may be used in nogil code sections.
"""
preserve_exception = False
nogil_check = None
class GILStatNode(NogilTryFinallyStatNode):
# 'with gil' or 'with nogil' statement
#
# state string 'gil' or 'nogil'
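    # Typical Cython-level usage (illustrative):
    #     with nogil:
    #         <C-only code, GIL released>
    #     with gil:
    #         <Python operations inside a nogil section>
    # The body is wrapped in a try/finally (this class derives from
    # NogilTryFinallyStatNode), so the GILExitNode finally clause restores
    # the previous GIL state on every exit path.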
def __init__(self, pos, state, body):
self.state = state
TryFinallyStatNode.__init__(self, pos,
body = body,
finally_clause = GILExitNode(pos, state = state))
def analyse_declarations(self, env):
env._in_with_gil_block = (self.state == 'gil')
if self.state == 'gil':
env.has_with_gil_block = True
return super(GILStatNode, self).analyse_declarations(env)
def analyse_expressions(self, env):
env.use_utility_code(force_init_threads_utility_code)
was_nogil = env.nogil
env.nogil = self.state == 'nogil'
TryFinallyStatNode.analyse_expressions(self, env)
env.nogil = was_nogil
def generate_execution_code(self, code):
code.mark_pos(self.pos)
code.begin_block()
if self.state == 'gil':
code.put_ensure_gil()
else:
code.put_release_gil()
TryFinallyStatNode.generate_execution_code(self, code)
code.end_block()
class GILExitNode(StatNode):
"""
Used as the 'finally' block in a GILStatNode
state string 'gil' or 'nogil'
"""
child_attrs = []
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
if self.state == 'gil':
code.put_release_ensured_gil()
else:
code.put_acquire_gil()
class EnsureGILNode(GILExitNode):
"""
Ensure the GIL in nogil functions for cleanup before returning.
"""
def generate_execution_code(self, code):
code.put_ensure_gil(declare_gilstate=False)
class CImportStatNode(StatNode):
# cimport statement
#
# module_name string Qualified name of module being imported
# as_name string or None Name specified in "as" clause, if any
child_attrs = []
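    # Illustrative example of what analyse_declarations() does for a dotted
    # cimport: 'cimport foo.bar.baz' finds the top-level scope 'foo',
    # declares 'bar' inside it and 'baz' inside 'bar', then binds either the
    # 'as' name to the innermost module scope or the name 'foo' to the
    # top-level scope in the current environment.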
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
module_scope = env.find_module(self.module_name, self.pos)
if "." in self.module_name:
names = [EncodedString(name) for name in self.module_name.split(".")]
top_name = names[0]
top_module_scope = env.context.find_submodule(top_name)
module_scope = top_module_scope
for name in names[1:]:
submodule_scope = module_scope.find_submodule(name)
module_scope.declare_module(name, submodule_scope, self.pos)
module_scope = submodule_scope
if self.as_name:
env.declare_module(self.as_name, module_scope, self.pos)
else:
env.add_imported_module(module_scope)
env.declare_module(top_name, top_module_scope, self.pos)
else:
name = self.as_name or self.module_name
env.declare_module(name, module_scope, self.pos)
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
pass
class FromCImportStatNode(StatNode):
# from ... cimport statement
#
# module_name string Qualified name of module
# imported_names [(pos, name, as_name, kind)] Names to be imported
child_attrs = []
def analyse_declarations(self, env):
if not env.is_module_scope:
error(self.pos, "cimport only allowed at module level")
return
module_scope = env.find_module(self.module_name, self.pos)
env.add_imported_module(module_scope)
for pos, name, as_name, kind in self.imported_names:
if name == "*":
for local_name, entry in module_scope.entries.items():
env.add_imported_entry(local_name, entry, pos)
else:
entry = module_scope.lookup(name)
if entry:
if kind and not self.declaration_matches(entry, kind):
entry.redeclared(pos)
entry.used = 1
else:
if kind == 'struct' or kind == 'union':
entry = module_scope.declare_struct_or_union(name,
kind = kind, scope = None, typedef_flag = 0, pos = pos)
elif kind == 'class':
entry = module_scope.declare_c_class(name, pos = pos,
module_name = self.module_name)
else:
submodule_scope = env.context.find_module(name, relative_to = module_scope, pos = self.pos)
if submodule_scope.parent_module is module_scope:
env.declare_module(as_name or name, submodule_scope, self.pos)
else:
error(pos, "Name '%s' not declared in module '%s'"
% (name, self.module_name))
if entry:
local_name = as_name or name
env.add_imported_entry(local_name, entry, pos)
def declaration_matches(self, entry, kind):
if not entry.is_type:
return 0
type = entry.type
if kind == 'class':
if not type.is_extension_type:
return 0
else:
if not type.is_struct_or_union:
return 0
if kind != type.kind:
return 0
return 1
def analyse_expressions(self, env):
pass
def generate_execution_code(self, code):
pass
class FromImportStatNode(StatNode):
# from ... import statement
#
# module ImportNode
# items [(string, NameNode)]
# interned_items [(string, NameNode, ExprNode)]
# item PyTempNode used internally
# import_star boolean used internally
child_attrs = ["module"]
import_star = 0
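    # Sketch of the per-name code emitted by generate_execution_code() for
    # 'from mod import name' (illustrative, not verbatim output):
    #     value = PyObject_GetAttr(module, interned_name);
    #     if (value == NULL) {
    #         if (PyErr_ExceptionMatches(PyExc_AttributeError))
    #             __Pyx_RaiseImportError(interned_name);
    #         goto error;
    #     }
    #     <assign value (possibly coerced to a C type) to the target>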
def analyse_declarations(self, env):
for name, target in self.items:
if name == "*":
if not env.is_module_scope:
error(self.pos, "import * only allowed at module level")
return
env.has_import_star = 1
self.import_star = 1
else:
target.analyse_target_declaration(env)
def analyse_expressions(self, env):
import ExprNodes
self.module.analyse_expressions(env)
self.item = ExprNodes.RawCNameExprNode(self.pos, py_object_type)
self.interned_items = []
for name, target in self.items:
if name == '*':
for _, entry in env.entries.items():
if not entry.is_type and entry.type.is_extension_type:
env.use_utility_code(ExprNodes.type_test_utility_code)
break
else:
entry = env.lookup(target.name)
# check whether or not entry is already cimported
if (entry.is_type and entry.type.name == name
and hasattr(entry.type, 'module_name')):
if entry.type.module_name == self.module.module_name.value:
# cimported with absolute name
continue
try:
# cimported with relative name
module = env.find_module(self.module.module_name.value,
pos=None)
if entry.type.module_name == module.qualified_name:
continue
except AttributeError:
pass
target.analyse_target_expression(env, None)
if target.type is py_object_type:
coerced_item = None
else:
coerced_item = self.item.coerce_to(target.type, env)
self.interned_items.append((name, target, coerced_item))
if self.interned_items:
env.use_utility_code(raise_import_error_utility_code)
def generate_execution_code(self, code):
self.module.generate_evaluation_code(code)
if self.import_star:
code.putln(
'if (%s(%s) < 0) %s;' % (
Naming.import_star,
self.module.py_result(),
code.error_goto(self.pos)))
item_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.item.set_cname(item_temp)
for name, target, coerced_item in self.interned_items:
cname = code.intern_identifier(name)
code.putln(
'%s = PyObject_GetAttr(%s, %s);' % (
item_temp,
self.module.py_result(),
cname))
code.putln('if (%s == NULL) {' % item_temp)
code.putln(
'if (PyErr_ExceptionMatches(PyExc_AttributeError)) '
'__Pyx_RaiseImportError(%s);' % cname)
code.putln(code.error_goto_if_null(item_temp, self.pos))
code.putln('}')
code.put_gotref(item_temp)
if coerced_item is None:
target.generate_assignment_code(self.item, code)
else:
coerced_item.allocate_temp_result(code)
coerced_item.generate_result_code(code)
target.generate_assignment_code(coerced_item, code)
code.put_decref_clear(item_temp, py_object_type)
code.funcstate.release_temp(item_temp)
self.module.generate_disposal_code(code)
self.module.free_temps(code)
class ParallelNode(Node):
"""
Base class for cython.parallel constructs.
"""
nogil_check = None
class ParallelStatNode(StatNode, ParallelNode):
"""
Base class for 'with cython.parallel.parallel():' and 'for i in prange():'.
assignments { Entry(var) : (var.pos, inplace_operator_or_None) }
assignments to variables in this parallel section
parent parent ParallelStatNode or None
is_parallel indicates whether this node is OpenMP parallel
(true for #pragma omp parallel for and
#pragma omp parallel)
is_parallel is true for:
#pragma omp parallel
#pragma omp parallel for
sections, but NOT for
#pragma omp for
We need this to determine the sharing attributes.
privatization_insertion_point a code insertion point used to make temps
private (esp. the "nsteps" temp)
args tuple the arguments passed to the parallel construct
kwargs DictNode the keyword arguments passed to the parallel
construct (replaced by its compile time value)
"""
child_attrs = ['body', 'num_threads']
body = None
is_prange = False
error_label_used = False
num_threads = None
parallel_exc = (
Naming.parallel_exc_type,
Naming.parallel_exc_value,
Naming.parallel_exc_tb,
)
parallel_pos_info = (
Naming.parallel_filename,
Naming.parallel_lineno,
Naming.parallel_clineno,
)
pos_info = (
Naming.filename_cname,
Naming.lineno_cname,
Naming.clineno_cname,
)
critical_section_counter = 0
def __init__(self, pos, **kwargs):
super(ParallelStatNode, self).__init__(pos, **kwargs)
# All assignments in this scope
self.assignments = kwargs.get('assignments') or {}
# All seen closure cnames and their temporary cnames
self.seen_closure_vars = set()
# Dict of variables that should be declared (first|last|)private or
# reduction { Entry: (op, lastprivate) }.
# If op is not None, it's a reduction.
self.privates = {}
# [NameNode]
self.assigned_nodes = []
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
self.num_threads = None
if self.kwargs:
for idx, dictitem in enumerate(self.kwargs.key_value_pairs[:]):
if dictitem.key.value == 'num_threads':
self.num_threads = dictitem.value
del self.kwargs.key_value_pairs[idx]
break
try:
self.kwargs = self.kwargs.compile_time_value(env)
except Exception, e:
error(self.kwargs.pos, "Only compile-time values may be "
"supplied as keyword arguments")
else:
self.kwargs = {}
for kw, val in self.kwargs.iteritems():
if kw not in self.valid_keyword_arguments:
error(self.pos, "Invalid keyword argument: %s" % kw)
else:
setattr(self, kw, val)
def analyse_expressions(self, env):
if self.num_threads:
self.num_threads.analyse_expressions(env)
self.body.analyse_expressions(env)
self.analyse_sharing_attributes(env)
if self.num_threads is not None:
if self.parent and self.parent.num_threads is not None:
error(self.pos,
"num_threads already declared in outer section")
elif self.parent:
error(self.pos,
"num_threads must be declared in the parent parallel section")
elif (self.num_threads.type.is_int and
self.num_threads.is_literal and
self.num_threads.compile_time_value(env) <= 0):
error(self.pos,
"argument to num_threads must be greater than 0")
self.num_threads = self.num_threads.coerce_to(
PyrexTypes.c_int_type, env).coerce_to_temp(env)
def analyse_sharing_attributes(self, env):
"""
Analyse the privates for this block and set them in self.privates.
This should be called in a post-order fashion during the
analyse_expressions phase
"""
for entry, (pos, op) in self.assignments.iteritems():
if self.is_prange and not self.is_parallel:
# closely nested prange in a with parallel block, disallow
# assigning to privates in the with parallel block (we
# consider it too implicit and magicky for users)
if entry in self.parent.assignments:
error(pos,
"Cannot assign to private of outer parallel block")
continue
if not self.is_prange and op:
                # Again possible, but considered too magicky
error(pos, "Reductions not allowed for parallel blocks")
continue
# By default all variables should have the same values as if
# executed sequentially
lastprivate = True
self.propagate_var_privatization(entry, pos, op, lastprivate)
def propagate_var_privatization(self, entry, pos, op, lastprivate):
"""
        Propagate the sharing attributes of a variable. If the privatization is
        determined by a parent scope, don't propagate further.
If we are a prange, we propagate our sharing attributes outwards to
other pranges. If we are a prange in parallel block and the parallel
block does not determine the variable private, we propagate to the
parent of the parent. Recursion stops at parallel blocks, as they have
no concept of lastprivate or reduction.
So the following cases propagate:
sum is a reduction for all loops:
for i in prange(n):
for j in prange(n):
for k in prange(n):
sum += i * j * k
sum is a reduction for both loops, local_var is private to the
parallel with block:
for i in prange(n):
with parallel:
local_var = ... # private to the parallel
for j in prange(n):
sum += i * j
Nested with parallel blocks are disallowed, because they wouldn't
allow you to propagate lastprivates or reductions:
#pragma omp parallel for lastprivate(i)
for i in prange(n):
sum = 0
#pragma omp parallel private(j, sum)
with parallel:
#pragma omp parallel
with parallel:
#pragma omp for lastprivate(j) reduction(+:sum)
for j in prange(n):
sum += i
# sum and j are well-defined here
# sum and j are undefined here
# sum and j are undefined here
"""
self.privates[entry] = (op, lastprivate)
if entry.type.is_memoryviewslice:
error(pos, "Memoryview slices can only be shared in parallel sections")
return
if self.is_prange:
if not self.is_parallel and entry not in self.parent.assignments:
# Parent is a parallel with block
parent = self.parent.parent
else:
parent = self.parent
# We don't need to propagate privates, only reductions and
# lastprivates
if parent and (op or lastprivate):
parent.propagate_var_privatization(entry, pos, op, lastprivate)
def _allocate_closure_temp(self, code, entry):
"""
        Helper function that allocates a temporary for a closure variable that
        is assigned to.
"""
if self.parent:
return self.parent._allocate_closure_temp(code, entry)
if entry.cname in self.seen_closure_vars:
return entry.cname
cname = code.funcstate.allocate_temp(entry.type, True)
# Add both the actual cname and the temp cname, as the actual cname
# will be replaced with the temp cname on the entry
self.seen_closure_vars.add(entry.cname)
self.seen_closure_vars.add(cname)
self.modified_entries.append((entry, entry.cname))
code.putln("%s = %s;" % (cname, entry.cname))
entry.cname = cname
def initialize_privates_to_nan(self, code, exclude=None):
first = True
for entry, (op, lastprivate) in self.privates.iteritems():
if not op and (not exclude or entry != exclude):
invalid_value = entry.type.invalid_value()
if invalid_value:
if first:
code.putln("/* Initialize private variables to "
"invalid values */")
code.globalstate.use_utility_code(
invalid_values_utility_code)
first = False
have_invalid_values = True
code.putln("%s = %s;" % (entry.cname,
entry.type.cast_code(invalid_value)))
def put_num_threads(self, code):
"""
        Write self.num_threads, if set, as the num_threads() OpenMP clause.
"""
if self.num_threads is not None:
c = self.begin_of_parallel_control_block_point
# we need to set the owner to ourselves temporarily, as
# allocate_temp may generate a comment in the middle of our pragma
# otherwise when DebugFlags.debug_temp_code_comments is in effect
owner = c.funcstate.owner
c.funcstate.owner = c
self.num_threads.generate_evaluation_code(c)
c.funcstate.owner = owner
code.put(" num_threads(%s)" % (self.num_threads.result(),))
def declare_closure_privates(self, code):
"""
If a variable is in a scope object, we need to allocate a temp and
assign the value from the temp to the variable in the scope object
after the parallel section. This kind of copying should be done only
in the outermost parallel section.
"""
self.modified_entries = []
for entry, (pos, op) in self.assignments.iteritems():
if entry.from_closure or entry.in_closure:
self._allocate_closure_temp(code, entry)
def release_closure_privates(self, code):
"""
Release any temps used for variables in scope objects. As this is the
outermost parallel block, we don't need to delete the cnames from
self.seen_closure_vars.
"""
for entry, original_cname in self.modified_entries:
code.putln("%s = %s;" % (original_cname, entry.cname))
code.funcstate.release_temp(entry.cname)
entry.cname = original_cname
def privatize_temps(self, code, exclude_temps=()):
"""
Make any used temporaries private. Before the relevant code block
code.start_collecting_temps() should have been called.
"""
if self.is_parallel:
c = self.privatization_insertion_point
temps = code.funcstate.stop_collecting_temps()
privates, firstprivates = [], []
for temp, type in temps:
if type.is_pyobject:
firstprivates.append(temp)
else:
privates.append(temp)
if privates:
c.put(" private(%s)" % ", ".join(privates))
if firstprivates:
c.put(" firstprivate(%s)" % ", ".join(firstprivates))
if self.breaking_label_used:
shared_vars = [Naming.parallel_why]
if self.error_label_used:
shared_vars.extend(self.parallel_exc)
c.put(" private(%s, %s, %s)" % self.pos_info)
c.put(" shared(%s)" % ', '.join(shared_vars))
def setup_parallel_control_flow_block(self, code):
"""
Sets up a block that surrounds the parallel block to determine
how the parallel section was exited. Any kind of return is
trapped (break, continue, return, exceptions). This is the idea:
{
int why = 0;
#pragma omp parallel
{
return # -> goto new_return_label;
goto end_parallel;
new_return_label:
why = 3;
goto end_parallel;
end_parallel:;
#pragma omp flush(why) # we need to flush for every iteration
}
if (why == 3)
goto old_return_label;
}
"""
self.old_loop_labels = code.new_loop_labels()
self.old_error_label = code.new_error_label()
self.old_return_label = code.return_label
code.return_label = code.new_label(name="return")
code.begin_block() # parallel control flow block
self.begin_of_parallel_control_block_point = code.insertion_point()
def begin_parallel_block(self, code):
"""
Each OpenMP thread in a parallel section that contains a with gil block
must have the thread-state initialized. The call to
        PyGILState_Release() then deallocates our threadstate. If we didn't
        do this, each with gil block would allocate and deallocate one, thereby
        losing exception information before it could be saved prior to leaving
        the parallel section.
"""
self.begin_of_parallel_block = code.insertion_point()
def end_parallel_block(self, code):
"Acquire the GIL, deallocate threadstate, release"
if self.error_label_used:
begin_code = self.begin_of_parallel_block
end_code = code
begin_code.put_ensure_gil(declare_gilstate=True)
begin_code.putln("Py_BEGIN_ALLOW_THREADS")
end_code.putln("Py_END_ALLOW_THREADS")
end_code.put_release_ensured_gil()
def trap_parallel_exit(self, code, should_flush=False):
"""
Trap any kind of return inside a parallel construct. 'should_flush'
indicates whether the variable should be flushed, which is needed by
prange to skip the loop. It also indicates whether we need to register
a continue (we need this for parallel blocks, but not for prange
loops, as it is a direct jump there).
It uses the same mechanism as try/finally:
1 continue
2 break
3 return
4 error
"""
save_lastprivates_label = code.new_label()
dont_return_label = code.new_label()
insertion_point = code.insertion_point()
self.any_label_used = False
self.breaking_label_used = False
self.error_label_used = False
self.parallel_private_temps = []
all_labels = code.get_all_labels()
# Figure this out before starting to generate any code
for label in all_labels:
if code.label_used(label):
self.breaking_label_used = (self.breaking_label_used or
label != code.continue_label)
self.any_label_used = True
if self.any_label_used:
code.put_goto(dont_return_label)
for i, label in enumerate(all_labels):
if not code.label_used(label):
continue
is_continue_label = label == code.continue_label
code.put_label(label)
if not (should_flush and is_continue_label):
if label == code.error_label:
self.error_label_used = True
self.fetch_parallel_exception(code)
code.putln("%s = %d;" % (Naming.parallel_why, i + 1))
if (self.breaking_label_used and self.is_prange and not
is_continue_label):
code.put_goto(save_lastprivates_label)
else:
code.put_goto(dont_return_label)
if self.any_label_used:
if self.is_prange and self.breaking_label_used:
# Don't rely on lastprivate, save our lastprivates
code.put_label(save_lastprivates_label)
self.save_parallel_vars(code)
code.put_label(dont_return_label)
if should_flush and self.breaking_label_used:
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_why)
def save_parallel_vars(self, code):
"""
The following shenanigans are instated when we break, return or
propagate errors from a prange. In this case we cannot rely on
lastprivate() to do its job, as no iterations may have executed yet
in the last thread, leaving the values undefined. It is most likely
that the breaking thread has well-defined values of the lastprivate
variables, so we keep those values.
"""
section_name = ("__pyx_parallel_lastprivates%d" %
self.critical_section_counter)
code.putln_openmp("#pragma omp critical(%s)" % section_name)
ParallelStatNode.critical_section_counter += 1
code.begin_block() # begin critical section
c = self.begin_of_parallel_control_block_point
temp_count = 0
for entry, (op, lastprivate) in self.privates.iteritems():
if not lastprivate or entry.type.is_pyobject:
continue
type_decl = entry.type.declaration_code("")
temp_cname = "__pyx_parallel_temp%d" % temp_count
private_cname = entry.cname
temp_count += 1
# Declare the parallel private in the outer block
c.putln("%s %s;" % (type_decl, temp_cname))
# Initialize before escaping
code.putln("%s = %s;" % (temp_cname, private_cname))
self.parallel_private_temps.append((temp_cname, private_cname))
code.end_block() # end critical section
def fetch_parallel_exception(self, code):
"""
As each OpenMP thread may raise an exception, we need to fetch that
exception from the threadstate and save it for after the parallel
section where it can be re-raised in the master thread.
Although it would seem that __pyx_filename, __pyx_lineno and
__pyx_clineno are only assigned to under exception conditions (i.e.,
when we have the GIL), and thus should be allowed to be shared without
any race condition, they are in fact subject to the same race
conditions that they were previously when they were global variables
and functions were allowed to release the GIL:
thread A thread B
acquire
set lineno
release
acquire
set lineno
release
acquire
fetch exception
release
skip the fetch
deallocate threadstate deallocate threadstate
"""
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.putln_openmp("#pragma omp flush(%s)" % Naming.parallel_exc_type)
code.putln(
"if (!%s) {" % Naming.parallel_exc_type)
code.putln("__Pyx_ErrFetch(&%s, &%s, &%s);" % self.parallel_exc)
pos_info = chain(*zip(self.parallel_pos_info, self.pos_info))
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.putln('__Pyx_GOTREF(%s);' % Naming.parallel_exc_type)
code.putln(
"}")
code.put_release_ensured_gil()
code.end_block()
def restore_parallel_exception(self, code):
"Re-raise a parallel exception"
code.begin_block()
code.put_ensure_gil(declare_gilstate=True)
code.putln("__Pyx_ErrRestore(%s, %s, %s);" % self.parallel_exc)
pos_info = chain(*zip(self.pos_info, self.parallel_pos_info))
code.putln("%s = %s; %s = %s; %s = %s;" % tuple(pos_info))
code.putln("__Pyx_GIVEREF(%s);" % Naming.parallel_exc_type)
code.put_release_ensured_gil()
code.end_block()
def restore_labels(self, code):
"""
        Restore all old labels. Call this before the 'else' clause of for
        loops and always before ending the parallel control flow block.
"""
code.set_all_labels(self.old_loop_labels + (self.old_return_label,
self.old_error_label))
def end_parallel_control_flow_block(self, code,
break_=False, continue_=False):
"""
This ends the parallel control flow block and based on how the parallel
section was exited, takes the corresponding action. The break_ and
continue_ parameters indicate whether these should be propagated
outwards:
for i in prange(...):
with cython.parallel.parallel():
continue
Here break should be trapped in the parallel block, and propagated to
the for loop.
"""
c = self.begin_of_parallel_control_block_point
# Firstly, always prefer errors over returning, continue or break
if self.error_label_used:
c.putln("const char *%s; int %s, %s;" % self.parallel_pos_info)
c.putln("%s = NULL; %s = %s = 0;" % self.parallel_pos_info)
c.putln("PyObject *%s = NULL, *%s = NULL, *%s = NULL;" %
self.parallel_exc)
code.putln(
"if (%s) {" % Naming.parallel_exc_type)
code.putln("/* This may have been overridden by a continue, "
"break or return in another thread. Prefer the error. */")
code.putln("%s = 4;" % Naming.parallel_why)
code.putln(
"}")
if continue_:
any_label_used = self.any_label_used
else:
any_label_used = self.breaking_label_used
if any_label_used:
# __pyx_parallel_why is used, declare and initialize
c.putln("int %s;" % Naming.parallel_why)
c.putln("%s = 0;" % Naming.parallel_why)
code.putln(
"if (%s) {" % Naming.parallel_why)
for temp_cname, private_cname in self.parallel_private_temps:
code.putln("%s = %s;" % (private_cname, temp_cname))
code.putln("switch (%s) {" % Naming.parallel_why)
if continue_:
code.put(" case 1: ")
code.put_goto(code.continue_label)
if break_:
code.put(" case 2: ")
code.put_goto(code.break_label)
code.put(" case 3: ")
code.put_goto(code.return_label)
if self.error_label_used:
code.globalstate.use_utility_code(restore_exception_utility_code)
code.putln(" case 4:")
self.restore_parallel_exception(code)
code.put_goto(code.error_label)
code.putln("}") # end switch
code.putln(
"}") # end if
code.end_block() # end parallel control flow block
class ParallelWithBlockNode(ParallelStatNode):
"""
This node represents a 'with cython.parallel.parallel():' block
"""
valid_keyword_arguments = ['num_threads']
num_threads = None
def analyse_declarations(self, env):
super(ParallelWithBlockNode, self).analyse_declarations(env)
if self.args:
error(self.pos, "cython.parallel.parallel() does not take "
"positional arguments")
def generate_execution_code(self, code):
self.declare_closure_privates(code)
self.setup_parallel_control_flow_block(code)
code.putln("#ifdef _OPENMP")
code.put("#pragma omp parallel ")
if self.privates:
privates = [e.cname for e in self.privates
if not e.type.is_pyobject]
code.put('private(%s)' % ', '.join(privates))
self.privatization_insertion_point = code.insertion_point()
self.put_num_threads(code)
code.putln("")
code.putln("#endif /* _OPENMP */")
code.begin_block() # parallel block
self.begin_parallel_block(code)
self.initialize_privates_to_nan(code)
code.funcstate.start_collecting_temps()
self.body.generate_execution_code(code)
self.trap_parallel_exit(code)
self.privatize_temps(code)
self.end_parallel_block(code)
code.end_block() # end parallel block
continue_ = code.label_used(code.continue_label)
break_ = code.label_used(code.break_label)
self.restore_labels(code)
self.end_parallel_control_flow_block(code, break_=break_,
continue_=continue_)
self.release_closure_privates(code)
class ParallelRangeNode(ParallelStatNode):
"""
This node represents a 'for i in cython.parallel.prange():' construct.
target NameNode the target iteration variable
else_clause Node or None the else clause of this loop
"""
child_attrs = ['body', 'target', 'else_clause', 'args']
body = target = else_clause = args = None
start = stop = step = None
is_prange = True
nogil = None
schedule = None
num_threads = None
valid_keyword_arguments = ['schedule', 'nogil', 'num_threads']
def __init__(self, pos, **kwds):
super(ParallelRangeNode, self).__init__(pos, **kwds)
# Pretend to be a ForInStatNode for control flow analysis
self.iterator = PassStatNode(pos)
def analyse_declarations(self, env):
super(ParallelRangeNode, self).analyse_declarations(env)
self.target.analyse_target_declaration(env)
if self.else_clause is not None:
self.else_clause.analyse_declarations(env)
if not self.args or len(self.args) > 3:
error(self.pos, "Invalid number of positional arguments to prange")
return
if len(self.args) == 1:
self.stop, = self.args
elif len(self.args) == 2:
self.start, self.stop = self.args
else:
self.start, self.stop, self.step = self.args
if hasattr(self.schedule, 'decode'):
self.schedule = self.schedule.decode('ascii')
if self.schedule not in (None, 'static', 'dynamic', 'guided',
'runtime'):
error(self.pos, "Invalid schedule argument to prange: %s" %
(self.schedule,))
def analyse_expressions(self, env):
if self.nogil:
was_nogil = env.nogil
env.nogil = True
if self.target is None:
error(self.pos, "prange() can only be used as part of a for loop")
return
self.target.analyse_target_types(env)
if not self.target.type.is_numeric:
# Not a valid type, assume one for now anyway
if not self.target.type.is_pyobject:
# nogil_check will catch the is_pyobject case
error(self.target.pos,
"Must be of numeric type, not %s" % self.target.type)
self.index_type = PyrexTypes.c_py_ssize_t_type
else:
self.index_type = self.target.type
# Setup start, stop and step, allocating temps if needed
self.names = 'start', 'stop', 'step'
start_stop_step = self.start, self.stop, self.step
for node, name in zip(start_stop_step, self.names):
if node is not None:
node.analyse_types(env)
if not node.type.is_numeric:
error(node.pos, "%s argument must be numeric" % name)
continue
if not node.is_literal:
node = node.coerce_to_temp(env)
setattr(self, name, node)
# As we range from 0 to nsteps, computing the index along the
# way, we need a fitting type for 'i' and 'nsteps'
self.index_type = PyrexTypes.widest_numeric_type(
self.index_type, node.type)
if self.else_clause is not None:
self.else_clause.analyse_expressions(env)
        # Although not actually an assignment in this scope, it should be
        # treated as such to ensure it is unpacked if it is a closure temp,
        # and to ensure lastprivate behaviour and propagation. If the target
        # index is not a NameNode, it won't have an entry, and an error was
        # issued by ParallelRangeTransform.
if hasattr(self.target, 'entry'):
self.assignments[self.target.entry] = self.target.pos, None
super(ParallelRangeNode, self).analyse_expressions(env)
if self.nogil:
env.nogil = was_nogil
def nogil_check(self, env):
names = 'start', 'stop', 'step', 'target'
nodes = self.start, self.stop, self.step, self.target
for name, node in zip(names, nodes):
if node is not None and node.type.is_pyobject:
error(node.pos, "%s may not be a Python object "
"as we don't have the GIL" % name)
def generate_execution_code(self, code):
"""
Generate code in the following steps
1) copy any closure variables determined thread-private
into temporaries
2) allocate temps for start, stop and step
3) generate a loop that calculates the total number of steps,
which then computes the target iteration variable for every step:
for i in prange(start, stop, step):
...
becomes
nsteps = (stop - start) / step;
i = start;
#pragma omp parallel for lastprivate(i)
for (temp = 0; temp < nsteps; temp++) {
i = start + step * temp;
...
}
Note that accumulation of 'i' would have a data dependency
between iterations.
Also, you can't do this
for (i = start; i < stop; i += step)
...
as the '<' operator should become '>' for descending loops.
'for i from x < i < y:' does not suffer from this problem
as the relational operator is known at compile time!
4) release our temps and write back any private closure variables
"""
self.declare_closure_privates(code)
# This can only be a NameNode
target_index_cname = self.target.entry.cname
# This will be used as the dict to format our code strings, holding
        # the start, stop, step, temps and target cnames
fmt_dict = {
'target': target_index_cname,
}
# Setup start, stop and step, allocating temps if needed
start_stop_step = self.start, self.stop, self.step
defaults = '0', '0', '1'
for node, name, default in zip(start_stop_step, self.names, defaults):
if node is None:
result = default
elif node.is_literal:
result = node.get_constant_c_result_code()
else:
node.generate_evaluation_code(code)
result = node.result()
fmt_dict[name] = result
fmt_dict['i'] = code.funcstate.allocate_temp(self.index_type, False)
fmt_dict['nsteps'] = code.funcstate.allocate_temp(self.index_type, False)
# TODO: check if the step is 0 and if so, raise an exception in a
# 'with gil' block. For now, just abort
code.putln("if (%(step)s == 0) abort();" % fmt_dict)
self.setup_parallel_control_flow_block(code) # parallel control flow block
self.control_flow_var_code_point = code.insertion_point()
# Note: nsteps is private in an outer scope if present
code.putln("%(nsteps)s = (%(stop)s - %(start)s) / %(step)s;" % fmt_dict)
# The target iteration variable might not be initialized, do it only if
# we are executing at least 1 iteration, otherwise we should leave the
# target unaffected. The target iteration variable is firstprivate to
# shut up compiler warnings caused by lastprivate, as the compiler
# erroneously believes that nsteps may be <= 0, leaving the private
# target index uninitialized
code.putln("if (%(nsteps)s > 0)" % fmt_dict)
code.begin_block() # if block
code.putln("%(target)s = 0;" % fmt_dict)
self.generate_loop(code, fmt_dict)
code.end_block() # end if block
self.restore_labels(code)
if self.else_clause:
if self.breaking_label_used:
code.put("if (%s < 2)" % Naming.parallel_why)
code.begin_block() # else block
code.putln("/* else */")
self.else_clause.generate_execution_code(code)
code.end_block() # end else block
# ------ cleanup ------
self.end_parallel_control_flow_block(code) # end parallel control flow block
# And finally, release our privates and write back any closure
# variables
for temp in start_stop_step:
if temp is not None:
temp.generate_disposal_code(code)
temp.free_temps(code)
code.funcstate.release_temp(fmt_dict['i'])
code.funcstate.release_temp(fmt_dict['nsteps'])
self.release_closure_privates(code)
def generate_loop(self, code, fmt_dict):
code.putln("#ifdef _OPENMP")
if not self.is_parallel:
code.put("#pragma omp for")
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.parent.privatization_insertion_point
else:
code.put("#pragma omp parallel")
self.privatization_insertion_point = code.insertion_point()
reduction_codepoint = self.privatization_insertion_point
code.putln("")
code.putln("#endif /* _OPENMP */")
code.begin_block() # pragma omp parallel begin block
# Initialize the GIL if needed for this thread
self.begin_parallel_block(code)
code.putln("#ifdef _OPENMP")
code.put("#pragma omp for")
for entry, (op, lastprivate) in self.privates.iteritems():
# Don't declare the index variable as a reduction
if op and op in "+*-&^|" and entry != self.target.entry:
if entry.type.is_pyobject:
error(self.pos, "Python objects cannot be reductions")
else:
#code.put(" reduction(%s:%s)" % (op, entry.cname))
# This is the only way reductions + nesting works in gcc4.5
reduction_codepoint.put(
" reduction(%s:%s)" % (op, entry.cname))
else:
if entry == self.target.entry:
code.put(" firstprivate(%s)" % entry.cname)
code.put(" lastprivate(%s)" % entry.cname)
continue
if not entry.type.is_pyobject:
if lastprivate:
private = 'lastprivate'
else:
private = 'private'
code.put(" %s(%s)" % (private, entry.cname))
if self.schedule:
code.put(" schedule(%s)" % self.schedule)
self.put_num_threads(reduction_codepoint)
code.putln("")
code.putln("#endif /* _OPENMP */")
code.put("for (%(i)s = 0; %(i)s < %(nsteps)s; %(i)s++)" % fmt_dict)
code.begin_block() # for loop block
guard_around_body_codepoint = code.insertion_point()
# Start if guard block around the body. This may be unnecessary, but
# at least it doesn't spoil indentation
code.begin_block()
code.putln("%(target)s = %(start)s + %(step)s * %(i)s;" % fmt_dict)
self.initialize_privates_to_nan(code, exclude=self.target.entry)
if self.is_parallel:
code.funcstate.start_collecting_temps()
self.body.generate_execution_code(code)
self.trap_parallel_exit(code, should_flush=True)
self.privatize_temps(code)
if self.breaking_label_used:
# Put a guard around the loop body in case return, break or
# exceptions might be used
guard_around_body_codepoint.putln("if (%s < 2)" % Naming.parallel_why)
code.end_block() # end guard around loop body
code.end_block() # end for loop block
if self.is_parallel:
# Release the GIL and deallocate the thread state
self.end_parallel_block(code)
code.end_block() # pragma omp parallel end block
class CnameDecoratorNode(StatNode):
"""
This node is for the cname decorator in CythonUtilityCode:
@cname('the_cname')
cdef func(...):
...
In case of a cdef class the cname specifies the objstruct_cname.
node the node to which the cname decorator is applied
cname the cname the node should get
"""
child_attrs = ['node']
def analyse_declarations(self, env):
self.node.analyse_declarations(env)
self.is_function = isinstance(self.node, FuncDefNode)
is_struct_or_enum = isinstance(self.node, (CStructOrUnionDefNode,
CEnumDefNode))
e = self.node.entry
if self.is_function:
e.cname = self.cname
e.func_cname = self.cname
elif is_struct_or_enum:
e.cname = e.type.cname = self.cname
else:
scope = self.node.scope
e.cname = self.cname
e.type.objstruct_cname = self.cname + '_obj'
e.type.typeobj_cname = Naming.typeobj_prefix + self.cname
e.type.typeptr_cname = self.cname + '_type'
e.as_variable.cname = py_object_type.cast_code(e.type.typeptr_cname)
scope.scope_prefix = self.cname + "_"
for name, entry in scope.entries.iteritems():
if entry.func_cname:
cname = entry.cname
if '.' in cname:
# remove __pyx_base from func_cname
cname = cname.split('.')[-1]
entry.func_cname = '%s_%s' % (self.cname, cname)
def analyse_expressions(self, env):
self.node.analyse_expressions(env)
def generate_function_definitions(self, env, code):
"Ensure a prototype for every @cname method in the right place"
if self.is_function and env.is_c_class_scope:
# method in cdef class, generate a prototype in the header
h_code = code.globalstate['utility_code_proto']
if isinstance(self.node, DefNode):
self.node.generate_function_header(
h_code, with_pymethdef=False, proto_only=True)
else:
import ModuleNode
entry = self.node.entry
cname = entry.cname
entry.cname = entry.func_cname
ModuleNode.generate_cfunction_declaration(
entry,
env.global_scope(),
h_code,
definition=True)
entry.cname = cname
self.node.generate_function_definitions(env, code)
def generate_execution_code(self, code):
self.node.generate_execution_code(code)
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
utility_function_predeclarations = \
"""
/* inline attribute */
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
/* unused attribute */
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
"""
if Options.gcc_branch_hints:
branch_prediction_macros = \
"""
#ifdef __GNUC__
/* Test for GCC > 2.95 */
#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* __GNUC__ > 2 ... */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ > 2 ... */
#else /* __GNUC__ */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
"""
else:
branch_prediction_macros = \
"""
#define likely(x) (x)
#define unlikely(x) (x)
"""
#get_name_predeclaration = \
#"static PyObject *__Pyx_GetName(PyObject *dict, char *name); /*proto*/"
#get_name_interned_predeclaration = \
#"static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/"
#------------------------------------------------------------------------------------
printing_utility_code = UtilityCode(
proto = """
static int __Pyx_Print(PyObject*, PyObject *, int); /*proto*/
#if PY_MAJOR_VERSION >= 3
static PyObject* %s = 0;
static PyObject* %s = 0;
#endif
""" % (Naming.print_function, Naming.print_function_kwargs),
cleanup = """
#if PY_MAJOR_VERSION >= 3
Py_CLEAR(%s);
Py_CLEAR(%s);
#endif
""" % (Naming.print_function, Naming.print_function_kwargs),
impl = r"""
#if PY_MAJOR_VERSION < 3
static PyObject *__Pyx_GetStdout(void) {
PyObject *f = PySys_GetObject((char *)"stdout");
if (!f) {
PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout");
}
return f;
}
static int __Pyx_Print(PyObject* f, PyObject *arg_tuple, int newline) {
PyObject* v;
int i;
if (!f) {
if (!(f = __Pyx_GetStdout()))
return -1;
}
for (i=0; i < PyTuple_GET_SIZE(arg_tuple); i++) {
if (PyFile_SoftSpace(f, 1)) {
if (PyFile_WriteString(" ", f) < 0)
return -1;
}
v = PyTuple_GET_ITEM(arg_tuple, i);
if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0)
return -1;
if (PyString_Check(v)) {
char *s = PyString_AsString(v);
Py_ssize_t len = PyString_Size(v);
if (len > 0 &&
isspace(Py_CHARMASK(s[len-1])) &&
s[len-1] != ' ')
PyFile_SoftSpace(f, 0);
}
}
if (newline) {
if (PyFile_WriteString("\n", f) < 0)
return -1;
PyFile_SoftSpace(f, 0);
}
return 0;
}
#else /* Python 3 has a print function */
static int __Pyx_Print(PyObject* stream, PyObject *arg_tuple, int newline) {
PyObject* kwargs = 0;
PyObject* result = 0;
PyObject* end_string;
if (unlikely(!%(PRINT_FUNCTION)s)) {
%(PRINT_FUNCTION)s = __Pyx_GetAttrString(%(BUILTINS)s, "print");
if (!%(PRINT_FUNCTION)s)
return -1;
}
if (stream) {
kwargs = PyDict_New();
if (unlikely(!kwargs))
return -1;
if (unlikely(PyDict_SetItemString(kwargs, "file", stream) < 0))
goto bad;
if (!newline) {
end_string = PyUnicode_FromStringAndSize(" ", 1);
if (unlikely(!end_string))
goto bad;
if (PyDict_SetItemString(kwargs, "end", end_string) < 0) {
Py_DECREF(end_string);
goto bad;
}
Py_DECREF(end_string);
}
} else if (!newline) {
if (unlikely(!%(PRINT_KWARGS)s)) {
%(PRINT_KWARGS)s = PyDict_New();
if (unlikely(!%(PRINT_KWARGS)s))
return -1;
end_string = PyUnicode_FromStringAndSize(" ", 1);
if (unlikely(!end_string))
return -1;
if (PyDict_SetItemString(%(PRINT_KWARGS)s, "end", end_string) < 0) {
Py_DECREF(end_string);
return -1;
}
Py_DECREF(end_string);
}
kwargs = %(PRINT_KWARGS)s;
}
result = PyObject_Call(%(PRINT_FUNCTION)s, arg_tuple, kwargs);
if (unlikely(kwargs) && (kwargs != %(PRINT_KWARGS)s))
Py_DECREF(kwargs);
if (!result)
return -1;
Py_DECREF(result);
return 0;
bad:
if (kwargs != %(PRINT_KWARGS)s)
Py_XDECREF(kwargs);
return -1;
}
#endif
""" % {'BUILTINS' : Naming.builtins_cname,
'PRINT_FUNCTION' : Naming.print_function,
'PRINT_KWARGS' : Naming.print_function_kwargs}
)
printing_one_utility_code = UtilityCode(
proto = """
static int __Pyx_PrintOne(PyObject* stream, PyObject *o); /*proto*/
""",
impl = r"""
#if PY_MAJOR_VERSION < 3
static int __Pyx_PrintOne(PyObject* f, PyObject *o) {
if (!f) {
if (!(f = __Pyx_GetStdout()))
return -1;
}
if (PyFile_SoftSpace(f, 0)) {
if (PyFile_WriteString(" ", f) < 0)
return -1;
}
if (PyFile_WriteObject(o, f, Py_PRINT_RAW) < 0)
return -1;
if (PyFile_WriteString("\n", f) < 0)
return -1;
return 0;
/* the line below is just to avoid compiler
* compiler warnings about unused functions */
return __Pyx_Print(f, NULL, 0);
}
#else /* Python 3 has a print function */
static int __Pyx_PrintOne(PyObject* stream, PyObject *o) {
int res;
PyObject* arg_tuple = PyTuple_New(1);
if (unlikely(!arg_tuple))
return -1;
Py_INCREF(o);
PyTuple_SET_ITEM(arg_tuple, 0, o);
res = __Pyx_Print(stream, arg_tuple, 1);
Py_DECREF(arg_tuple);
return res;
}
#endif
""",
requires=[printing_utility_code])
#------------------------------------------------------------------------------------
# Exception raising code
#
# Exceptions are raised by __Pyx_Raise() and stored as plain
# type/value/tb in PyThreadState->curexc_*. When being caught by an
# 'except' statement, curexc_* is moved over to exc_* by
# __Pyx_GetException()
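# Rough lifecycle implied by the comment above and the helpers below (illustrative):
#   raise ...            -> __Pyx_Raise()          -> stored in tstate->curexc_{type,value,traceback}
#   except ... as e:     -> __Pyx_GetException()   -> normalizes the triple and moves it into
#                                                     tstate->exc_{type,value,traceback}
#   leaving the except   -> __Pyx_ExceptionReset() restores the previously saved exc_* state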
restore_exception_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
""",
impl = """
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyThreadState *tstate = PyThreadState_GET();
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
PyThreadState *tstate = PyThreadState_GET();
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
""")
# The following function is based on do_raise() from ceval.c. There
# are separate versions for Python2 and Python3 as exception handling
# has changed quite a lot between the two versions.
raise_utility_code = UtilityCode(
proto = """
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
""",
impl = """
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
/* cause is unused */
Py_XINCREF(type);
Py_XINCREF(value);
Py_XINCREF(tb);
/* First, check the traceback argument, replacing None with NULL. */
if (tb == Py_None) {
Py_DECREF(tb);
tb = 0;
}
else if (tb != NULL && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
/* Next, replace a missing value with None */
if (value == NULL) {
value = Py_None;
Py_INCREF(value);
}
#if PY_VERSION_HEX < 0x02050000
if (!PyClass_Check(type))
#else
if (!PyType_Check(type))
#endif
{
/* Raising an instance. The value should be a dummy. */
if (value != Py_None) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
/* Normalize to raise <class>, <instance> */
Py_DECREF(value);
value = type;
#if PY_VERSION_HEX < 0x02050000
if (PyInstance_Check(type)) {
type = (PyObject*) ((PyInstanceObject*)type)->in_class;
Py_INCREF(type);
}
else {
type = 0;
PyErr_SetString(PyExc_TypeError,
"raise: exception must be an old-style class or instance");
goto raise_error;
}
#else
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
#endif
}
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else /* Python 3+ */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (!PyExceptionClass_Check(type)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
}
else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
}
else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
if (!value) {
value = PyObject_CallObject(type, NULL);
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
PyThreadState *tstate = PyThreadState_GET();
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
}
bad:
return;
}
#endif
""",
requires=[restore_exception_utility_code])
#------------------------------------------------------------------------------------
get_exception_utility_code = UtilityCode(
proto = """
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
""",
impl = """
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *local_type, *local_value, *local_tb;
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyThreadState *tstate = PyThreadState_GET();
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
if (unlikely(tstate->curexc_type))
goto bad;
#if PY_MAJOR_VERSION >= 3
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
#endif
*type = local_type;
*value = local_value;
*tb = local_tb;
Py_INCREF(local_type);
Py_INCREF(local_value);
Py_INCREF(local_tb);
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
/* Make sure tstate is in a consistent state when we XDECREF
these objects (XDECREF may run arbitrary code). */
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
""")
#------------------------------------------------------------------------------------
get_exception_tuple_utility_code = UtilityCode(proto="""
static PyObject *__Pyx_GetExceptionTuple(void); /*proto*/
""",
# I doubt that calling __Pyx_GetException() here is correct as it moves
# the exception from tstate->curexc_* to tstate->exc_*, which prevents
# exception handlers later on from receiving it.
impl = """
static PyObject *__Pyx_GetExceptionTuple(void) {
PyObject *type = NULL, *value = NULL, *tb = NULL;
if (__Pyx_GetException(&type, &value, &tb) == 0) {
PyObject* exc_info = PyTuple_New(3);
if (exc_info) {
Py_INCREF(type);
Py_INCREF(value);
Py_INCREF(tb);
PyTuple_SET_ITEM(exc_info, 0, type);
PyTuple_SET_ITEM(exc_info, 1, value);
PyTuple_SET_ITEM(exc_info, 2, tb);
return exc_info;
}
}
return NULL;
}
""",
requires=[get_exception_utility_code])
#------------------------------------------------------------------------------------
reset_exception_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
""",
impl = """
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) {
PyThreadState *tstate = PyThreadState_GET();
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyThreadState *tstate = PyThreadState_GET();
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
""")
#------------------------------------------------------------------------------------
swap_exception_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
""",
impl = """
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyThreadState *tstate = PyThreadState_GET();
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
""")
#------------------------------------------------------------------------------------
arg_type_test_utility_code = UtilityCode(
proto = """
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact); /*proto*/
""",
impl = """
static int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact)
{
if (!type) {
PyErr_Format(PyExc_SystemError, "Missing type object");
return 0;
}
if (none_allowed && obj == Py_None) return 1;
else if (exact) {
if (Py_TYPE(obj) == type) return 1;
}
else {
if (PyObject_TypeCheck(obj, type)) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%s' has incorrect type (expected %s, got %s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
""")
#------------------------------------------------------------------------------------
#
# __Pyx_RaiseArgtupleInvalid raises the correct exception when too
# many or too few positional arguments were found. This handles
# Py_ssize_t formatting correctly.
raise_argtuple_invalid_utility_code = UtilityCode(
proto = """
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
""",
impl = """
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%s() takes %s %"PY_FORMAT_SIZE_T"d positional argument%s (%"PY_FORMAT_SIZE_T"d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
""")
raise_keyword_required_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseKeywordRequired(const char* func_name, PyObject* kw_name); /*proto*/
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseKeywordRequired(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() needs keyword-only argument %U", func_name, kw_name);
#else
"%s() needs keyword-only argument %s", func_name,
PyString_AS_STRING(kw_name));
#endif
}
""")
raise_double_keywords_utility_code = UtilityCode(
proto = """
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name, PyObject* kw_name); /*proto*/
""",
impl = """
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AS_STRING(kw_name));
#endif
}
""")
#------------------------------------------------------------------------------------
#
# __Pyx_CheckKeywordStrings raises an error if non-string keywords
# were passed to a function, or if any keywords were passed to a
# function that does not accept them.
keyword_string_check_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_CheckKeywordStrings(PyObject *kwdict,
const char* function_name, int kw_allowed); /*proto*/
""",
impl = """
static CYTHON_INLINE int __Pyx_CheckKeywordStrings(
PyObject *kwdict,
const char* function_name,
int kw_allowed)
{
PyObject* key = 0;
Py_ssize_t pos = 0;
while (PyDict_Next(kwdict, &pos, &key, 0)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key)))
#else
if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key)))
#endif
goto invalid_keyword_type;
}
if ((!kw_allowed) && unlikely(key))
goto invalid_keyword;
return 1;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%s() keywords must be strings", function_name);
return 0;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%s() got an unexpected keyword argument '%s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
return 0;
}
""")
#------------------------------------------------------------------------------------
#
# __Pyx_ParseOptionalKeywords copies the optional/unknown keyword
# arguments from the kwds dict into kwds2. If kwds2 is NULL, unknown
# keywords will raise an invalid keyword error.
#
# Three kinds of errors are checked: 1) non-string keywords, 2)
# unexpected keywords and 3) overlap with positional arguments.
#
# If num_pos_args is greater than 0, it denotes the number of positional
# arguments that were passed and that must therefore not appear
# amongst the keywords as well.
#
# This method does not check for required keyword arguments.
#
parse_keywords_utility_code = UtilityCode(
proto = """
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
const char* function_name); /*proto*/
""",
impl = """
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
} else {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
#else
if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
#endif
goto invalid_keyword_type;
} else {
for (name = first_kw_arg; *name; name++) {
#if PY_MAJOR_VERSION >= 3
if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
PyUnicode_Compare(**name, key) == 0) break;
#else
if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
_PyString_Eq(**name, key)) break;
#endif
}
if (*name) {
values[name-argnames] = value;
} else {
/* unexpected keyword found */
for (name=argnames; name != first_kw_arg; name++) {
if (**name == key) goto arg_passed_twice;
#if PY_MAJOR_VERSION >= 3
if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
#else
if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
_PyString_Eq(**name, key)) goto arg_passed_twice;
#endif
}
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
}
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, **name);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%s() got an unexpected keyword argument '%s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
""",
requires=[raise_double_keywords_utility_code])
#------------------------------------------------------------------------------------
traceback_utility_code = UtilityCode(
proto = """
static void __Pyx_AddTraceback(const char *funcname, int %(CLINENO)s,
int %(LINENO)s, const char *%(FILENAME)s); /*proto*/
""" % {
'FILENAME': Naming.filename_cname,
'LINENO': Naming.lineno_cname,
'CLINENO': Naming.clineno_cname,
},
impl = """
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static void __Pyx_AddTraceback(const char *funcname, int %(CLINENO)s,
int %(LINENO)s, const char *%(FILENAME)s) {
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
PyObject *py_globals = 0;
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(%(FILENAME)s);
#else
py_srcfile = PyUnicode_FromString(%(FILENAME)s);
#endif
if (!py_srcfile) goto bad;
if (%(CLINENO)s) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%%s (%%s:%%d)", funcname, %(CFILENAME)s, %(CLINENO)s);
#else
py_funcname = PyUnicode_FromFormat( "%%s (%%s:%%d)", funcname, %(CFILENAME)s, %(CLINENO)s);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_globals = PyModule_GetDict(%(GLOBALS)s);
if (!py_globals) goto bad;
py_code = __Pyx_PyCode_New(
0, /*int argcount,*/
0, /*int kwonlyargcount,*/
0, /*int nlocals,*/
0, /*int stacksize,*/
0, /*int flags,*/
%(EMPTY_BYTES)s, /*PyObject *code,*/
%(EMPTY_TUPLE)s, /*PyObject *consts,*/
%(EMPTY_TUPLE)s, /*PyObject *names,*/
%(EMPTY_TUPLE)s, /*PyObject *varnames,*/
%(EMPTY_TUPLE)s, /*PyObject *freevars,*/
%(EMPTY_TUPLE)s, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
%(LINENO)s, /*int firstlineno,*/
%(EMPTY_BYTES)s /*PyObject *lnotab*/
);
if (!py_code) goto bad;
py_frame = PyFrame_New(
PyThreadState_GET(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
py_globals, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
py_frame->f_lineno = %(LINENO)s;
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
""" % {
'FILENAME': Naming.filename_cname,
'LINENO': Naming.lineno_cname,
'CFILENAME': Naming.cfilenm_cname,
'CLINENO': Naming.clineno_cname,
'GLOBALS': Naming.module_cname,
'EMPTY_TUPLE' : Naming.empty_tuple,
'EMPTY_BYTES' : Naming.empty_bytes,
})
#------------------------------------------------------------------------------------
unraisable_exception_utility_code = UtilityCode(
proto = """
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename); /*proto*/
""",
impl = """
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
__Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
#if PY_MAJOR_VERSION < 3
ctx = PyString_FromString(name);
#else
ctx = PyUnicode_FromString(name);
#endif
__Pyx_ErrRestore(old_exc, old_val, old_tb);
if (!ctx) {
PyErr_WriteUnraisable(Py_None);
} else {
PyErr_WriteUnraisable(ctx);
Py_DECREF(ctx);
}
}
""",
requires=[restore_exception_utility_code])
#------------------------------------------------------------------------------------
set_vtable_utility_code = UtilityCode(
proto = """
static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
""",
impl = """
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItemString(dict, "__pyx_vtable__", ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
""")
#------------------------------------------------------------------------------------
get_vtable_utility_code = UtilityCode(
proto = """
static void* __Pyx_GetVtable(PyObject *dict); /*proto*/
""",
impl = r"""
static void* __Pyx_GetVtable(PyObject *dict) {
void* ptr;
PyObject *ob = PyMapping_GetItemString(dict, (char *)"__pyx_vtable__");
if (!ob)
goto bad;
#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
ptr = PyCapsule_GetPointer(ob, 0);
#else
ptr = PyCObject_AsVoidPtr(ob);
#endif
if (!ptr && !PyErr_Occurred())
PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type");
Py_DECREF(ob);
return ptr;
bad:
Py_XDECREF(ob);
return NULL;
}
""")
#------------------------------------------------------------------------------------
init_string_tab_utility_code = UtilityCode(
proto = """
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
""",
impl = """
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else /* Python 3+ has unicode identifiers */
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
++t;
}
return 0;
}
""")
#------------------------------------------------------------------------------------
force_init_threads_utility_code = UtilityCode(
proto="""
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
""")
init_threads = UtilityCode(
init="PyEval_InitThreads();\n",
)
#------------------------------------------------------------------------------------
# Note that cPython ignores PyTrace_EXCEPTION,
# but maybe some other profilers don't.
profile_utility_code = UtilityCode(proto="""
#ifndef CYTHON_PROFILE
#define CYTHON_PROFILE 1
#endif
#ifndef CYTHON_PROFILE_REUSE_FRAME
#define CYTHON_PROFILE_REUSE_FRAME 0
#endif
#if CYTHON_PROFILE
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
#if CYTHON_PROFILE_REUSE_FRAME
#define CYTHON_FRAME_MODIFIER static
#define CYTHON_FRAME_DEL
#else
#define CYTHON_FRAME_MODIFIER
#define CYTHON_FRAME_DEL Py_DECREF(%(FRAME)s)
#endif
#define __Pyx_TraceDeclarations \\
static PyCodeObject *%(FRAME_CODE)s = NULL; \\
CYTHON_FRAME_MODIFIER PyFrameObject *%(FRAME)s = NULL; \\
int __Pyx_use_tracing = 0;
#define __Pyx_TraceCall(funcname, srcfile, firstlineno) \\
if (unlikely(PyThreadState_GET()->use_tracing && PyThreadState_GET()->c_profilefunc)) { \\
__Pyx_use_tracing = __Pyx_TraceSetupAndCall(&%(FRAME_CODE)s, &%(FRAME)s, funcname, srcfile, firstlineno); \\
}
#define __Pyx_TraceException() \\
if (unlikely(__Pyx_use_tracing) && PyThreadState_GET()->use_tracing && PyThreadState_GET()->c_profilefunc) { \\
PyObject *exc_info = __Pyx_GetExceptionTuple(); \\
if (exc_info) { \\
PyThreadState_GET()->c_profilefunc( \\
PyThreadState_GET()->c_profileobj, %(FRAME)s, PyTrace_EXCEPTION, exc_info); \\
Py_DECREF(exc_info); \\
} \\
}
#define __Pyx_TraceReturn(result) \\
if (unlikely(__Pyx_use_tracing) && PyThreadState_GET()->use_tracing && PyThreadState_GET()->c_profilefunc) { \\
PyThreadState_GET()->c_profilefunc( \\
PyThreadState_GET()->c_profileobj, %(FRAME)s, PyTrace_RETURN, (PyObject*)result); \\
CYTHON_FRAME_DEL; \\
}
static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno); /*proto*/
static int __Pyx_TraceSetupAndCall(PyCodeObject** code, PyFrameObject** frame, const char *funcname, const char *srcfile, int firstlineno); /*proto*/
#else
#define __Pyx_TraceDeclarations
#define __Pyx_TraceCall(funcname, srcfile, firstlineno)
#define __Pyx_TraceException()
#define __Pyx_TraceReturn(result)
#endif /* CYTHON_PROFILE */
"""
% {
"FRAME": Naming.frame_cname,
"FRAME_CODE": Naming.frame_code_cname,
},
impl = """
#if CYTHON_PROFILE
static int __Pyx_TraceSetupAndCall(PyCodeObject** code,
PyFrameObject** frame,
const char *funcname,
const char *srcfile,
int firstlineno) {
if (*frame == NULL || !CYTHON_PROFILE_REUSE_FRAME) {
if (*code == NULL) {
*code = __Pyx_createFrameCodeObject(funcname, srcfile, firstlineno);
if (*code == NULL) return 0;
}
*frame = PyFrame_New(
PyThreadState_GET(), /*PyThreadState *tstate*/
*code, /*PyCodeObject *code*/
PyModule_GetDict(%(MODULE)s), /*PyObject *globals*/
0 /*PyObject *locals*/
);
if (*frame == NULL) return 0;
}
else {
(*frame)->f_tstate = PyThreadState_GET();
}
return PyThreadState_GET()->c_profilefunc(PyThreadState_GET()->c_profileobj, *frame, PyTrace_CALL, NULL) == 0;
}
static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno) {
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
PyCodeObject *py_code = 0;
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
py_srcfile = PyString_FromString(srcfile);
#else
py_funcname = PyUnicode_FromString(funcname);
py_srcfile = PyUnicode_FromString(srcfile);
#endif
if (!py_funcname | !py_srcfile) goto bad;
py_code = PyCode_New(
0, /*int argcount,*/
#if PY_MAJOR_VERSION >= 3
0, /*int kwonlyargcount,*/
#endif
0, /*int nlocals,*/
0, /*int stacksize,*/
0, /*int flags,*/
%(EMPTY_BYTES)s, /*PyObject *code,*/
%(EMPTY_TUPLE)s, /*PyObject *consts,*/
%(EMPTY_TUPLE)s, /*PyObject *names,*/
%(EMPTY_TUPLE)s, /*PyObject *varnames,*/
%(EMPTY_TUPLE)s, /*PyObject *freevars,*/
%(EMPTY_TUPLE)s, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
firstlineno, /*int firstlineno,*/
%(EMPTY_BYTES)s /*PyObject *lnotab*/
);
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return py_code;
}
#endif /* CYTHON_PROFILE */
""" % {
'EMPTY_TUPLE' : Naming.empty_tuple,
'EMPTY_BYTES' : Naming.empty_bytes,
"MODULE": Naming.module_cname,
})
################ Utility code for cython.parallel stuff ################
invalid_values_utility_code = UtilityCode(
proto="""\
#include <string.h>
void __pyx_init_nan(void);
static float %(PYX_NAN)s;
""" % vars(Naming),
init="""
/* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
a quiet NaN. */
memset(&%(PYX_NAN)s, 0xFF, sizeof(%(PYX_NAN)s));
""" % vars(Naming))
#------------------------------------------------------------------------------------
raise_import_error_utility_code = UtilityCode(
proto = '''
static CYTHON_INLINE void __Pyx_RaiseImportError(PyObject *name);
''',
impl = '''
static CYTHON_INLINE void __Pyx_RaiseImportError(PyObject *name) {
#if PY_MAJOR_VERSION < 3
PyErr_Format(PyExc_ImportError, "cannot import name %.230s",
PyString_AsString(name));
#else
PyErr_Format(PyExc_ImportError, "cannot import name %S", name);
#endif
}
''')
| hpfem/cython | Cython/Compiler/Nodes.py | Python | apache-2.0 | 322,384 |
from lxml import etree
import logging
from anansi.xml import XMLMessage,gen_element,XMLError
logger = logging.getLogger(__name__)
class TCCError(Exception):
def __init__(self,msg):
super(TCCError,self).__init__(msg)
class TCCMessage(XMLMessage):
def __init__(self,user,comment=""):
super(TCCMessage,self).__init__(gen_element('tcc_request'))
self.user_info(user,comment)
def server_command(self,command):
elem = gen_element("server_command")
elem.append(gen_element("command",text=command))
self.root.append(elem)
def user_info(self,username,comment):
elem = gen_element("user_info")
elem.append(gen_element("name",text=username))
elem.append(gen_element("comment",text=comment))
self.root.append(elem)
def tcc_command(self,command):
elem = gen_element("tcc_command")
elem.append(gen_element("command",text=command))
self.root.append(elem)
def tcc_pointing(self,x,y,
ns_east_state="auto",ns_west_state="auto",
md_east_state="auto",md_west_state="auto",
ns_east_offset=0.0,ns_west_offset=0.0,
md_east_offset=0.0,md_west_offset=0.0,
offset_units="degrees",**attributes):
elem = gen_element("tcc_command")
elem.append(gen_element("command",text="point"))
pointing = gen_element("pointing",attributes=attributes)
pointing.append(gen_element("xcoord",text=str(x)))
pointing.append(gen_element("ycoord",text=str(y)))
ns = gen_element("ns")
ns_east = gen_element("east")
ns_east.append(gen_element("state",text=ns_east_state))
ns_east.append(gen_element("offset",text=str(ns_east_offset),attributes={'units':offset_units}))
ns_west = gen_element("west")
ns_west.append(gen_element("state",text=ns_west_state))
ns_west.append(gen_element("offset",text=str(ns_west_offset),attributes={'units':offset_units}))
ns.append(ns_east)
ns.append(ns_west)
md = gen_element("md")
md_east = gen_element("east")
md_east.append(gen_element("state",text=md_east_state))
md_east.append(gen_element("offset",text=str(md_east_offset),attributes={'units':offset_units}))
md_west = gen_element("west")
md_west.append(gen_element("state",text=md_west_state))
md_west.append(gen_element("offset",text=str(md_west_offset),attributes={'units':offset_units}))
md.append(md_east)
md.append(md_west)
elem.append(pointing)
elem.append(ns)
elem.append(md)
self.root.append(elem)
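# For reference, a point request built by the method above has roughly this
# shape (illustrative, reconstructed from the gen_element calls; actual values
# and attributes depend on the caller):
#
#   <tcc_request>
#     <user_info><name>...</name><comment>...</comment></user_info>
#     <tcc_command>
#       <command>point</command>
#       <pointing ...><xcoord>...</xcoord><ycoord>...</ycoord></pointing>
#       <ns>
#         <east><state>auto</state><offset units="degrees">0.0</offset></east>
#         <west><state>auto</state><offset units="degrees">0.0</offset></west>
#       </ns>
#       <md><!-- same east/west structure as <ns> --></md>
#     </tcc_command>
#   </tcc_request>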
class TCCResponseHandler(XMLMessage):
def __init__(self,msg):
try:
super(TCCResponseHandler,self).__init__(etree.fromstring(msg))
except:
logger.error("Unknown TCC message: %s"%msg)
raise XMLError(msg)
self._parse()
def _parse(self):
if self.root.find('success') is not None:
self.passed = True
self.message = self.root.find('success').text
elif self.root.find('error') is not None:
self.passed = False
self.message = self.root.find('error').text
raise TCCError(self.message)
class TCCControls(object):
def __init__(self,user="anansi"):
conf = config.tcc_server
self.ip = conf.ip
self.port = conf.port
self.user = user
def _send(self,msg):
client = TCPClient(self.ip,self.port,timeout=10.0)
client.send(msg)
return TCCResponseHandler(client.receive())
def track(self,x,y,system,units,**kwargs):
msg = TCCMessage(self.user)
msg.tcc_pointing(x,y,system=system,units=units,**kwargs)
return self._send(str(msg))
def stop(self):
msg = TCCMessage(self.user)
msg.tcc_command("stop")
return self._send(str(msg))
def maintenance_stow(self):
msg = TCCMessage(self.user)
msg.tcc_command("maintenance_stow")
return self._send(str(msg))
def wind_stow(self):
msg = TCCMessage(self.user)
msg.tcc_command("wind")
return self._send(str(msg))
| ewanbarr/anansi | anansi/tcc/tcc_utils.py | Python | apache-2.0 | 4,234 |
"""Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Django settings for graphite project.
# DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead
from os.path import dirname, join, abspath
TEMPLATE_DIRS = (
join(dirname( abspath(__file__) ), 'templates'),
)
#Django settings below, do not touch!
APPEND_SLASH = False
TEMPLATE_DEBUG = False
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
}
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'
# Absolute path to the directory that holds media.
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = ''
# Absolute path to the directory that holds static content.
STATIC_ROOT = '/opt/graphite/webapp/content/'
# URL that handles the static content from STATIC_ROOT.
STATIC_URL = '/content/'
MIDDLEWARE_CLASSES = (
'graphite.middleware.LogExceptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'graphite.urls'
INSTALLED_APPS = (
'graphite.metrics',
'graphite.render',
'graphite.browser',
'graphite.composer',
'graphite.account',
'graphite.dashboard',
'graphite.whitelist',
'graphite.events',
'graphite.url_shortener',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'tagging',
'mod_wsgi.server',
)
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend']
GRAPHITE_WEB_APP_SETTINGS_LOADED = True
| graphite-server/graphite-web | webapp/graphite/app_settings.py | Python | apache-2.0 | 2,450 |
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.insert(0, os.getcwd() + '/../../tools/')
import wb
import trf
import wer
def process_nbest(fread, fwrite):
nEmptySentNum = 0
with open(fread, 'rt') as f1, open(fwrite, 'wt') as f2:
for a in [line.split() for line in f1]:
if len(a) == 1:
nEmptySentNum += 1
a.append('<UNK>')
f2.write(' '.join(a) + '\n')
print('[nbest] empty sentence num = {}'.format(nEmptySentNum))
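# Illustrative call (paths are hypothetical): process_nbest('words_text', 'out.nbest')
# rewrites the n-best list, appending '<UNK>' to any line that splits into a single
# token (typically just the utterance id) so rescoring never sees an empty hypothesis.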
def main():
if len(sys.argv) == 1:
print('\"python run.py -train\" train LSTM\n',
'\"python run.py -rescore\" rescore nbest\n',
'\"python run.py -wer\" compute WER'
)
bindir = '../../tools/trf/bin/'
workdir = 'trflm/'
fres = wb.FRes('models_ppl.txt')
model = trf.model(bindir, workdir)
nbest_root = 'data/nbest/'
nbest_type_list = ['nbest_mvdr_single_heq_multi']
class_num = 200
train = workdir + 'train.id'
valid = workdir + 'valid.id'
test = workdir + 'test.id'
vocab = workdir + 'vocab_c{}.list'.format(class_num)
order = 4
feat = 'g4_w_c_ws_cs_wsh_csh_tied.fs'
#feat = 'g4_w_c_ws_cs_wsh_csh.fs'
maxlen = 0
tmax = 20000
t0 = 0
minibatch = 100
gamma_lambda = '3000,0'
gamma_zeta = '0,0.6'
reg = 1e-6
thread = 8
write_model = workdir + 'trf_c{}_{}_2'.format(class_num, feat[0:-3])
if '-train' in sys.argv or '-all' in sys.argv:
config = '-vocab {} -train {} -valid {} -test {} '.format(vocab, train, valid, test)
config += ' -read {}.model'.format(write_model[0:-2])
config += ' -order {} -feat {} '.format(order, feat)
config += ' -len {} '.format(maxlen)
config += ' -write {0}.model -log {0}.log '.format(write_model)
config += ' -t0 {} -iter {}'.format(t0, tmax)
config += ' -gamma-lambda {} -gamma-zeta {}'.format(gamma_lambda, gamma_zeta)
config += ' -L2 {} '.format(reg)
config += ' -mini-batch {} '.format(minibatch)
config += ' -thread {} '.format(thread)
config += ' -print-per-iter 10 '
config += ' -write-at-iter [{}:10000:{}]'.format(tmax-30000, tmax) # output the intermediate models
model.prepare('data/train', 'data/valid', 'data/valid', class_num)
model.train(config)
if '-plot' in sys.argv:
baseline = fres.Get('KN5')
trf.PlotLog([write_model], [baseline])
if '-rescore' in sys.argv or '-all' in sys.argv:
for nbest_type in nbest_type_list:
nbest_dir = nbest_root + nbest_type + '/'
for tsk in ['nbestlist_{}_{}'.format(a, b) for a in ['dt05', 'et05'] for b in ['real', 'simu']]:
write_dir = workdir + nbest_type + '/' + tsk + '/'
wb.mkdir(write_dir)
print('{} : {}'.format(nbest_type, tsk))
print(' write -> {}'.format(write_dir))
write_lmscore = write_dir + os.path.split(write_model)[-1]
# fill the empty lines
process_nbest(nbest_dir + tsk + '/words_text', write_lmscore + '.nbest')
config = ' -vocab {} '.format(vocab)
config += ' -read {}.model '.format(write_model)
config += ' -nbest {} '.format(write_lmscore + '.nbest')
config += ' -lmscore {0}.lmscore -lmscore-test-id {0}.test-id '.format(write_lmscore)
model.use(config)
if '-wer' in sys.argv or '-all' in sys.argv:
for nbest_type in nbest_type_list:
nbest_dir = nbest_root + nbest_type + '/'
lmpaths = {'KN5': nbest_dir + '<tsk>/lmwt.lmonly',
'RNN': nbest_dir + '<tsk>/lmwt.rnn',
'LSTM': 'lstm/' + nbest_type + '/<tsk>/lmwt.lstm',
'TRF': workdir + nbest_type + '/<tsk>/' + os.path.split(write_model)[-1] + '.lmscore'}
# 'TRF': nbestdir + '<tsk>/lmwt.trf'}
# lmtypes = ['LSTM', 'KN5', 'RNN', 'TRF', 'RNN+KN5', 'LSTM+KN5', 'RNN+TRF', 'LSTM+TRF']
lmtypes = ['TRF','RNN','KN5', 'RNN+TRF']
wer_workdir = 'wer/' + nbest_type + '/'
print('wer_workdir = ' + wer_workdir)
wer.wer_all(wer_workdir, nbest_dir, lmpaths, lmtypes)
config = wer.wer_tune(wer_workdir)
wer.wer_print(wer_workdir, config)
if __name__ == '__main__':
main()
| wbengine/SPMILM | egs/chime4/run_trf_2.py | Python | apache-2.0 | 4,455 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A variational ansatz based on a linear swap network Trotter step."""
from typing import Iterable, Optional, Sequence, Tuple, cast
import numpy
import sympy
import cirq
from openfermioncirq import swap_network
from openfermioncirq.variational.ansatz import VariationalAnsatz
from openfermioncirq.variational.letter_with_subscripts import (
LetterWithSubscripts)
class SwapNetworkTrotterHubbardAnsatz(VariationalAnsatz):
"""A Hubbard model ansatz based on the fermionic swap network Trotter step.
Each Trotter step includes 3 parameters: one for the horizontal hopping
terms, one for the vertical hopping terms, and one for the on-site
interaction. This ansatz is similar to the one used in arXiv:1507.08969,
but corresponds to a different ordering for simulating the Hamiltonian
terms.
"""
def __init__(self,
x_dim: float,
y_dim: float,
tunneling: float,
coulomb: float,
periodic: bool=True,
iterations: int=1,
adiabatic_evolution_time: Optional[float]=None,
qubits: Optional[Sequence[cirq.Qid]]=None
) -> None:
"""
Args:
iterations: The number of iterations of the basic template to
include in the circuit. The number of parameters grows linearly
with this value.
adiabatic_evolution_time: The time scale for Hamiltonian evolution
used to determine the default initial parameters of the ansatz.
This is the value A from the docstring of this class.
If not specified, defaults to the sum of the absolute values
of the entries of the two-body tensor of the Hamiltonian.
qubits: Qubits to be used by the ansatz circuit. If not specified,
then qubits will automatically be generated by the
`_generate_qubits` method.
"""
self.x_dim = x_dim
self.y_dim = y_dim
self.tunneling = tunneling
self.coulomb = coulomb
self.periodic = periodic
self.iterations = iterations
if adiabatic_evolution_time is None:
adiabatic_evolution_time = 0.1*abs(coulomb)*iterations
self.adiabatic_evolution_time = cast(float, adiabatic_evolution_time)
super().__init__(qubits)
def params(self) -> Iterable[sympy.Symbol]:
"""The parameters of the ansatz."""
for i in range(self.iterations):
if self.x_dim > 1:
yield LetterWithSubscripts('Th', i)
if self.y_dim > 1:
yield LetterWithSubscripts('Tv', i)
yield LetterWithSubscripts('V', i)
def param_bounds(self) -> Optional[Sequence[Tuple[float, float]]]:
"""Bounds on the parameters."""
bounds = []
for param in self.params():
s = 1.0 if param.letter == 'V' else 2.0
bounds.append((-s, s))
return bounds
def _generate_qubits(self) -> Sequence[cirq.Qid]:
"""Produce qubits that can be used by the ansatz circuit."""
n_qubits = 2*self.x_dim*self.y_dim
return cirq.LineQubit.range(n_qubits)
def operations(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE:
"""Produce the operations of the ansatz circuit."""
for i in range(self.iterations):
# Apply one- and two-body interactions with a swap network that
# reverses the order of the modes
def one_and_two_body_interaction(p, q, a, b) -> cirq.OP_TREE:
th_symbol = LetterWithSubscripts('Th', i)
tv_symbol = LetterWithSubscripts('Tv', i)
v_symbol = LetterWithSubscripts('V', i)
if _is_horizontal_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-th_symbol).on(a, b)
if _is_vertical_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-tv_symbol).on(a, b)
if _are_same_site_opposite_spin(p, q, self.x_dim*self.y_dim):
yield cirq.CZPowGate(exponent=v_symbol).on(a, b)
yield swap_network(
qubits, one_and_two_body_interaction, fermionic=True)
qubits = qubits[::-1]
# Apply one- and two-body interactions again. This time, reorder
# them so that the entire iteration is symmetric
def one_and_two_body_interaction_reversed_order(p, q, a, b
) -> cirq.OP_TREE:
th_symbol = LetterWithSubscripts('Th', i)
tv_symbol = LetterWithSubscripts('Tv', i)
v_symbol = LetterWithSubscripts('V', i)
if _are_same_site_opposite_spin(p, q, self.x_dim*self.y_dim):
yield cirq.CZPowGate(exponent=v_symbol).on(a, b)
if _is_vertical_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-tv_symbol).on(a, b)
if _is_horizontal_edge(
p, q, self.x_dim, self.y_dim, self.periodic):
yield cirq.ISwapPowGate(exponent=-th_symbol).on(a, b)
yield swap_network(
qubits, one_and_two_body_interaction_reversed_order,
fermionic=True, offset=True)
qubits = qubits[::-1]
def default_initial_params(self) -> numpy.ndarray:
"""Approximate evolution by H(t) = T + (t/A)V.
Sets the parameters so that the ansatz circuit consists of a sequence
of second-order Trotter steps approximating the dynamics of the
time-dependent Hamiltonian H(t) = T + (t/A)V, where T is the one-body
term and V is the two-body term of the Hamiltonian used to generate the
ansatz circuit, and t ranges from 0 to A, where A is equal to
`self.adiabatic_evolution_time`. The number of Trotter steps
is equal to the number of iterations in the ansatz. This choice is
motivated by the idea of state preparation via adiabatic evolution.
The dynamics of H(t) are approximated as follows. First, the total
evolution time of A is split into segments of length A / r, where r
is the number of Trotter steps. Then, each Trotter step simulates H(t)
for a time length of A / r, where t is the midpoint of the
corresponding time segment. As an example, suppose A is 100 and the
ansatz has two iterations. Then the approximation is achieved with two
Trotter steps. The first Trotter step simulates H(25) for a time length
of 50, and the second Trotter step simulates H(75) for a time length
of 50.
"""
total_time = self.adiabatic_evolution_time
step_time = total_time / self.iterations
params = []
for param, scale_factor in zip(self.params(),
self.param_scale_factors()):
if param.letter == 'Th' or param.letter == 'Tv':
params.append(_canonicalize_exponent(
-self.tunneling * step_time / numpy.pi, 4) / scale_factor)
elif param.letter == 'V':
i, = param.subscripts
# Use the midpoint of the time segment
interpolation_progress = 0.5 * (2 * i + 1) / self.iterations
params.append(_canonicalize_exponent(
-0.5 * self.coulomb * interpolation_progress *
step_time / numpy.pi, 2) / scale_factor)
return numpy.array(params)
def _is_horizontal_edge(p, q, x_dim, y_dim, periodic):
n_sites = x_dim*y_dim
if p < n_sites and q >= n_sites or q < n_sites and p >= n_sites:
return False
if p >= n_sites and q >= n_sites:
p -= n_sites
q -= n_sites
return (q == _right_neighbor(p, x_dim, y_dim, periodic)
or p == _right_neighbor(q, x_dim, y_dim, periodic))
def _is_vertical_edge(p, q, x_dim, y_dim, periodic):
n_sites = x_dim*y_dim
if p < n_sites and q >= n_sites or q < n_sites and p >= n_sites:
return False
if p >= n_sites and q >= n_sites:
p -= n_sites
q -= n_sites
return (q == _bottom_neighbor(p, x_dim, y_dim, periodic)
or p == _bottom_neighbor(q, x_dim, y_dim, periodic))
def _are_same_site_opposite_spin(p, q, n_sites):
return abs(p-q) == n_sites
def _right_neighbor(site, x_dimension, y_dimension, periodic):
if x_dimension == 1:
return None
if (site + 1) % x_dimension == 0:
if periodic:
return site + 1 - x_dimension
else:
return None
return site + 1
def _bottom_neighbor(site, x_dimension, y_dimension, periodic):
if y_dimension == 1:
return None
if site + x_dimension + 1 > x_dimension*y_dimension:
if periodic:
return site + x_dimension - x_dimension*y_dimension
else:
return None
return site + x_dimension
def _canonicalize_exponent(exponent: float, period: int) -> float:
# Shift into [-p/2, +p/2).
exponent += period / 2
exponent %= period
exponent -= period / 2
# Prefer (-p/2, +p/2] over [-p/2, +p/2).
if exponent <= -period / 2:
exponent += period # coverage: ignore
return exponent
| quantumlib/OpenFermion-Cirq | openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard.py | Python | apache-2.0 | 10,098 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import logging
from functools import wraps
import six
from six.moves import urllib
from st2client.utils import httpclient
LOG = logging.getLogger(__name__)
def add_auth_token_to_kwargs_from_env(func):
@wraps(func)
def decorate(*args, **kwargs):
if not kwargs.get('token') and os.environ.get('ST2_AUTH_TOKEN', None):
kwargs['token'] = os.environ.get('ST2_AUTH_TOKEN')
return func(*args, **kwargs)
return decorate
class Resource(object):
# An alias to use for the resource if different than the class name.
_alias = None
# Display name of the resource. This may be different than its resource
# name specifically when the resource name is composed of multiple words.
_display_name = None
# URL path for the resource.
_url_path = None
# Plural form of the resource name. This will be used to build the
# latter part of the REST URL.
_plural = None
# Plural form of the resource display name.
_plural_display_name = None
# A list of class attributes which will be included in __repr__ return value
_repr_attributes = []
def __init__(self, *args, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
def to_dict(self, exclude_attributes=None):
"""
Return a dictionary representation of this object.
:param exclude_attributes: Optional list of attributes to exclude.
:type exclude_attributes: ``list``
:rtype: ``dict``
"""
exclude_attributes = exclude_attributes or []
attributes = self.__dict__.keys()
attributes = [attr for attr in attributes if not attr.startswith('__') and
attr not in exclude_attributes]
result = {}
for attribute in attributes:
value = getattr(self, attribute, None)
result[attribute] = value
return result
@classmethod
def get_alias(cls):
return cls._alias if cls._alias else cls.__name__
@classmethod
def get_display_name(cls):
return cls._display_name if cls._display_name else cls.__name__
@classmethod
def get_plural_name(cls):
if not cls._plural:
raise Exception('The %s class is missing class attributes '
'in its definition.' % cls.__name__)
return cls._plural
@classmethod
def get_plural_display_name(cls):
return (cls._plural_display_name
if cls._plural_display_name
else cls._plural)
@classmethod
def get_url_path_name(cls):
if cls._url_path:
return cls._url_path
return cls.get_plural_name().lower()
def serialize(self):
return dict((k, v)
for k, v in six.iteritems(self.__dict__)
if not k.startswith('_'))
@classmethod
def deserialize(cls, doc):
if type(doc) is not dict:
doc = json.loads(doc)
return cls(**doc)
def __str__(self):
return str(self.__repr__())
def __repr__(self):
if not self._repr_attributes:
return super(Resource, self).__repr__()
attributes = []
for attribute in self._repr_attributes:
value = getattr(self, attribute, None)
attributes.append('%s=%s' % (attribute, value))
attributes = ','.join(attributes)
class_name = self.__class__.__name__
result = '<%s %s>' % (class_name, attributes)
return result
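# Hypothetical subclass sketch (not part of this module) showing how concrete
# resources typically fill in the class attributes defined above:
#
#     class Rule(Resource):
#         _alias = 'Rule'
#         _display_name = 'Rule'
#         _plural = 'Rules'
#         _plural_display_name = 'Rules'
#         _repr_attributes = ['name', 'pack']
#
#     Rule.get_url_path_name()   # -> 'rules' (plural name lowercased)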
class ResourceManager(object):
def __init__(self, resource, endpoint, cacert=None, debug=False):
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(endpoint, cacert=cacert, debug=debug)
@staticmethod
def handle_error(response):
try:
content = response.json()
fault = content.get('faultstring', '') if content else ''
if fault:
response.reason += '\nMESSAGE: %s' % fault
except Exception as e:
response.reason += ('\nUnable to retrieve detailed message '
'from the HTTP response. %s\n' % str(e))
response.raise_for_status()
@add_auth_token_to_kwargs_from_env
def get_all(self, **kwargs):
# TODO: This is ugly, stop abusing kwargs
url = '/%s' % self.resource.get_url_path_name()
limit = kwargs.pop('limit', None)
pack = kwargs.pop('pack', None)
prefix = kwargs.pop('prefix', None)
user = kwargs.pop('user', None)
params = {}
if limit and limit <= 0:
limit = None
if limit:
params['limit'] = limit
if pack:
params['pack'] = pack
if prefix:
params['prefix'] = prefix
if user:
params['user'] = user
response = self.client.get(url=url, params=params, **kwargs)
if response.status_code != 200:
self.handle_error(response)
return [self.resource.deserialize(item)
for item in response.json()]
@add_auth_token_to_kwargs_from_env
def get_by_id(self, id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), id)
response = self.client.get(url, **kwargs)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
return self.resource.deserialize(response.json())
@add_auth_token_to_kwargs_from_env
def get_property(self, id_, property_name, self_deserialize=True, **kwargs):
"""
        Get a property of a Resource.
        :param id_: ID of the resource.
        :param property_name: Name of the property to fetch.
        :param self_deserialize: If True, deserialize each returned item using this
                                 resource's ``deserialize`` method; otherwise return the raw JSON.
        :type self_deserialize: ``bool``
"""
token = None
if kwargs:
token = kwargs.pop('token', None)
url = '/%s/%s/%s/?%s' % (self.resource.get_url_path_name(), id_, property_name,
urllib.parse.urlencode(kwargs))
else:
url = '/%s/%s/%s/' % (self.resource.get_url_path_name(), id_, property_name)
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return None
if response.status_code != 200:
self.handle_error(response)
if self_deserialize:
return [self.resource.deserialize(item) for item in response.json()]
else:
return response.json()
@add_auth_token_to_kwargs_from_env
def get_by_ref_or_id(self, ref_or_id, **kwargs):
return self.get_by_id(id=ref_or_id, **kwargs)
@add_auth_token_to_kwargs_from_env
def query(self, **kwargs):
if not kwargs:
            raise Exception('No query parameters were provided.')
if 'limit' in kwargs and kwargs.get('limit') <= 0:
kwargs.pop('limit')
token = kwargs.get('token', None)
params = {}
for k, v in six.iteritems(kwargs):
if k != 'token':
params[k] = v
url = '/%s/?%s' % (self.resource.get_url_path_name(),
urllib.parse.urlencode(params))
response = self.client.get(url, token=token) if token else self.client.get(url)
if response.status_code == 404:
return []
if response.status_code != 200:
self.handle_error(response)
items = response.json()
instances = [self.resource.deserialize(item) for item in items]
return instances
@add_auth_token_to_kwargs_from_env
def get_by_name(self, name_or_id, **kwargs):
instances = self.query(name=name_or_id, **kwargs)
if not instances:
return None
else:
if len(instances) > 1:
                raise Exception('More than one %s named "%s" was found.' %
                                (self.resource.__name__.lower(), name_or_id))
return instances[0]
@add_auth_token_to_kwargs_from_env
def create(self, instance, **kwargs):
url = '/%s' % self.resource.get_url_path_name()
response = self.client.post(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def update(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.put(url, instance.serialize(), **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
@add_auth_token_to_kwargs_from_env
def delete(self, instance, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance.id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
return True
@add_auth_token_to_kwargs_from_env
def delete_by_id(self, instance_id, **kwargs):
url = '/%s/%s' % (self.resource.get_url_path_name(), instance_id)
response = self.client.delete(url, **kwargs)
if response.status_code not in [200, 204, 404]:
self.handle_error(response)
return False
try:
resp_json = response.json()
if resp_json:
return resp_json
        except ValueError:
            # The response body was empty or not valid JSON; treat the delete as successful.
            pass
return True
class ActionAliasResourceManager(ResourceManager):
def __init__(self, resource, endpoint, cacert=None, debug=False):
self.resource = resource
self.debug = debug
self.client = httpclient.HTTPClient(root=endpoint, cacert=cacert, debug=debug)
class LiveActionResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_run(self, execution_id, parameters=None, **kwargs):
url = '/%s/%s/re_run' % (self.resource.get_url_path_name(), execution_id)
data = {}
if parameters:
data['parameters'] = parameters
response = self.client.post(url, data, **kwargs)
if response.status_code != 200:
self.handle_error(response)
instance = self.resource.deserialize(response.json())
return instance
class TriggerInstanceResourceManager(ResourceManager):
@add_auth_token_to_kwargs_from_env
def re_emit(self, trigger_instance_id, **kwargs):
url = '/%s/%s/re_emit' % (self.resource.get_url_path_name(), trigger_instance_id)
        response = self.client.post(url, None, **kwargs)
if response.status_code != 200:
self.handle_error(response)
return response.json()
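# Hedged usage sketch (editor's addition, not part of the original module): how a
# Resource subclass and a ResourceManager defined above might be wired together.
# The `Rule` class, the endpoint URL, and the object names are illustrative assumptions.
#
# class Rule(Resource):
#     _plural = 'Rules'
#     _repr_attributes = ['name', 'description']
#
# manager = ResourceManager(Rule, 'http://localhost:9101/v1', debug=True)
# rules = manager.get_all(limit=10)        # GET /rules?limit=10
# rule = manager.get_by_name('my_rule')    # GET /rules/?name=my_rule
# if rule:
#     manager.delete(rule)                 # DELETE /rules/<id>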
|
alfasin/st2
|
st2client/st2client/models/core.py
|
Python
|
apache-2.0
| 11,745
|
import os
from configurations import values
from django.conf import global_settings
class DjangoSettings(object):
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = values.SecretValue()
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
class BaseSettings(DjangoSettings):
pass
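# Hedged usage sketch (editor's addition, not part of the original settings module):
# with django-configurations installed, a settings class such as BaseSettings is
# typically activated from wsgi.py via environment variables. The module path and
# class name below are assumptions mirroring this project layout.
#
# import os
# from configurations.wsgi import get_wsgi_application
#
# os.environ.setdefault('DJANGO_SETTINGS_MODULE', '{{ project_name }}.settings.base')
# os.environ.setdefault('DJANGO_CONFIGURATION', 'BaseSettings')
# os.environ.setdefault('DJANGO_SECRET_KEY', 'change-me')  # required by values.SecretValue()
# application = get_wsgi_application()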
|
gotche/django-basic-project
|
project_name/project_name/settings/base.py
|
Python
|
apache-2.0
| 1,387
|
import sys, os, math
import time
import numpy as np
from pandas.io.parsers import read_csv
from sklearn.decomposition import PCA
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn import metrics
import sklearn.svm as svm
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
from sklearn.cross_validation import train_test_split
import matplotlib.pyplot as plt
cut_pt = 1  # the first column holds the digit label; the remaining columns are pixel values
print ("Reading the file...")
input_res = read_csv(os.path.expanduser("input/train.csv"), nrows=3000) # load pandas dataframe
input_res = input_res.as_matrix()
shape = input_res.shape
number_of_rows = shape[0]
number_of_columns = shape[1]
number_of_fv = number_of_columns - cut_pt
print ("Number of rows: %d (document)" % number_of_rows)
print ("Number of columns: %d (feature vector(preprocessed) + topics class labels(preprocessed))" % number_of_columns)
print ("Number of class_labels: %d" % number_of_fv)
# Split the matrix into features (pixel columns) and labels (digit classes).
x = input_res[:, cut_pt:number_of_columns]
y = input_res[:, 0:cut_pt].transpose().ravel()
x = x / 255.  # scale pixel intensities from [0, 255] to [0, 1]
# Visualise the first training image: each row is a flattened 28x28 grayscale digit.
data = x[0]
print (data)
print (data.shape[0])  # 784 pixel values per image
img = data.reshape(28, 28)
img = img.astype(np.float32)
plt.imshow(img, cmap="gray")
plt.show()
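# Hedged sketch (editor's addition): one way the classifiers imported above could be
# applied to the same x/y arrays; train_test_split, GaussianNB, and metrics are already
# imported at the top of this script, while the split ratio and random seed are assumptions.
#
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# clf = GaussianNB().fit(x_train, y_train)
# print ("Hold-out accuracy: %.3f" % metrics.accuracy_score(y_test, clf.predict(x_test)))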
|
flyingpoops/kaggle-digit-recognizer-team-learning
|
plot.py
|
Python
|
apache-2.0
| 1,298
|