import random
import sys
# transposes pitch to be same pitch within range [lo,hi] inclusive
# if multiple pitch matches exist within range, match closest to given pitch
def transpose_pitch(pitch, lo, hi):
"""transposes given pitch to be same pitch within range [lo,hi] inclusive
if pitch cannot be transposed into given range, prints error message and exits
Args:
pitch (int): pitch to be transposed
lo (int): lower bound of transpose range
hi (int): higher bound of transpose range
Returns:
int: successfully transposed pitch
"""
octave = 12
if (hi - lo) < (octave - 1):
print "Transpose Error: hi - lo < 11, make sure range spans a full octave"
sys.exit(1)
while pitch < lo:
pitch += octave
while pitch > hi:
pitch -= octave
return pitch
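# Worked example (values chosen for illustration): with lo=1 and hi=21 the
# range spans 20 semitones (>= 11), so the check passes and:
#   transpose_pitch(25, 1, 21) -> 13   (25 is above hi, shifted down an octave)
#   transpose_pitch(0, 1, 21)  -> 12   (0 is below lo, shifted up an octave)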
# transpose fragment (ed, dur) by random number of degrees
def mut1(genotype, start_index, end_index):
"""transpose chromosome fragment by random number of degrees from [-5, 5] inclusive
Args:
genotype ((int, int)[]): list of tuples (pitch, dur) representing genotype of a chromosome
start_index (int): index where fragment begins
end_index (int): index where fragment ends
Returns:
(int, int)[]: returns new genotype with mutated fragment
"""
transpose = random.randint(-5, 5)
for i, (ed, dur) in enumerate(genotype):
if i >= start_index and i <= end_index:
genotype[i] = (transpose_pitch(genotype[i][0] + transpose, 1, 21), genotype[i][1])
return genotype
# permute in time
def mut2(genotype, start_index, end_index):
"""permutes chromosome fragment randomly in time
Args:
genotype ((int, int)[]): list of tuples (pitch, dur) representing genotype of a chromosome
start_index (int): index where fragment begins
end_index (int): index where fragment ends
Returns:
(int, int)[]: returns new genotype with mutated fragment
"""
fragment = genotype[start_index:end_index+1]
random.shuffle(fragment)
for i, (ed, dur) in enumerate(genotype):
if i >= start_index and i <= end_index:
genotype[i] = fragment[i-start_index]
return genotype
# sort into ascending or descending pitch
def mut3(genotype, start_index, end_index):
"""sorts chromosome fragment into either ascending or descending pitch
Args:
genotype ((int, int)[]): list of tuples (pitch, dur) representing genotype of a chromosome
start_index (int): index where fragment begins
end_index (int): index where fragment ends
Returns:
(int, int)[]: returns new genotype with mutated fragment
"""
fragment = genotype[start_index:end_index+1]
# 50% chance ascending/descending
if random.random() < .5:
fragment.sort(key=lambda x: x[0], reverse=False)
else:
fragment.sort(key=lambda x: x[0], reverse=True)
for i, (ed, dur) in enumerate(genotype):
if i >= start_index and i <= end_index:
genotype[i] = fragment[i-start_index]
return genotype
# reverse in time
def mut4(genotype, start_index, end_index):
"""reverses chromosome fragment in time
Args:
genotype ((int, int)[]): list of tuples (pitch, dur) representing genotype of a chromosome
start_index (int): index where fragment begins
end_index (int): index where fragment ends
Returns:
(int, int)[]: returns new genotype with mutated fragment
"""
fragment = genotype[start_index:end_index+1]
fragment.reverse()
for i, (ed, dur) in enumerate(genotype):
if i >= start_index and i <= end_index:
genotype[i] = fragment[i-start_index]
return genotype
# change a few pitches while maintaining the same rhythm
def mut5(genotype, start_index, end_index):
"""changes few pitches in chromosome fragment while maintaing the same rhythm
Args:
genotype ((int, int)[]): list of tuples (pitch, dur) representing genotype of a chromosome
start_index (int): index where fragment begins
end_index (int): index where fragment ends
Returns:
(int, int)[]: returns new genotype with mutated fragment
"""
for i, (ed, dur) in enumerate(genotype):
if i >= start_index and i <= end_index:
if random.random() < .3:
genotype[i] = (random.randint(1, 21), genotype[i][1])
return genotype
# one-note mutation which shifts the pitch of each note in the range up or down by one
def mut6(genotype, start_index, end_index):
"""one-note mutation which changes pitch of all notes in fragment up or down by 1
Args:
genotype ((int, int)[]): list of tuples (pitch, dur) representing genotype of a chromosome
start_index (int): index where fragment begins
end_index (int): index where fragment ends
Returns:
(int, int)[]: returns new genotype with mutated fragment
"""
for i, (ed, dur) in enumerate(genotype):
if i >= start_index and i <= end_index:
if random.random() < .5:
genotype[i] = (transpose_pitch(1 + genotype[i][0], 1, 21), genotype[i][1])
else:
genotype[i] = (transpose_pitch(-1 + genotype[i][0], 1, 21), genotype[i][1])
return genotype
# concatenate contiguous rests and identical pitches
def mut7(genotype, start_index, end_index):
"""concatenates contiguous rests and identical pitches in chromosome fragment
Args:
genotype ((int, int)[]): list of tuples (pitch, dur) representing genotype of a chromosome
start_index (int): index where fragment begins
end_index (int): index where fragment ends
Returns:
(int, int)[]: returns new genotype with mutated fragment
"""
last_pitch = -99
last_dur = -1000
new_genotype = []
for i, (ed, dur) in enumerate(genotype):
if i >= start_index and i <= end_index and i > 0:
if ed == last_pitch:
new_genotype.pop()
new_genotype.append((ed, last_dur + dur))
last_pitch = ed
last_dur = last_dur + dur
else:
new_genotype.append((ed, dur))
last_pitch = ed
last_dur = dur
else:
new_genotype.append((ed, dur))
last_pitch = ed
last_dur = dur
return new_genotype
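# Illustrative sketch of mut7 (hypothetical values): contiguous identical
# pitches inside the fragment are merged and their durations summed, e.g.
#   mut7([(5, 1), (5, 1), (7, 2)], 0, 2) -> [(5, 2), (7, 2)]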
# inserts randomly chosen fragment to a different position
# trims end to make dur consistent
def mut8(genotype, start_index, end_index):
"""inserts randomly chosen fragment to a different position in the same chromosome genotype
Args:
genotype ((int, int)[]): list of tuples (pitch, dur) representing genotype of a chromosome
start_index (int): index where fragment begins
end_index (int): index where fragment ends
Returns:
(int, int)[]: returns new genotype with mutated fragment
"""
old_genotype_dur = sum([d for (_, d) in genotype])
fragment = genotype[start_index:end_index]
new_genotype = []
for elem in reversed(fragment):
genotype.insert(start_index, elem)
# trim end of genotype
total_dur = 0
for i, (ed, dur) in enumerate(genotype):
total_dur += dur
if total_dur >= old_genotype_dur:
new_genotype.append((ed, dur - (total_dur - old_genotype_dur)))
break
else:
new_genotype.append((ed, dur))
if sum([d for (_, d) in new_genotype]) != old_genotype_dur:
print('mut8 duration error')
return new_genotype
def get_random_start_end(len_genotype):
"""gets a random start and end type for genotype
used to get start and end times for fragments, used in mutations
Args:
len_genotype (int): length of genotype list
Returns:
(int, int): tuple of two integers representing randomly generated (start_index, end_index)
"""
end_index = random.randint(0, len_genotype-1)
start_index = random.randint(0, len_genotype-1)
if start_index > end_index:
start_index, end_index = end_index, start_index
return (start_index, end_index)
# mutates in one of many ways locally
# returns new mutated genotype
def local_mutation(chromosome, d, prob_local=.5):
"""mutates chromosome in one of many ways locally
Args:
chromosome ((int, (int, int)[])): Chromosome is a tuple of (fitness, genotype). Genotype is a list of tuples (pitch, dur) representing music to be played
d (int): Length, in durks, representing total length of the full song
prob_local (float, optional): Defaults to .5. Probability that any mutation will happen
Returns:
(int, int)[]: returns new randomly mutated genotype
"""
genotype = chromosome[1]
len_genotype = len(genotype)
if random.random() < prob_local:
(start_index, end_index) = get_random_start_end(len_genotype)
genotype = mut1(genotype, start_index, end_index)
if random.random() < prob_local:
(start_index, end_index) = get_random_start_end(len_genotype)
genotype = mut2(genotype, start_index, end_index)
if random.random() < prob_local: # this mutation seems problematic
(start_index, end_index) = get_random_start_end(len_genotype)
genotype = mut3(genotype, start_index, end_index)
if random.random() < prob_local:
(start_index, end_index) = get_random_start_end(len_genotype)
genotype = mut4(genotype, start_index, end_index)
if random.random() < prob_local:
(start_index, end_index) = get_random_start_end(len_genotype)
genotype = mut5(genotype, start_index, end_index)
if random.random() < prob_local: # one note mutation
(start_index, end_index) = get_random_start_end(len_genotype)
genotype = mut6(genotype, start_index, end_index)
if random.random() < prob_local:
(start_index, end_index) = get_random_start_end(len_genotype)
genotype = mut7(genotype, start_index, end_index)
if random.random() < prob_local:
(start_index, end_index) = get_random_start_end(len_genotype)
genotype = mut8(genotype, start_index, end_index)
return genotype
# returns mutated chromosome
def mutate(chromosome, d, prob_local=.5):
"""given chromosome, returns mutated chromosome
calls local_mutation
Args:
chromosome ((int, (int, int)[])): Chromosome is a tuple of (fitness, genotype). Genotype is a list of tuples (pitch, dur) representing music to be played
d (int): Length, in durks, representing total length of the full song
prob_local (float, optional): Defaults to .5. Probability that any mutation will happen
Returns:
(int, int)[]: a genotype, which is a list of tuples (pitch, dur)
"""
genotype = local_mutation(chromosome, d, prob_local)
return genotype
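# Minimal usage sketch (not part of the original module): builds a toy
# chromosome and runs it through the mutation pipeline. The (fitness, genotype)
# layout, the pitch range [1, 21] and the duration unit ("durks") follow the
# conventions documented above; the concrete values are made up.
if __name__ == "__main__":
    toy_genotype = [(1, 4), (5, 2), (8, 2), (12, 4), (8, 4)]
    toy_chromosome = (0, toy_genotype)  # (fitness, genotype)
    total_durks = sum(dur for (_, dur) in toy_genotype)
    mutated = mutate(toy_chromosome, total_durks, prob_local=.5)
    print(mutated)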
# =========================== (next source file) ===========================
import os
import subprocess
import re
import codecs
import shutil
import sublime
ST3 = int(sublime.version()) >= 3006
try:
from . import git_helper
from .view_collection import ViewCollection
except (ImportError, ValueError):
import git_helper
from view_collection import ViewCollection
class GitGutterHandler:
git_binary_path_error_shown = False
git_binary_path_fallback = None
def __init__(self, view):
self.load_settings()
self.view = view
self.git_temp_file = ViewCollection.git_tmp_file(self.view)
self.buf_temp_file = ViewCollection.buf_tmp_file(self.view)
self.git_tree = None
self.git_dir = None
self.git_path = None
def _get_view_encoding(self):
# get encoding and clean it for python ex: "Western (ISO 8859-1)"
# NOTE(maelnor): do we need regex here?
pattern = re.compile(r'.+\((.*)\)')
encoding = self.view.encoding()
if encoding == "Undefined":
encoding = self.view.settings().get('default_encoding')
if pattern.match(encoding):
encoding = pattern.sub(r'\1', encoding)
encoding = encoding.replace('with BOM', '')
encoding = encoding.replace('Windows', 'cp')
encoding = encoding.replace('-', '_')
encoding = encoding.replace(' ', '')
# work around with ConvertToUTF8 plugin
origin_encoding = self.view.settings().get('origin_encoding')
return origin_encoding or encoding
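# For illustration, the cleanup above turns "Western (Windows 1252)" into
# "cp1252" and "Western (ISO 8859-1)" into "ISO8859_1", both of which Python's
# codec registry accepts.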
def on_disk(self):
# if the view is saved to disk
on_disk = self.view.file_name() is not None
if on_disk:
self.git_tree = self.git_tree or git_helper.git_tree(self.view)
self.git_dir = self.git_dir or git_helper.git_dir(self.git_tree)
self.git_path = self.git_path or git_helper.git_file_path(
self.view, self.git_tree
)
return on_disk
def reset(self):
if self.on_disk() and self.git_path and self.view.window():
self.view.window().run_command('git_gutter')
def get_git_path(self):
return self.git_path
def update_buf_file(self):
chars = self.view.size()
region = sublime.Region(0, chars)
# Try conversion
try:
contents = self.view.substr(
region).encode(self._get_view_encoding())
except UnicodeError:
# Fallback to utf8-encoding
contents = self.view.substr(region).encode('utf-8')
except LookupError:
# May encounter an encoding we don't have a codec for
contents = self.view.substr(region).encode('utf-8')
contents = contents.replace(b'\r\n', b'\n')
contents = contents.replace(b'\r', b'\n')
with open(self.buf_temp_file, 'wb') as f:
if self.view.encoding() == "UTF-8 with BOM":
f.write(codecs.BOM_UTF8)
f.write(contents)
def update_git_file(self):
# the git repo won't change that often
# so we can easily wait 5 seconds
# between updates for performance
if ViewCollection.git_time(self.view) > 5:
with open(self.git_temp_file, 'w'):
pass
args = [
self.git_binary_path,
'--git-dir=' + self.git_dir,
'--work-tree=' + self.git_tree,
'show',
ViewCollection.get_compare(self.view) + ':' + self.git_path,
]
try:
contents = self.run_command(args)
contents = contents.replace(b'\r\n', b'\n')
contents = contents.replace(b'\r', b'\n')
with open(self.git_temp_file, 'wb') as f:
f.write(contents)
ViewCollection.update_git_time(self.view)
except Exception:
pass
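# For illustration, the command assembled above typically looks like
# (paths are hypothetical; the compare target is usually "HEAD"):
#   git --git-dir=/path/to/repo/.git --work-tree=/path/to/repo show HEAD:relative/file.py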
def total_lines(self):
chars = self.view.size()
region = sublime.Region(0, chars)
lines = self.view.lines(region)
return len(lines)
# Parse unified diff with 0 lines of context.
# Hunk range info format:
# @@ -3,2 +4,0 @@
# Hunk originally starting at line 3, and occupying 2 lines, now
# starts at line 4, and occupies 0 lines, i.e. it was deleted.
# @@ -9 +10,2 @@
# Hunk size can be omitted, and defaults to one line.
# Dealing with ambiguous hunks:
# "A\nB\n" -> "C\n"
# Was 'A' modified, and 'B' deleted? Or 'B' modified, 'A' deleted?
# Or both deleted? To minimize confusion, let's simply mark the
# hunk as modified.
def process_diff(self, diff_str):
inserted = []
modified = []
deleted = []
hunk_re = r'^@@ \-(\d+),?(\d*) \+(\d+),?(\d*) @@'
hunks = re.finditer(hunk_re, diff_str, re.MULTILINE)
for hunk in hunks:
start = int(hunk.group(3))
old_size = int(hunk.group(2) or 1)
new_size = int(hunk.group(4) or 1)
if not old_size:
inserted += range(start, start + new_size)
elif not new_size:
deleted += [start + 1]
else:
modified += range(start, start + new_size)
return (inserted, modified, deleted)
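# Worked example of the parsing above (hunk headers are illustrative):
#   "@@ -3,0 +4,2 @@"  old size 0 (pure insertion)  -> inserted lines [4, 5]
#   "@@ -9 +10,2 @@"   omitted sizes default to 1   -> modified lines [10, 11]
#   "@@ -3,2 +4,0 @@"  new size 0 (pure deletion)   -> deleted marker at line 5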
def diff_str(self):
if self.on_disk() and self.git_path:
self.update_git_file()
self.update_buf_file()
args = [
self.git_binary_path, 'diff', '-U0', '--no-color', '--no-index',
self.ignore_whitespace,
self.patience_switch,
self.git_temp_file,
self.buf_temp_file,
]
args = list(filter(None, args)) # Remove empty args
results = self.run_command(args)
encoding = self._get_view_encoding()
try:
decoded_results = results.decode(encoding.replace(' ', ''))
except UnicodeError:
try:
decoded_results = results.decode("utf-8")
except UnicodeDecodeError:
decoded_results = ""
except LookupError:
try:
decoded_results = codecs.decode(results)
except UnicodeDecodeError:
decoded_results = ""
return decoded_results
else:
return ""
def process_diff_line_change(self, diff_str, line_nr):
hunk_re = r'^@@ \-(\d+),?(\d*) \+(\d+),?(\d*) @@'
hunks = re.finditer(hunk_re, diff_str, re.MULTILINE)
# we also want to extract the position of the surrounding changes
first_change = prev_change = next_change = None
for hunk in hunks:
start = int(hunk.group(3))
size = int(hunk.group(4) or 1)
if first_change is None:
first_change = start
# special handling to also match the line below deleted
# content
if size == 0 and line_nr == start + 1:
pass
# continue if the hunk is before the line
elif start + size < line_nr:
prev_change = start
continue
# break if the hunk is after the line
elif line_nr < start:
break
# in the following the line is inside the hunk
try:
next_hunk = next(hunks)
hunk_end = next_hunk.start()
next_change = int(next_hunk.group(3))
except:
hunk_end = len(diff_str)
# extract the content of the hunk
hunk_content = diff_str[hunk.start():hunk_end]
# store all deleted lines (starting with -)
lines = [line[1:] for line in hunk_content.split("\n")[1:]
if line.startswith("-")]
# if wrap is disabled avoid wrapping
wrap = self.settings.get('next_prev_change_wrap', True)
if not wrap:
if prev_change is None:
prev_change = start
if next_change is None:
next_change = start
# if prev change is None set it to the wrap around the
# document: prev -> last hunk, next -> first hunk
if prev_change is None:
try:
remaining_hunks = list(hunks)
if remaining_hunks:
last_hunk = remaining_hunks[-1]
prev_change = int(last_hunk.group(3))
elif next_change is not None:
prev_change = next_change
else:
prev_change = start
except:
prev_change = start
if next_change is None:
next_change = first_change
meta = {
"first_change": first_change,
"next_change": next_change,
"prev_change": prev_change
}
return lines, start, size, meta
return [], -1, -1, {}
def diff_line_change(self, line):
diff_str = self.diff_str()
return self.process_diff_line_change(diff_str, line)
def diff(self):
diff_str = self.diff_str()
if diff_str:
return self.process_diff(diff_str)
else:
return ([], [], [])
def untracked(self):
return self.handle_files([])
def ignored(self):
return self.handle_files(['-i'])
def handle_files(self, additional_args):
if self.on_disk() and self.git_path:
args = [
self.git_binary_path,
'--git-dir=' + self.git_dir,
'--work-tree=' + self.git_tree,
'ls-files', '--other', '--exclude-standard',
] + additional_args + [
os.path.join(self.git_tree, self.git_path),
]
args = list(filter(None, args)) # Remove empty args
results = self.run_command(args)
encoding = self._get_view_encoding()
try:
decoded_results = results.decode(encoding.replace(' ', ''))
except UnicodeError:
decoded_results = results.decode("utf-8")
return (decoded_results != "")
else:
return False
def git_commits(self):
args = [
self.git_binary_path,
'--git-dir=' + self.git_dir,
'--work-tree=' + self.git_tree,
'log', '--all',
'--pretty=%s\a%h %an <%aE>\a%ad (%ar)',
'--date=local', '--max-count=9000'
]
results = self.run_command(args)
return results
def git_branches(self):
args = [
self.git_binary_path,
'--git-dir=' + self.git_dir,
'--work-tree=' + self.git_tree,
'for-each-ref',
'--sort=-committerdate',
'--format=%(subject)\a%(refname)\a%(objectname)',
'refs/heads/'
]
results = self.run_command(args)
return results
def git_tags(self):
args = [
self.git_binary_path,
'--git-dir=' + self.git_dir,
'--work-tree=' + self.git_tree,
'show-ref',
'--tags',
'--abbrev=7'
]
results = self.run_command(args)
return results
def git_current_branch(self):
args = [
self.git_binary_path,
'--git-dir=' + self.git_dir,
'--work-tree=' + self.git_tree,
'rev-parse',
'--abbrev-ref',
'HEAD'
]
result = self.run_command(args)
return result
def run_command(self, args):
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
startupinfo=startupinfo, stderr=subprocess.PIPE)
return proc.stdout.read()
def load_settings(self):
self.settings = sublime.load_settings('GitGutter.sublime-settings')
self.user_settings = sublime.load_settings(
'Preferences.sublime-settings')
# Git Binary Setting
git_binary_setting = self.user_settings.get("git_binary") or \
self.settings.get("git_binary")
if isinstance(git_binary_setting, dict):
self.git_binary_path = git_binary_setting.get(sublime.platform())
if not self.git_binary_path:
self.git_binary_path = git_binary_setting.get('default')
else:
self.git_binary_path = git_binary_setting
if self.git_binary_path:
self.git_binary_path = os.path.expandvars(self.git_binary_path)
elif self.git_binary_path_fallback:
self.git_binary_path = self.git_binary_path_fallback
elif ST3:
self.git_binary_path = shutil.which("git")
GitGutterHandler.git_binary_path_fallback = self.git_binary_path
else:
git_exe = "git.exe" if sublime.platform() == "windows" else "git"
for folder in os.environ["PATH"].split(os.pathsep):
path = os.path.join(folder.strip('"'), git_exe)
if os.path.isfile(path) and os.access(path, os.X_OK):
self.git_binary_path = path
GitGutterHandler.git_binary_path_fallback = path
break
if not self.git_binary_path:
if not GitGutterHandler.git_binary_path_error_shown:
GitGutterHandler.git_binary_path_error_shown = True
msg = ("Your Git binary cannot be found. If it is installed, add it "
"to your PATH environment variable, or add a `git_binary` setting "
"in the `User/GitGutter.sublime-settings` file.")
sublime.error_message(msg)
raise ValueError("Git binary not found.")
# Ignore White Space Setting
self.ignore_whitespace = self.settings.get('ignore_whitespace')
if self.ignore_whitespace == 'all':
self.ignore_whitespace = '-w'
elif self.ignore_whitespace == 'eol':
self.ignore_whitespace = '--ignore-space-at-eol'
else:
self.ignore_whitespace = ''
# Patience Setting
self.patience_switch = ''
patience = self.settings.get('patience')
if patience:
self.patience_switch = '--patience'
# Untracked files
self.show_untracked = self.settings.get(
'show_markers_on_untracked_file')
# Show in minimap
self.show_in_minimap = self.user_settings.get('show_in_minimap') or self.settings.get('show_in_minimap')
# Show information in status bar
self.show_status = self.user_settings.get('show_status') or self.settings.get('show_status')
if self.show_status != 'all' and self.show_status != 'none':
self.show_status = 'default'
# =========================== (next source file) ===========================
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
from django.core.validators import RegexValidator
class Personnel(models.Model):
nom = models.CharField(max_length=60, verbose_name="Prenom Nom")
mail = models.CharField(max_length=60, verbose_name="Mail")
def __str__(self):
return self.nom
class Local(models.Model):
nom = models.CharField(max_length=60, verbose_name="Local")
batiment = models.ForeignKey(
'Batiment',
on_delete=models.CASCADE,
)
def __str__(self):
return self.nom
class Batiment(models.Model):
nom = models.CharField(max_length=60, verbose_name="Batiment")
site = models.ForeignKey(
'Site',
on_delete=models.CASCADE,
)
def __str__(self):
return self.nom
class Site(models.Model):
nom = models.CharField(max_length=60, verbose_name="Site")
def __str__(self):
return self.nom
class Article(models.Model):
TYPE_ARTICLE = (
('0','SIFAC P@RC'),
('1','SIFAC'),
('2','UTC P@RC')
)
auteur_modification = models.ForeignKey(
'Personnel',
on_delete=models.CASCADE,
related_name='auteur_modification',
blank=True,
)
gestionnaire = models.ForeignKey(
'Personnel',
on_delete=models.CASCADE,
related_name='gestionnaire',
null=True,
blank=True,
)
utilisateur = models.ForeignKey(
'Personnel',
on_delete=models.CASCADE,
null=True,
blank=True,
)
local = models.ForeignKey(
'Local',
on_delete=models.CASCADE,
blank=True,
null=True,
)
batiment = models.ForeignKey(
'Batiment',
on_delete=models.CASCADE,
blank=True,
null=True,
)
site = models.ForeignKey(
'Site',
on_delete=models.CASCADE,
blank=True,
null=True,
)
type_article = models.CharField(
max_length=20,
choices=TYPE_ARTICLE,
verbose_name="Type",
blank=True,
default=None)
num_immo = models.CharField(
max_length=30,
null=True,
verbose_name="Numero immobilisation",
blank=True,
validators=[
RegexValidator(
regex = '^[0-9]+ / 0{4}$',
message='Doit etre de la forme : xyz... / 0000',
code='invalid_num_immo'
)
]
)
num_inv = models.CharField(
max_length=30,
null=True,
verbose_name="Numero inventaire",
blank=True,
validators=[
RegexValidator(
regex = '^[0-9]+$',
message='Doit etre numerique',
code='invalid_num_inv'
)
]
)
design = models.CharField(
max_length=100,
verbose_name="Designation",
#blank=True
)
design_suite = models.CharField(
max_length=50,
null=True,
verbose_name="Designation suite",
blank=True)
date_gest = models.DateField(
null=True,
verbose_name="Date de gestion",
blank=True)
mise_serv = models.DateField(
verbose_name="Mise en service",
#blank=True
)
last_inv = models.DateField(
verbose_name="Dernier inventaire",
#blank=True
)
sortie_inv = models.DateField(
null=True,
verbose_name="Sortie d'inventaire",
blank=True)
cf = models.CharField(
max_length=30,
verbose_name="Cf",
#blank=True
)
cpt_bud = models.IntegerField(
verbose_name="Cpt Bud.",
#blank=True
)
lib_cpt = models.CharField(
max_length=60,
verbose_name="Libelle Cpt Bud.",
#blank=True
)
eotp = models.CharField(
null=True,
max_length=30,
verbose_name="eOTP",
blank=True)
lib_eotp = models.CharField(
null=True,
max_length=30,
verbose_name="Libelle eOTP",
blank=True)
cle_enc = models.IntegerField(
null=True,
verbose_name="Cle encouragement",
blank=True)
design_enc = models.CharField(
null=True,
max_length=50,
verbose_name="Designation encouragement",
blank=True)
montant_enc = models.FloatField(
null=True,
max_length=50,
verbose_name="Montant encouragement",
blank=True)
num_comm = models.CharField(
max_length=50,
verbose_name="Numero commande",
blank=True,
default="/",
validators=[
RegexValidator(
regex = '^([0-9]{10}/00010|/)$',
message='Doit etre de la forme : \"XX...XX/00010\" ou \"/\"',
code='invalid_num_com'
)
]
)
fournisseur = models.CharField(
null=True,
max_length=50,
verbose_name="Fournisseur",
blank=True)
qte = models.IntegerField(
verbose_name="Quantite",
#blank=True,
#default=0
)
val_aquis = models.FloatField(
null=True,
max_length=20,
verbose_name="Val. aquis.",
blank=True)
val_compt = models.FloatField(
null=True,
max_length=20,
verbose_name="Val. comptable",
blank=True)
val_res = models.FloatField(
null=True,
max_length=20,
verbose_name="Val. residuelle",
blank=True)
remarque = models.TextField(
null=True,
verbose_name="Remarque",
blank=True)
image = models.URLField(
null=True,
verbose_name="Image",
blank=True,
default="https://image.freepik.com/icones-gratuites/point-d-39-interrogation_318-52837.jpg")
date_modif = models.DateTimeField(
auto_now=True,
blank=True,
verbose_name="Derniere modification")
def __str__(self):
return self.design
def save(self, *args, **kwargs):
# either num_inv or num_immo must be filled in
if not self.num_immo and not self.num_inv:
raise ValueError('Un champ parmi num_inv et num_immo doit etre rempli')
# check that the people given already exist in the database
if Personnel.objects.filter(nom = self.auteur_modification).exists():
self.auteur_modification = Personnel.objects.get( nom=self.auteur_modification )
else:
raise ValueError('Le dernier auteur de modification n\'existe pas')
if Personnel.objects.filter(nom = self.gestionnaire).exists():
self.gestionnaire = Personnel.objects.get( nom=self.gestionnaire )
elif self.gestionnaire != None:
raise ValueError('Ce gestionnaire n\'existe pas')
if Personnel.objects.filter(nom = self.utilisateur).exists():
self.utilisateur = Personnel.objects.get( nom=self.utilisateur )
elif self.utilisateur != None:
raise ValueError('Cet utilisateur n\'existe pas')
# check the location fields (site, batiment, local)
if Site.objects.filter( nom=self.site ).exists():
self.site = Site.objects.get( nom=self.site )
elif self.site != None:
raise ValueError('Ce site n\'existe pas')
if Batiment.objects.filter( nom=self.batiment, site=self.site ).exists():
self.batiment = Batiment.objects.get( nom=self.batiment, site=self.site )
elif self.batiment != None:
raise ValueError('Ce batiment n\'existe pas')
if Local.objects.filter( nom=self.local, batiment=self.batiment ).exists():
self.local = Local.objects.get( nom=self.local, batiment=self.batiment )
elif self.local != None:
raise ValueError('Ce local n\'existe pas')
item_identique = Article.objects.filter(
type_article = self.type_article,
num_immo = self.num_immo,
num_inv = self.num_inv,
design = self.design,
design_suite = self.design_suite,
date_gest = self.date_gest,
auteur_modification = self.auteur_modification,
mise_serv = self.mise_serv,
last_inv = self.last_inv,
sortie_inv = self.sortie_inv,
cf = self.cf,
cpt_bud = self.cpt_bud,
lib_cpt = self.lib_cpt,
eotp = self.eotp,
lib_eotp = self.lib_eotp,
cle_enc = self.cle_enc,
design_enc = self.design_enc,
montant_enc = self.montant_enc,
num_comm = self.num_comm,
fournisseur = self.fournisseur,
gestionnaire = self.gestionnaire,
utilisateur = self.utilisateur,
site = self.site,
batiment = self.batiment,
local = self.local,
qte = self.qte,
val_aquis = self.val_aquis,
val_compt = self.val_compt,
val_res = self.val_res,
remarque = self.remarque,
)
if item_identique.exists():
item_identique.update(date_modif = timezone.now())
else:
super(Article, self).save(*args, **kwargs)
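# Illustrative behaviour of Article.save() above: saving an Article whose
# fields all match an existing row only refreshes date_modif on that row;
# otherwise the Personnel / Site / Batiment / Local names are resolved to
# model instances and the new row is inserted.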
class AutresRemarques(models.Model):
num_immo_article = models.CharField(
max_length=30,
null=True,
verbose_name="Numero immobilisation",
blank=True,
validators=[
RegexValidator(
regex = '^[0-9]+ / 0{4}$',
message='Doit etre de la forme : xyz... / 0000',
code='invalid_num_immo')])
num_inv_article = models.CharField(
max_length=30,
null=True,
verbose_name="Numero inventaire",
blank=True,
validators=[
RegexValidator(
regex = '^[0-9]+$',
message='Doit etre numerique',
code='invalid_num_inv')])
intitule = models.CharField(null=True,max_length=80, verbose_name="Intitule remarque", blank=True)
valeur = models.CharField(null=True,max_length=80, verbose_name="Valeur remarque", blank=True)
def __str__(self):
return self.intitule
def save(self, *args, **kwargs):
intitule_identique = AutresRemarques.objects.filter(
num_immo_article = self.num_immo_article,
num_inv_article = self.num_inv_article,
intitule = self.intitule,
)
if intitule_identique.exists():
if self.valeur == "":
intitule_identique.delete()
else:
intitule_identique.update(valeur = self.valeur)
elif self.intitule != "":
super(AutresRemarques, self).save(*args, **kwargs)
class PDF(models.Model):
id_article = models.ForeignKey(
'Article',
on_delete=models.CASCADE,
related_name='PDF',
blank=True,
)
lien = models.URLField(null=True, verbose_name="Image", blank=True, default="https://image.freepik.com/icones-gratuites/point-d-39-interrogation_318-52837.jpg")
def __str__(self):
return self.lien
# =========================== (next source file) ===========================
from datetime import timedelta
import operator
from sys import getsizeof
from typing import Any, Optional
import warnings
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import Label
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly, doc
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import _index_shared_docs, maybe_extract_name
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops.common import unpack_zerodim_and_defer
from pandas.io.formats.printing import pprint_thing
_empty_range = range(0)
class RangeIndex(Int64Index):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
name : object, optional
Name to be stored in the index.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_range: range
# check whether self._data has been called
_cached_data: Optional[np.ndarray] = None
# --------------------------------------------------------------------
# Constructors
def __new__(
cls, start=None, stop=None, step=None, dtype=None, copy=False, name=None,
):
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
start = start._range
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(cls, data: range, name=None, dtype=None) -> "RangeIndex":
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Label = None) -> "RangeIndex":
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result.name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self):
""" return the class to use for construction """
return Int64Index
@property
def _data(self):
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cached_data``. This allows us to
check if the array has been created without accessing ``_data`` and
triggering the construction.
"""
if self._cached_data is None:
self._cached_data = np.arange(
self.start, self.stop, self.step, dtype=np.int64
)
return self._cached_data
@cache_readonly
def _int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header, na_rep="NaN", **kwargs):
return header + list(map(pprint_thing, self._range))
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@cache_readonly
def start(self):
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self):
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@cache_readonly
def stop(self):
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self):
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@cache_readonly
def step(self):
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self):
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
@property
def has_duplicates(self) -> bool:
return False
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
@Appender(_index_shared_docs["get_indexer"])
def get_indexer(self, target, method=None, limit=None, tolerance=None):
if com.any_not_none(method, tolerance, limit) or not is_list_like(target):
return super().get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
target_array = np.asarray(target)
if not (is_integer_dtype(target_array) and target_array.ndim == 1):
# checks/conversions/roundings are delegated to general method
return super().get_indexer(target, method=method, tolerance=tolerance)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
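# Worked example (values are illustrative): for RangeIndex(2, 20, 3), i.e.
# [2, 5, 8, 11, 14, 17], a target of [5, 6, 17] gives locs = [3, 4, 15]; only
# 3 and 15 are non-negative multiples of the step, so the result is [1, -1, 5].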
def tolist(self):
return list(self._range)
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values=None, name: Label = no_default):
name = self.name if name is no_default else name
if values is None:
result = self._simple_new(self._range, name=name)
result._cache = self._cache.copy()
return result
else:
return Int64Index._simple_new(values, name=name)
@doc(Int64Index.copy)
def copy(self, name=None, deep=False, dtype=None, **kwargs):
self._validate_dtype(dtype)
if name is None:
name = self.name
return self.from_range(self._range, name=name)
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna=True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna=True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See Also
--------
numpy.ndarray.argsort
"""
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
return np.arange(len(self))
else:
return np.arange(len(self) - 1, -1, -1)
def equals(self, other) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Sort the resulting index if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default to ``False`` to match the behaviour
from before 0.24.0.
Returns
-------
intersection : Index
"""
self._validate_sort_keyword(sort)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not isinstance(other, RangeIndex):
return super().intersection(other, sort=sort)
if not len(self) or not len(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, t = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
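# Worked example (values are illustrative):
#   RangeIndex(0, 20, 4).intersection(RangeIndex(2, 20, 6))
# has gcd(4, 6) == 2 and (0 - 2) % 2 == 0, so the element sets intersect; the
# combined step is 4 * 6 // 2 == 12 and the result is built from
# range(8, 20, 12), i.e. just [8].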
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
def _extended_gcd(self, a, b):
"""
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
def _union(self, other, sort):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if not len(other) or self.equals(other) or not len(self):
return super()._union(other, sort=sort)
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
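# Worked example (values are illustrative): RangeIndex(0, 10, 2) (the evens
# 0..8) unioned with RangeIndex(1, 11, 2) (the odds 1..9) under sort=None has
# equal steps whose starts and ends differ by step / 2, so the halved-step
# branch above returns the equivalent of RangeIndex(0, 10, 1), i.e. 0..9.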
@doc(Int64Index.join)
def join(self, other, how="left", level=None, return_indexers=False, sort=False):
if how == "outer" and self is not other:
# note: could return RangeIndex in more circumstances
return self._int64index.join(other, how, level, return_indexers, sort)
return super().join(other, how, level, return_indexers, sort)
def _concat(self, indexes, name):
"""
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in indexes if len(obj)]
for obj in non_empty_indexes:
rng: range = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and len(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
result = Int64Index(np.concatenate([x._values for x in indexes]))
return result.rename(name)
step = rng.start - start
non_consecutive = (step != rng.step and len(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatenate([x._values for x in indexes]))
return result.rename(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).rename(name)
def __len__(self) -> int:
"""
return the length of the RangeIndex
"""
return len(self._range)
@property
def size(self) -> int:
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if isinstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self.name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {len(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
# fall back to Int64Index
return super().__getitem__(key)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
if is_integer(other) and other != 0:
if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
start = self.start // other
step = self.step // other
stop = start + len(self) * step
new_range = range(start, stop, step or 1)
return self._simple_new(new_range, name=self.name)
if len(self) == 1:
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
return self._int64index // other
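# Worked example (values are illustrative): RangeIndex(0, 10, 2) // 2 keeps the
# RangeIndex type because both start and step are divisible by 2, giving
# range(0, 5, 1); element-wise this matches [0, 2, 4, 6, 8] // 2.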
def all(self) -> bool:
return 0 not in self._range
def any(self) -> bool:
return any(self._range)
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods, specialized to RangeIndex """
def _make_evaluate_binop(op, step=False):
"""
Parameters
----------
op : callable that accepts 2 params
perform the binary op
step : callable, optional, default to False
op to apply to the step parm if not None
if False, use the existing step
"""
@unpack_zerodim_and_defer(op.__name__)
def _evaluate_numeric_binop(self, other):
if isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
other = extract_array(other, extract_numpy=True)
attrs = self._get_attributes_dict()
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.astype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
name = f"__{op.__name__}__"
return compat.set_function_name(_evaluate_numeric_binop, name, cls)
cls.__add__ = _make_evaluate_binop(operator.add)
cls.__radd__ = _make_evaluate_binop(ops.radd)
cls.__sub__ = _make_evaluate_binop(operator.sub)
cls.__rsub__ = _make_evaluate_binop(ops.rsub)
cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul)
cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul)
cls.__truediv__ = _make_evaluate_binop(operator.truediv, step=operator.truediv)
cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv, step=ops.rtruediv)
RangeIndex._add_numeric_methods()
# =========================== (next source file) ===========================
"""
Recurrent layers.
TODO: write more documentation
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
# Nicer interface of scan
from theano.sandbox.scan import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
sample_weights_orth, \
init_bias, \
constant_shape
from basic import Layer
class RecurrentMultiLayer(Layer):
"""
Constructs a recurrent layer whose transition from h_tm1 to h_t is given
by an MLP or logistic regression. In our ICLR submission this is a
DT-RNN model.
"""
def __init__(self,
rng,
n_hids=[500,500],
activation = [TT.tanh, TT.tanh],
scale=.01,
sparsity = -1,
activ_noise=0.,
weight_noise=False,
dropout = 1.,
init_fn='sample_weights',
bias_fn='init_bias',
bias_scale = 0.,
grad_scale = 1.,
profile = 0,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
:param n_in: number of inputs units
:type n_hids: list of ints
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
(and the right shared variable are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
code relies on name to disambiguate between variables, therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(n_hids) not in (list, tuple):
n_hids = [n_hids]
n_layers = len(n_hids)
if type(scale) not in (list, tuple):
scale = [scale] * n_layers
if type(sparsity) not in (list, tuple):
sparsity = [sparsity] * n_layers
for idx, sp in enumerate(sparsity):
if sp < 0: sparsity[idx] = n_hids[idx]
if type(activation) not in (list, tuple):
activation = [activation] * n_layers
if type(bias_scale) not in (list, tuple):
bias_scale = [bias_scale] * (n_layers-1)
if type(bias_fn) not in (list, tuple):
bias_fn = [bias_fn] * (n_layers-1)
if type(init_fn) not in (list, tuple):
init_fn = [init_fn] * n_layers
for dx in xrange(n_layers):
if dx < n_layers-1:
if type(bias_fn[dx]) is str or type(bias_fn[dx]) is unicode:
bias_fn[dx] = eval(bias_fn[dx])
if type(init_fn[dx]) is str or type(init_fn[dx]) is unicode:
init_fn[dx] = eval(init_fn[dx])
if type(activation[dx]) is str or type(activation[dx]) is unicode:
activation[dx] = eval(activation[dx])
self.scale = scale
self.n_layers = n_layers
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
assert rng is not None, "random number generator should not be empty!"
super(RecurrentMultiLayer, self).__init__(n_hids[0],
n_hids[-1],
rng,
name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
self.b_hhs.append(theano.shared(
self.bias_fn[dx-1](self.n_hids[dx],
self.bias_scale[dx-1],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
self.params_grad_scale = [self.grad_scale for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
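# Shape sketch for the default n_hids=[500, 500] (illustrative): W_hhs[0] maps
# the previous top-level hidden state into layer 0 (500 x 500), W_hhs[1] maps
# layer 0 into layer 1 (500 x 500), and b_hhs holds a single bias vector of
# length 500 for layer 1; layer 0 has no separate bias here, its pre-activation
# is W_hhs[0] . h_tm1 + state_below (see step_fprop below).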
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
init_state=None,
use_noise=True,
no_noise_bias=False):
"""
Constructs the computational graph of a single step of the recurrent
layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
        preactiv = TT.dot(state_before, W_hhs[0]) + state_below
h = self.activation[0](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval +=[h]
for dx in xrange(1, self.n_layers):
preactiv = TT.dot(h, W_hhs[dx]) + b_hhs[dx-1]
h = self.activation[dx](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
def fprop(self,
state_below,
mask=None,
init_state=None,
n_steps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False):
"""
        Evaluates the forward pass through a recurrent layer
:type state_below: theano variable
:param state_below: the input of the recurrent layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type init_state: theano variable or None
:param init_state: initial state for the hidden layer
:type n_steps: None or int or theano scalar
        :param n_steps: Number of steps the recurrent network performs
:type batch_size: int
:param batch_size: the size of the minibatch over which scan runs
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type truncate_gradient: int
:param truncate_gradient: If negative, no truncation is used,
otherwise truncated BPTT is used, where you go backwards only this
amount of steps
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if n_steps is None:
n_steps = state_below.shape[0]
if batch_size and batch_size != 1:
n_steps = n_steps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((n_steps, batch_size, self.n_in))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.n_hids[0])
else:
init_state = TT.alloc(floatX(0), self.n_hids[0])
if mask:
inps = [state_below, mask]
fn = lambda x,y,z : self.step_fprop(x,y,None, z, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below]
fn = lambda tx, ty: self.step_fprop(tx, None, None, ty,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
if self.dropout < 1. and use_noise:
# build dropout mask outside scan
allhid = numpy.sum(self.n_hids)
shape = state_below.shape
if state_below.ndim == 3:
alldpmask = self.trng.binomial(
(n_steps, batch_size, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
else:
alldpmask = self.trng.binomial(
(n_steps, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
inps.append(alldpmask)
if mask:
fn = lambda x,y,z,u : self.step_fprop(x,y,z,u,use_noise=use_noise)
else:
fn = lambda tx, ty, tu: self.step_fprop(tx,None,ty,tu,
use_noise=use_noise)
rval, updates = theano.scan(fn,
sequences = inps,
outputs_info = [None]*(self.n_layers-1) +
[init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = n_steps)
if not isinstance(rval,(list, tuple)):
rval = [rval]
new_h = rval[-1]
self.out = rval[-1]
self.rval = rval
        self.updates = updates
return self.out
class RecurrentMultiLayerInp(RecurrentMultiLayer):
"""
Similar to the RecurrentMultiLayer, with the exception that the input is
fed into the top layer of the MLP (rather than being an input to the
MLP).
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx < self.n_layers-1:
self.b_hhs.append(theano.shared(
self.bias_fn[dx](self.n_hids[dx],
self.bias_scale[dx],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+b_hhs[0])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers-1):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+b_hhs[dx])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
h = self.activation[-1](TT.dot(h, W_hhs[-1]) + state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
class RecurrentMultiLayerShortPath(RecurrentMultiLayer):
"""
A similar layer to RecurrentMultiLayer (the DT-RNN), with the difference
that we have shortcut connections in the MLP representing the transition
from previous hidden state to the next
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
self.b_hhs.append(theano.shared(
self.bias_fn[dx-1](self.n_hids[dx],
self.bias_scale[dx-1],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
[x for x in self.W_shortp]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
else:
b_hhs = self.b_hhs
W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
W_shp = self.W_shortp
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+
TT.dot(state_before,
W_shp[dx-1])+b_hhs[dx-1])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
class RecurrentMultiLayerShortPathInp(RecurrentMultiLayer):
"""
Similar to the RecurrentMultiLayerShortPath class, just that the input
is fed into the last layer of the MLP (similar to
RecurrentMultiLayerInp).
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
if dx < self.n_layers-1:
self.b_hhs.append(theano.shared(
self.bias_fn[dx](self.n_hids[dx],
self.bias_scale[dx],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
[x for x in self.W_shortp]
self.restricted_params = [x for x in self.params]
self.params_grad_scale = [self.grad_scale for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
else:
b_hhs = self.b_hhs
W_shp = [(x+y) for x, y in zip(self.W_shortp, self.nW_shortp)]
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
W_shp = self.W_shortp
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+b_hhs[0])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers-1):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+
TT.dot(state_before,
W_shp[dx-1])+b_hhs[dx])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
h = self.activation[-1](TT.dot(h, W_hhs[-1]) +
TT.dot(state_before, W_shp[-1])+state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
                    h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
                    dpidx = dpidx + h.shape[1]
                else:
                    h = h * dpmask[dpidx:dpidx+h.shape[0]]
                    dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval +=[h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
            rval[-1] = h
return rval
class RecurrentMultiLayerShortPathInpAll(RecurrentMultiLayer):
"""
Similar to RecurrentMultiLayerShortPathInp class, just that the input is
    fed to all layers of the MLP depicting the deep transition from h_tm1
to h_t.
"""
def _init_params(self):
self.W_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
self.params = [x for x in self.W_hhs] +\
[x for x in self.W_shortp]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
else:
W_hhs = self.W_hhs
W_shp = self.W_shortp
def slice_state_below(dx, sb = state_below):
st = 0
for p in xrange(dx):
st += self.n_hids[p]
ed = st + self.n_hids[dx]
if sb.ndim == 1:
return sb[st:ed]
else:
return sb[:,st:ed]
h = self.activation[0](TT.dot(state_before, W_hhs[0]) + slice_state_below(0))
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers):
h = self.activation[dx](TT.dot(h, W_hhs[dx]) +
TT.dot(state_before, W_shp[dx-1]) +
slice_state_below(dx))
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
class RecurrentLayer(Layer):
"""
Standard recurrent layer with gates.
    See the arXiv version of our paper.
"""
def __init__(self, rng,
n_hids=500,
scale=.01,
sparsity = -1,
activation = TT.tanh,
activ_noise=0.,
weight_noise=False,
bias_fn='init_bias',
bias_scale = 0.,
dropout = 1.,
init_fn='sample_weights',
kind_reg = None,
grad_scale = 1.,
profile = 0,
gating = False,
reseting = False,
gater_activation = TT.nnet.sigmoid,
reseter_activation = TT.nnet.sigmoid,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
        :param n_in: number of input units
:type n_hids: int
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
(and the right shared variable are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type gating: bool
:param gating: If true, an update gate is used
:type reseting: bool
:param reseting: If true, a reset gate is used
:type gater_activation: string or function
        :param gater_activation: The activation function of the update gate
:type reseter_activation: string or function
        :param reseter_activation: The activation function of the reset gate
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
            code rely on names to disambiguate between variables; therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(init_fn) is str or type(init_fn) is unicode:
init_fn = eval(init_fn)
if type(bias_fn) is str or type(bias_fn) is unicode:
bias_fn = eval(bias_fn)
if type(activation) is str or type(activation) is unicode:
activation = eval(activation)
if type(gater_activation) is str or type(gater_activation) is unicode:
gater_activation = eval(gater_activation)
if type(reseter_activation) is str or type(reseter_activation) is unicode:
reseter_activation = eval(reseter_activation)
self.scale = scale
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
self.gating = gating
self.reseting = reseting
self.gater_activation = gater_activation
self.reseter_activation = reseter_activation
assert rng is not None, "random number generator should not be empty!"
super(RecurrentLayer, self).__init__(self.n_hids,
self.n_hids, rng, name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
self.W_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="W_%s"%self.name)
self.params = [self.W_hh]
if self.gating:
self.G_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="G_%s"%self.name)
self.params.append(self.G_hh)
if self.reseting:
self.R_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="R_%s"%self.name)
self.params.append(self.R_hh)
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
        if self.weight_noise:
            self.nW_hh = theano.shared(self.W_hh.get_value()*0, name='noise_'+self.W_hh.name)
            self.noise_params = [self.nW_hh]
            if self.gating:
                self.nG_hh = theano.shared(self.G_hh.get_value()*0, name='noise_'+self.G_hh.name)
                self.noise_params.append(self.nG_hh)
            if self.reseting:
                self.nR_hh = theano.shared(self.R_hh.get_value()*0, name='noise_'+self.R_hh.name)
                self.noise_params.append(self.nR_hh)
            self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
                                          for x in self.noise_params]
def step_fprop(self,
state_below,
mask = None,
state_before = None,
gater_below = None,
reseter_below = None,
use_noise=True,
no_noise_bias = False):
"""
Constructs the computational graph of this layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type gater_below: theano variable
:param gater_below: the input to the update gate
:type reseter_below: theano variable
:param reseter_below: the input to the reset gate
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hh = self.W_hh + self.nW_hh
if self.gating:
G_hh = self.G_hh + self.nG_hh
if self.reseting:
R_hh = self.R_hh + self.nR_hh
else:
W_hh = self.W_hh
if self.gating:
G_hh = self.G_hh
if self.reseting:
R_hh = self.R_hh
if self.reseting and reseter_below:
reseter = self.reseter_activation(TT.dot(state_before, R_hh) +
reseter_below)
state_before_ = reseter * state_before
else:
state_before_ = state_before
preactiv = TT.dot(state_before_, W_hh) + state_below
h = self.activation(preactiv)
if self.gating and gater_below:
gater = self.gater_activation(TT.dot(state_before, G_hh) +
gater_below)
h = gater * h + (1-gater) * state_before
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
return h
def fprop(self,
state_below,
mask=None,
init_state=None,
gater_below=None,
reseter_below=None,
nsteps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False
):
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if nsteps is None:
nsteps = state_below.shape[0]
if batch_size and batch_size != 1:
nsteps = nsteps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((nsteps, batch_size, self.n_in))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.n_hids)
else:
init_state = TT.alloc(floatX(0), self.n_hids)
# FIXME: Find a way to clean this up
if self.reseting and reseter_below:
if self.gating and gater_below:
if mask:
inps = [state_below, mask, gater_below, reseter_below]
fn = lambda x,y,g,r,z : self.step_fprop(x,y,z, gater_below=g, reseter_below=r, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below, gater_below, reseter_below]
fn = lambda tx, tg,tr, ty: self.step_fprop(tx, None, ty, gater_below=tg,
reseter_below=tr,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
if mask:
inps = [state_below, mask, reseter_below]
fn = lambda x,y,r,z : self.step_fprop(x,y,z, use_noise=use_noise,
reseter_below=r,
no_noise_bias=no_noise_bias)
else:
inps = [state_below, reseter_below]
fn = lambda tx,tr,ty: self.step_fprop(tx, None, ty,
reseter_below=tr,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
if self.gating and gater_below:
if mask:
inps = [state_below, mask, gater_below]
fn = lambda x,y,g,z : self.step_fprop(x,y,z, gater_below=g, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below, gater_below]
fn = lambda tx, tg, ty: self.step_fprop(tx, None, ty, gater_below=tg,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
if mask:
inps = [state_below, mask]
fn = lambda x,y,z : self.step_fprop(x,y,z, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below]
fn = lambda tx, ty: self.step_fprop(tx, None, ty,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
rval, updates = theano.scan(fn,
sequences = inps,
outputs_info = [init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = nsteps)
new_h = rval
self.out = rval
self.rval = rval
        self.updates = updates
return self.out
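# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for documentation; not part of the
# original library). It assumes fprop receives the already-projected input
# sequence of shape (n_steps, batch, n_hids); all names below are
# hypothetical.
def _example_recurrent_layer_usage():
    rng = numpy.random.RandomState(1234)
    rec = RecurrentLayer(rng,
                         n_hids=500,
                         activation=TT.tanh,
                         gating=True,
                         name='rec_example')
    # projected inputs to the hidden state and to the update gate; these
    # projections would normally be produced by a feed-forward layer
    x_proj = TT.tensor3('x_proj')
    gater_in = TT.tensor3('gater_in')
    # builds the scan graph over time and returns the hidden-state sequence
    return rec.fprop(x_proj, gater_below=gater_in, use_noise=False)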
| |
"""
A deep neural network (multilayer perceptron) sklearn-style classifier.
Similar to sklearn.neural_network.MLPClassifier, but using TensorFlow.
"""
import logging
import numpy as np
from sklearn.base import ClassifierMixin
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.multiclass import type_of_target
import tensorflow as tf
from tensorflow.python.ops import nn
from muffnn.mlp.base import MLPBaseEstimator
from muffnn.core import affine
_LOGGER = logging.getLogger(__name__)
class MLPClassifier(MLPBaseEstimator, ClassifierMixin):
"""
A deep neural network (multilayer perceptron) classifier using TensorFlow.
Parameters
----------
hidden_units : tuple or list, optional
A list of integers indicating the number of hidden layers and their
sizes.
batch_size : int, optional
The batch size for learning and prediction. If there are fewer
        examples than the batch size during fitting, then the number of
examples will be used instead.
n_epochs : int, optional
The number of epochs (iterations through the training data) when
fitting.
keep_prob : float, optional
The probability of keeping values in dropout. A value of 1.0 means that
dropout will not be used. cf. `TensorFlow documentation
<https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#dropout>`
activation : callable, optional
The activation function. See tensorflow.python.ops.nn. Setting this to
tf.nn.selu will also cause alpha dropout to be used, implementing a
Self-Normalizing Neural Network (Klambauer et al., 2017).
random_state : int, RandomState instance or None, optional
If int, the random number generator seed. If RandomState instance,
the random number generator itself. If None, then `np.random` will be
used.
solver : a subclass of `tf.train.Optimizer`, optional
The solver to use to minimize the loss.
solver_kwargs : dict, optional
Additional keyword arguments to pass to `solver` upon construction.
See the TensorFlow documentation for possible options. Typically,
one would want to set the `learning_rate`.
transform_layer_index : int, optional
The index of the hidden layer to use to transform inputs. If not given,
it defaults to the last hidden layer or output logits in the case that
no hidden layers are used.
Attributes
----------
input_layer_sz_ : int
The dimensionality of the input (i.e., number of features).
is_sparse_ : bool
Whether a model taking sparse input was fit.
classes_ : list
A list of the class labels.
graph_ : tensorflow.python.framework.ops.Graph
The TensorFlow graph for the model
Notes
-----
For multilabel classification, one can pass a 2D int array with 0 or more
1s per row to `fit`.
There is currently no dropout between the sparse input layer and first
hidden layer. Dropout on the sparse input layer would undo the benefits of
sparsity because the dropout layer is dense.
"""
def __init__(self, hidden_units=(256,), batch_size=64, n_epochs=5,
keep_prob=1.0, activation=nn.relu,
random_state=None, solver=tf.train.AdamOptimizer,
solver_kwargs=None, transform_layer_index=None):
self.hidden_units = hidden_units
self.batch_size = batch_size
self.n_epochs = n_epochs
self.keep_prob = keep_prob
self.activation = activation
self.random_state = random_state
self.solver = solver
self.solver_kwargs = solver_kwargs
self.transform_layer_index = transform_layer_index
def _init_model_output(self, t):
if self.multilabel_:
output_size = self.n_classes_
elif self.n_classes_ > 2:
output_size = self.n_classes_
else:
output_size = 1
if self.is_sparse_ and not self.hidden_units:
t = affine(t, output_size, input_size=self.input_layer_sz_,
scope='output_layer', sparse_input=True)
else:
if self.keep_prob != 1.0:
if self.activation is tf.nn.selu:
t = tf.contrib.nn.alpha_dropout(
t, keep_prob=self._keep_prob)
else:
t = tf.nn.dropout(t, keep_prob=self._keep_prob)
t = affine(t, output_size, scope='output_layer')
if self.multilabel_:
self.input_targets_ = \
tf.placeholder(tf.int64, [None, self.n_classes_], "targets")
self.output_layer_ = tf.nn.sigmoid(t)
self._zeros = tf.zeros_like(self.output_layer_)
elif self.n_classes_ > 2:
self.input_targets_ = tf.placeholder(tf.int64, [None], "targets")
self.output_layer_ = tf.nn.softmax(t)
else:
self.input_targets_ = tf.placeholder(tf.int64, [None], "targets")
t = tf.reshape(t, [-1]) # Convert to 1d tensor.
self.output_layer_ = tf.nn.sigmoid(t)
return t
def _init_model_objective_fn(self, t):
def reduce_weighted_mean(loss, weights):
weighted = tf.multiply(loss, weights)
return tf.divide(tf.reduce_sum(weighted),
tf.reduce_sum(weights))
if self.multilabel_:
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
logits=t, labels=tf.cast(self.input_targets_, np.float32))
            # entries equal to -1 mark missing labels and are masked out below
            y_missing = tf.equal(self.input_targets_, -1)
# reshape the weights of shape (batch_size,) to shape
# (batch_size, n_classes_) using ``tile`` to place the
# values from self._sample_weight into each column of the
# resulting matrix.
# This allows us to arrive at the correct divisor in the
# weighted mean calculation by summing the matrix.
sample_weight = tf.reshape(self._sample_weight, (-1, 1))
sample_weight = tf.tile(sample_weight, (1, self.n_classes_))
            self._obj_func = reduce_weighted_mean(
                tf.where(y_missing, self._zeros, cross_entropy),
                tf.where(y_missing, self._zeros, sample_weight))
elif self.n_classes_ > 2:
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=t, labels=self.input_targets_)
self._obj_func = reduce_weighted_mean(
cross_entropy, self._sample_weight)
else:
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(
logits=t, labels=tf.cast(self.input_targets_, np.float32))
self._obj_func = reduce_weighted_mean(
cross_entropy, self._sample_weight)
def partial_fit(self, X, y,
monitor=None, sample_weight=None, classes=None):
"""Fit the model on a batch of training data.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data
y : numpy array of shape [n_samples, n_targets]
Target values
classes : array, shape (n_classes,)
Classes to be used across calls to partial_fit. If not set in the
first call, it will be inferred from the given targets. If
subsequent calls include additional classes, they will fail.
monitor : callable, optional
The monitor is called after each iteration with the current
iteration, a reference to the estimator, and a dictionary with
{'loss': loss_value} representing the loss calculated by the
objective function at this iteration.
If the callable returns True the fitting procedure is stopped.
The monitor can be used for various things such as computing
held-out estimates, early stopping, model introspection,
            and snapshotting.
sample_weight : numpy array of shape (n_samples,)
Per-sample weights. Re-scale the loss per sample.
Higher weights force the estimator to put more emphasis
on these samples. Sample weights are normalized per-batch.
Returns
-------
self : returns an instance of self.
Notes
-----
This is based on
http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html
"""
return super(MLPClassifier, self).partial_fit(
X, y,
monitor=monitor, sample_weight=sample_weight, classes=classes)
def _is_multilabel(self, y):
"""
Return whether the given target array corresponds to a multilabel
problem.
"""
temp_y = y.copy()
temp_y[np.zeros_like(temp_y, dtype=bool) | (temp_y == -1)] = 1
target_type = type_of_target(temp_y)
if target_type in ['binary', 'multiclass']:
return False
elif target_type == 'multilabel-indicator':
return True
else:
# Raise an error, as in
# sklearn.utils.multiclass.check_classification_targets.
raise ValueError("Unknown label type: %s" % target_type)
def _fit_targets(self, y, classes=None):
self.multilabel_ = self._is_multilabel(y)
        # If provided, use classes to fit the encoder and set classes_.
# Otherwise, find the unique classes in y.
if classes is not None:
y = classes
if self.multilabel_:
self._enc = None
self.classes_ = np.arange(y.shape[1])
self.n_classes_ = y.shape[1]
else:
self._enc = LabelEncoder().fit(y)
self.classes_ = self._enc.classes_
self.n_classes_ = len(self.classes_)
def _transform_targets(self, y):
return y if self.multilabel_ else self._enc.transform(y)
def predict_proba(self, X):
"""Predict probabilities for each class.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Examples to make predictions about.
Returns
-------
C : array, shape = (n_samples, n_classes)
Predicted probabilities for each class
"""
y_pred = self._compute_output(X)
if len(y_pred.shape) == 1:
            # The TF model returns a 1d array for binary classification.
# To conform with sklearn's LogisticRegression, return a 2D array.
y_pred = np.column_stack((1.0 - y_pred, y_pred))
return y_pred
def predict(self, X):
"""Make predictions.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Examples to make predictions about.
Returns
-------
C : array
Predicted values. For multiclass or binary classification, this
returns a 1-d array with the highest scoring (most probable) labels
for each input. For multilabel, it returns a 2-d array with rows of
0/1 indicators, one per label, for each input.
"""
class_probs = self.predict_proba(X)
if self.multilabel_:
            return (class_probs >= 0.5).astype(int)
else:
indices = class_probs.argmax(axis=1)
return self.classes_[indices]
def __getstate__(self):
state = super(MLPClassifier, self).__getstate__()
# Add the fitted attributes particular to this subclass.
if self._is_fitted:
state['_enc'] = self._enc
state['classes_'] = self.classes_
state['multilabel_'] = self.multilabel_
state['n_classes_'] = self.n_classes_
return state
    def score(self, X, y):
        # accuracy over labeled entries; targets equal to -1 (missing labels)
        # are excluded from the calculation
        accuracy = np.array(y) == self.predict(X)
        accuracy = accuracy[np.array(y) != -1].mean()
        return accuracy
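# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for documentation; not part of muffnn).
# It assumes the base estimator exposes the usual sklearn ``fit``; the data
# is synthetic and the hyperparameters are arbitrary.
def _example_mlp_classifier_usage():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=20, random_state=0)
    clf = MLPClassifier(hidden_units=(64, 32), n_epochs=10, keep_prob=0.8,
                        random_state=42)
    clf.fit(X, y)
    probs = clf.predict_proba(X)  # shape (n_samples, 2), one column per class
    preds = clf.predict(X)        # 1-d array of predicted class labels
    return probs, preds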
| |
#!/usr/bin/env python
"""
Copyright (c) 2020 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.eth import GmiiFrame, GmiiSource, GmiiSink
from cocotbext.axi import AxiStreamBus, AxiStreamSource, AxiStreamSink
class TB:
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
self._enable_generator_rx = None
self._enable_generator_tx = None
self._enable_cr_rx = None
self._enable_cr_tx = None
cocotb.start_soon(Clock(dut.rx_clk, 8, units="ns").start())
cocotb.start_soon(Clock(dut.tx_clk, 8, units="ns").start())
self.gmii_source = GmiiSource(dut.gmii_rxd, dut.gmii_rx_er, dut.gmii_rx_dv,
dut.rx_clk, dut.rx_rst, dut.rx_clk_enable, dut.rx_mii_select)
self.gmii_sink = GmiiSink(dut.gmii_txd, dut.gmii_tx_er, dut.gmii_tx_en,
dut.tx_clk, dut.tx_rst, dut.tx_clk_enable, dut.tx_mii_select)
self.axis_source = AxiStreamSource(AxiStreamBus.from_prefix(dut, "tx_axis"), dut.tx_clk, dut.tx_rst)
self.axis_sink = AxiStreamSink(AxiStreamBus.from_prefix(dut, "rx_axis"), dut.rx_clk, dut.rx_rst)
dut.rx_clk_enable.setimmediatevalue(1)
dut.tx_clk_enable.setimmediatevalue(1)
dut.rx_mii_select.setimmediatevalue(0)
dut.tx_mii_select.setimmediatevalue(0)
dut.rx_ptp_ts.setimmediatevalue(0)
dut.tx_ptp_ts.setimmediatevalue(0)
dut.ifg_delay.setimmediatevalue(0)
async def reset(self):
self.dut.rx_rst.setimmediatevalue(0)
self.dut.tx_rst.setimmediatevalue(0)
await RisingEdge(self.dut.tx_clk)
await RisingEdge(self.dut.tx_clk)
self.dut.rx_rst <= 1
self.dut.tx_rst <= 1
await RisingEdge(self.dut.tx_clk)
await RisingEdge(self.dut.tx_clk)
self.dut.rx_rst <= 0
self.dut.tx_rst <= 0
await RisingEdge(self.dut.tx_clk)
await RisingEdge(self.dut.tx_clk)
def set_enable_generator_rx(self, generator=None):
if self._enable_cr_rx is not None:
self._enable_cr_rx.kill()
self._enable_cr_rx = None
self._enable_generator_rx = generator
if self._enable_generator_rx is not None:
self._enable_cr_rx = cocotb.start_soon(self._run_enable_rx())
def set_enable_generator_tx(self, generator=None):
if self._enable_cr_tx is not None:
self._enable_cr_tx.kill()
self._enable_cr_tx = None
self._enable_generator_tx = generator
if self._enable_generator_tx is not None:
self._enable_cr_tx = cocotb.start_soon(self._run_enable_tx())
def clear_enable_generator_rx(self):
self.set_enable_generator_rx(None)
def clear_enable_generator_tx(self):
self.set_enable_generator_tx(None)
async def _run_enable_rx(self):
for val in self._enable_generator_rx:
self.dut.rx_clk_enable <= val
await RisingEdge(self.dut.rx_clk)
async def _run_enable_tx(self):
for val in self._enable_generator_tx:
self.dut.tx_clk_enable <= val
await RisingEdge(self.dut.tx_clk)
async def run_test_rx(dut, payload_lengths=None, payload_data=None, ifg=12, enable_gen=None, mii_sel=False):
tb = TB(dut)
tb.gmii_source.ifg = ifg
tb.dut.ifg_delay <= ifg
tb.dut.rx_mii_select <= mii_sel
tb.dut.tx_mii_select <= mii_sel
if enable_gen is not None:
tb.set_enable_generator_rx(enable_gen())
tb.set_enable_generator_tx(enable_gen())
await tb.reset()
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
test_frame = GmiiFrame.from_payload(test_data)
await tb.gmii_source.send(test_frame)
for test_data in test_frames:
rx_frame = await tb.axis_sink.recv()
assert rx_frame.tdata == test_data
assert rx_frame.tuser == 0
assert tb.axis_sink.empty()
await RisingEdge(dut.rx_clk)
await RisingEdge(dut.rx_clk)
async def run_test_tx(dut, payload_lengths=None, payload_data=None, ifg=12, enable_gen=None, mii_sel=False):
tb = TB(dut)
tb.gmii_source.ifg = ifg
tb.dut.ifg_delay <= ifg
tb.dut.rx_mii_select <= mii_sel
tb.dut.tx_mii_select <= mii_sel
if enable_gen is not None:
tb.set_enable_generator_rx(enable_gen())
tb.set_enable_generator_tx(enable_gen())
await tb.reset()
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
await tb.axis_source.send(test_data)
for test_data in test_frames:
rx_frame = await tb.gmii_sink.recv()
assert rx_frame.get_payload() == test_data
assert rx_frame.check_fcs()
assert rx_frame.error is None
assert tb.gmii_sink.empty()
await RisingEdge(dut.tx_clk)
await RisingEdge(dut.tx_clk)
def size_list():
return list(range(60, 128)) + [512, 1514] + [60]*10
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
def cycle_en():
return itertools.cycle([0, 0, 0, 1])
if cocotb.SIM_NAME:
for test in [run_test_rx, run_test_tx]:
factory = TestFactory(test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("ifg", [12])
factory.add_option("enable_gen", [None, cycle_en])
factory.add_option("mii_sel", [False, True])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
def test_eth_mac_1g(request):
dut = "eth_mac_1g"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "axis_gmii_rx.v"),
os.path.join(rtl_dir, "axis_gmii_tx.v"),
os.path.join(rtl_dir, "lfsr.v"),
]
parameters = {}
parameters['DATA_WIDTH'] = 8
parameters['ENABLE_PADDING'] = 1
parameters['MIN_FRAME_LENGTH'] = 64
parameters['TX_PTP_TS_ENABLE'] = 0
parameters['TX_PTP_TS_WIDTH'] = 96
parameters['TX_PTP_TAG_ENABLE'] = parameters['TX_PTP_TS_ENABLE']
parameters['TX_PTP_TAG_WIDTH'] = 16
parameters['RX_PTP_TS_ENABLE'] = 0
parameters['RX_PTP_TS_WIDTH'] = 96
parameters['TX_USER_WIDTH'] = (parameters['TX_PTP_TAG_WIDTH'] if parameters['TX_PTP_TAG_ENABLE'] else 0) + 1
parameters['RX_USER_WIDTH'] = (parameters['RX_PTP_TS_WIDTH'] if parameters['RX_PTP_TS_ENABLE'] else 0) + 1
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
| |
import os
from dateutil.parser import parse as date_parse
from datetime import timezone, datetime
from flask import Flask, request, make_response, render_template, Response, g, jsonify
from flask_cors import CORS, cross_origin
from flask_weasyprint import HTML, render_pdf
from functools import wraps
import mysql.connector
import bcrypt
import config
from util import (encode_json,
parse_range,
make_qr,
dict_dates_to_utc,
SQL_one_line)
from log import log
import udp
log.info ('Start.')
DEBUG = False
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
QR_CODE_PATH = os.path.join(APP_ROOT, 'static', 'img', 'qr')
app = Flask(__name__)
CORS(app)
udp.go()
@app.before_request
def before_request():
try:
g.cnx, g.cursor = get_database()
except Exception:
        return make_failed_response(code=503, error_message="No database connection could be established.")
@app.teardown_request
def teardown_request(exception):
try:
g.cursor.close()
g.cnx.close()
except AttributeError:
pass
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
# user name may be the user's id or their email
if username.isdigit():
cursor.execute(""" SELECT * FROM user
WHERE id = %s """, (username,))
else:
cursor.execute(""" SELECT * FROM user
WHERE email = %s """, (username,))
rows = cursor.fetchall()
    if not len(rows):
        return False
    g.user = rows[0]
if bcrypt.checkpw(password.encode('ascii'), rows[0]['password'].encode('ascii')):
return True
else:
return False
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
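# ---------------------------------------------------------------------------
# Illustrative sketch (the route below is hypothetical and not registered by
# this module): a view is protected by stacking @requires_auth under
# @app.route; the client supplies HTTP Basic credentials, where the username
# is either the user's numeric id or their email.
#
# @app.route('/api/v1/whoami')
# @requires_auth
# def whoami():
#     return make_success_response(dict(id=g.user['id'], email=g.user['email']))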
def get_database():
""" returns a connection and cursor
e.g. cnx, cursor = get_database()
"""
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(named_tuple=True)
return cnx, cursor
def make_success_response(data, code=200, mimetype='application/json'):
payload = encode_json({'success': True, 'data': data})
resp = make_response(payload, code)
resp.mimetype = mimetype
return resp
def make_failed_response(error_message, code=400, mimetype='application/json', data=None):
payload = encode_json({'success': False, 'error': error_message, 'data':data})
resp = make_response(payload, code)
resp.mimetype = mimetype
return resp
def test_reservation(start, end, type):
'''
find reservations which collide with the given start and end datetime and match type
'''
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
cursor.execute("""
SELECT * FROM reservation
WHERE DATE_SUB(start_time, INTERVAL safe_zone hour_second) -- start of current reservations
< %s -- end of new reservation
AND end_time -- end of current reservations
>= %s -- start of new reservation
AND type = %s
; """, (start, end, type))
colliding_reservations = cursor.fetchall()
log.info('Executed SQL:' + SQL_one_line(cursor.statement))
# log.info('Colliding reservations ' + str([row['id'] for row in colliding_reservations]))
cursor.close()
cnx.close()
return colliding_reservations
@app.route('/api/v1/log')
def log_endpoint ():
log_lines = []
with open('app.log', 'r') as logfile:
log_lines = logfile.readlines()
log_lines.sort(key=lambda x:x[:25], reverse=True)
return "".join(log_lines)
@app.route('/api/v1/testauth', methods=['POST'])
def testauth():
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
test_user = request.get_json(force=True)
try:
# user name may be the user's id or their email
if test_user['username'].isdigit():
cursor.execute(""" SELECT * FROM user
WHERE id = %s """, (test_user['username'],))
else:
cursor.execute(""" SELECT * FROM user
WHERE email = %s """, (test_user['username'],))
except Exception as e:
return make_failed_response(str(e))
else:
rows = cursor.fetchall()
# user does not exist
if not len(rows):
return make_failed_response("id / email not found")
else:
# if not config.app['password_needed']:
# rows[0].pop('password', None)
# return make_success_response(rows[0])
if not test_user.get('password'):
rows[0].pop('password', None)
return make_success_response(rows[0])
# test password
if bcrypt.checkpw(test_user['password'].encode('ascii'), rows[0]['password'].encode('ascii')):
rows[0].pop('password', None)
return make_success_response(rows[0])
else:
return make_failed_response('incorrect password')
finally:
cursor.close()
cnx.close()
# -----------------------------------------------------------------------------
# Users
# -----------------------------------------------------------------------------
@app.route('/api/v1/user/<int:id>', methods=['GET', 'DELETE'])
def one_user(id):
# get a user
if request.method == "GET":
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
try:
cursor.execute(""" SELECT id, email, fname, lname, type, created_at
FROM user
WHERE id = %s """, (id,))
except Exception as e:
return make_failed_response(str(e))
else:
rows = cursor.fetchall()
if not len(rows):
return make_failed_response("id not found")
else:
# get their loaned devices
cursor.execute(""" SELECT * FROM device
WHERE loaned_by = %s; """,
(id,))
loaned = cursor.fetchall()
# get their privileges
cursor.execute(""" SELECT type FROM device_type_privilage
WHERE user_id = %s; """,
(id,))
privileges = cursor.fetchall()
# get their classes
cursor.execute(""" SELECT class.* FROM class, class_registration
WHERE class_registration.class_id = class.id
AND class_registration.user_id = %s; """,
(id,))
classes = cursor.fetchall()
rows[0]['loaned'] = loaned
rows[0]['privileges'] = privileges
rows[0]['classes'] = classes
return make_success_response(rows[0])
finally:
cursor.close()
cnx.close()
# delete a user
if request.method == "DELETE":
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor()
try:
cursor.execute(""" DELETE FROM user
WHERE id = %s """, (id,))
except Exception as e:
return make_failed_response(str(e))
else:
cnx.commit()
if not cursor.rowcount:
return make_failed_response("id not found")
else:
return make_success_response(dict(id=id))
finally:
cursor.close()
cnx.close()
@app.route("/api/v1/user", methods=['POST', 'GET'])
def user():
# add a new user
if request.method == "POST":
cnx, cursor = get_database()
new_user = request.get_json(force=True)
hashed_password = bcrypt.hashpw(new_user['password'].encode(), bcrypt.gensalt())
try:
cursor.execute(
""" INSERT INTO user (email, fname, lname, type, password)
VALUES (%s, %s, %s, %s, %s); """,
(
new_user["email"],
new_user["fname"],
new_user["lname"],
new_user["type"],
hashed_password
)
)
except Exception as e:
cnx.rollback()
msg = str(e)
if 'email_UNIQUE' in msg:
return make_failed_response("Sorry, that email is taken!")
return make_failed_response(msg)
else:
cnx.commit()
new_id = cursor.lastrowid
data = dict(id=new_id)
make_qr(new_id, QR_CODE_PATH)
return make_success_response(data)
finally:
cursor.close()
cnx.close()
# get all users
if request.method == "GET":
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
try:
cursor.execute(""" SELECT id, email, fname, lname, type, created_at
FROM user """)
except Exception as e:
return make_failed_response(str(e))
else:
return make_success_response(cursor.fetchall())
finally:
cursor.close()
cnx.close()
@app.route('/api/v1/user/search')
def user_search():
criteria = " OR ".join(["{} LIKE '%{}%'".format(key, request.args.get(key)) for key in request.args if request.args.get(key) not in (None, '')])
g.cursor.execute(""" SELECT id, email, fname, lname, type, created_at
FROM user
WHERE {};""".format(criteria))
log.debug(g.cursor.statement)
return make_success_response(g.cursor.fetchall())
@app.route('/api/v1/user/<int:user_id>/privilege/<type>', methods=['PUT', 'DELETE'])
def user_privilege (user_id, type):
cnx, cursor = get_database()
if request.method == 'PUT':
try:
cursor.execute(""" INSERT INTO device_type_privilage (user_id, type)
VALUES (%s, %s); """, (user_id, type))
except Exception as e:
log.error('Attempted SQL: ' + SQL_one_line(cursor.statement))
e = 'User {} already has privilege for device type "{}"'.format(user_id, type)
log.info(e)
return make_failed_response(e)
else:
cnx.commit()
return make_success_response(dict(id=cursor.lastrowid))
finally:
cursor.close()
cnx.close()
if request.method == 'DELETE':
try:
cursor.execute(""" DELETE FROM device_type_privilage
WHERE user_id = %s
AND type = %s; """, (user_id, type))
except Exception as e:
return make_failed_response(str(e))
else:
cnx.commit()
if not cursor.rowcount:
return make_failed_response("ids not found")
else:
log.info('Removed privilege {} for {}'.format(type, user_id))
return make_success_response(dict(user_id=user_id, type=type))
finally:
cursor.close()
cnx.close()
# -----------------------------------------------------------------------------
# Cards
# -----------------------------------------------------------------------------
@app.route('/api/v1/user/card/<user_selection>')
def user_card(user_selection):
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
ids = parse_range(user_selection)
sql_where = ', '.join(str(i) for i in ids)
cursor.execute(""" SELECT id, email, fname, lname, type, created_at FROM user
WHERE id IN ({});""".format(sql_where)) # yes, this is safe
data = cursor.fetchall()
cursor.close()
cnx.close()
return render_template('cards.html', users=data)
@app.route('/api/v1/user/card/<user_selection>/pdf')
def user_card_pdf(user_selection):
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
ids = parse_range(user_selection)
sql_where = ', '.join(str(i) for i in ids)
cursor.execute(""" SELECT id, email, fname, lname, type, created_at FROM user
WHERE id IN ({});""".format(sql_where)) # yes, this is safe
data = cursor.fetchall()
cursor.close()
cnx.close()
# Make a PDF straight from HTML in a string.
html = render_template('cards.html', users=data)
return render_pdf(HTML(string=html))
@app.route('/api/v1/user/generate_qr/<user_selection>')
def generate_qr(user_selection):
ids = parse_range(user_selection)
try:
for i in ids:
make_qr(i, QR_CODE_PATH)
except Exception as e:
return make_failed_response(str(e))
else:
return make_success_response(ids)
# -----------------------------------------------------------------------------
# Devices
# -----------------------------------------------------------------------------
@app.route('/api/v1/device', methods=['POST', 'GET'])
def device ():
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
if request.method == 'GET':
try:
cursor.execute(""" SELECT * FROM device; """)
except Exception as e:
return make_failed_response(str(e))
else:
data = cursor.fetchall()
for row in data:
row['is_active'] = True if row['is_active'] else False
return make_success_response(data)
finally:
cursor.close()
cnx.close()
# add a new device
if request.method == "POST":
cnx, cursor = get_database()
new_device = request.get_json(force=True)
try:
cursor.execute(
""" INSERT INTO device (serial_no, type, is_active)
VALUES (%s, %s, %s); """,
(
new_device["serial_no"],
new_device["type"],
new_device.get("is_active", False),
)
)
except Exception as e:
cnx.rollback()
return make_failed_response(str(e))
else:
cnx.commit()
new_id = cursor.lastrowid
make_qr(new_id, QR_CODE_PATH)
data = dict(id=new_id)
return make_success_response(data)
finally:
cursor.close()
cnx.close()
@app.route('/api/v1/device/<int:id>', methods=['GET', 'DELETE'])
def one_device(id):
# get a device
if request.method == "GET":
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
try:
cursor.execute(""" SELECT * FROM device
WHERE id = %s """, (id,))
except Exception as e:
return make_failed_response(str(e))
else:
rows = cursor.fetchall()
if not len(rows):
return make_failed_response("id not found")
else:
return make_success_response(rows[0])
finally:
cursor.close()
cnx.close()
# delete a device
if request.method == "DELETE":
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor()
try:
cursor.execute(""" DELETE FROM device
WHERE id = %s """, (id,))
except Exception as e:
return make_failed_response(str(e))
else:
cnx.commit()
if not cursor.rowcount:
return make_failed_response("id not found")
else:
return make_success_response(dict(id=id))
finally:
cursor.close()
cnx.close()
@app.route('/api/v1/device/<int:id>/active', methods=['PUT', 'DELETE'])
def device_active (id):
cnx, cursor = get_database()
is_active = True
if request.method == 'DELETE':
is_active = False
cursor.execute(""" UPDATE device SET is_active=%s WHERE id = %s; """,
(is_active, id))
cnx.commit()
cursor.close()
cnx.close()
return ('', 200)
@app.route('/api/v1/device/<int:device_id>/loan/<int:user_id>', methods=['PUT', 'DELETE'])
def loan (device_id, user_id):
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
# attempt to loan
if request.method == 'PUT':
# is user privileged for this device?
cursor.execute(""" SELECT COUNT(device_type_privilage.type) AS count
FROM device, device_type_privilage
WHERE device.type = device_type_privilage.type
AND device_type_privilage.user_id = %s
AND device.type = (SELECT type FROM device WHERE id = %s);""",
(user_id,device_id))
row = cursor.fetchone()
count = row['count']
log.info('[check] [privilege] Executed SQL: ' + SQL_one_line(cursor.statement))
if count == 0:
log.info('[check] [privilege] User {} not privileged to loan device {}'.format(user_id, device_id))
return make_failed_response(error_message = 1)
# is the device loaned by a user?
cursor.execute(""" SELECT COUNT(id) AS count, loaned_by, type
FROM device WHERE id = %s
AND loaned_by IS NOT NULL """, (device_id,))
device = cursor.fetchone()
if device['count']:
log.info('[check] [loan] Device {} already loaned.'.format(device_id))
cursor.execute(""" SELECT user.id, user.email, user.fname,
user.lname, user.type, user.created_at
FROM user
WHERE user.id = %s; """, (device['loaned_by'],))
user = cursor.fetchone()
return make_failed_response(error_message = 2, data = user)
else:
# we now check safety
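# Safety check, as implemented below: count the active devices of this type,
# subtract the devices promised to reservations that overlap the current time,
# and see whether lending one more would leave the pool negative. If it would,
# the loan is only allowed when the user shares a class with one of the
# colliding reservations.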
now = datetime.utcnow().replace(tzinfo=timezone.utc)
cursor.execute("""SELECT type FROM device WHERE id = %s""", (device_id,))
device_type = cursor.fetchone()['type']
log.info('[check] [safety] Device type: {}'.format(device_type))
colliding_reservations = test_reservation(now, now, device_type)
log.info('[check] [safety] Colliding reservations: {}'.format(colliding_reservations))
cursor.execute(""" SELECT COUNT(*) AS count FROM device WHERE type = %s AND is_active = 1""", (device_type,))
total_devices = cursor.fetchone()['count']
total_reserved = sum([int(row['count']) for row in colliding_reservations])
log.info('[check] [safety] total_reserved: ' + str(total_reserved))
log.info('[check] [safety] Total active devices ({}): {}'.format(device_type, total_devices))
remaining = total_devices - total_reserved - 1
log.info ('[check] [safety] Active devices which will be left: ' + str(remaining))
if remaining < 0:
if len(colliding_reservations) == 0: # there are plainly no active unloaned devices left,
# even if there are no reservations
# ideally, this should never happen as an inactive
# and unloaned device wouldn't normally be requested
log.info ('[check] [safety] Failed, remaining = {}'.format(remaining))
return make_failed_response(error_message=3)
# now only allow if student is in one of the classes of any of the colliding reservations
cursor.execute(""" SELECT class_id FROM class_registration WHERE user_id = %s """, (user_id,))
user_classes = set([row['class_id'] for row in cursor.fetchall()])
reservation_classes = set([row['class_id'] for row in colliding_reservations])
common_classes = user_classes.intersection(reservation_classes)
log.info('[check] [safety] user classes: {}'.format(user_classes))
log.info('[check] [safety] reservation classes: {}'.format(reservation_classes))
log.info('[check] [safety] no. of common classes: {}'.format(len(common_classes)))
if len(common_classes) == 0:
log.info('[check] [safety] Failed. common classes = 0')
return make_failed_response(error_message=3, data = colliding_reservations)
log.info('[check] [safety] All checks passed.')
cursor.execute(""" UPDATE device SET loaned_by = %s
WHERE device.id = %s """, (user_id, device_id))
cnx.commit()
cursor.close()
cnx.close()
return make_success_response (data=dict(device_id=device_id, user_id=user_id))
# attempt to return device
if request.method == 'DELETE':
# check that the device exists, is on loan, and is being returned by the same user
cursor.execute(""" SELECT COUNT(id) AS count
FROM device WHERE id = %s
AND loaned_by = %s """, (device_id, user_id))
count = cursor.fetchone()['count']
if count == 0:
return make_failed_response (error_message='invalid user/device')
cursor.execute(""" UPDATE device
SET loaned_by = NULL
WHERE device.id = %s; """, (device_id,))
cnx.commit()
cursor.close()
cnx.close()
return make_success_response (data=dict(device_id=device_id, user_id=user_id))
@app.route('/api/v1/device/type', methods=['GET'])
def device_type ():
cnx, cursor = get_database()
cursor.execute(""" SELECT DISTINCT type FROM device; """)
types = [row.type for row in cursor.fetchall()]
cursor.close()
cnx.close()
log.info(str(types))
return make_success_response(data=types)
@app.route('/api/v1/device/card/<device_selection>/pdf')
def device_cards(device_selection):
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
ids = parse_range(device_selection)
sql_where = ', '.join(str(i) for i in ids)
cursor.execute(""" SELECT id, type, serial_no, type FROM device
WHERE id IN ({});""".format(sql_where))
data = cursor.fetchall()
cursor.close()
cnx.close()
# Make a PDF straight from HTML in a string.
html = render_template('devices.html', devices=data)
return render_pdf(HTML(string=html))
# -----------------------------------------------------------------------------
# Reservation
# -----------------------------------------------------------------------------
@app.route('/api/v1/reservation', methods=['GET', 'POST'])
def reservation ():
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
# get all reservations
if request.method == 'GET':
try:
cursor.execute(""" SELECT * FROM reservation; """)
except Exception as e:
return make_failed_response(str(e))
else:
reservations = cursor.fetchall()
dict_dates_to_utc(reservations)
return make_success_response(reservations)
finally:
cursor.close()
cnx.close()
# add reservation
if request.method == 'POST':
new_reservation = request.get_json(force=True)
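# Illustrative request body (keys match the INSERT below; values are made up):
# {"start_time": "2020-01-01T09:00:00+00:00", "end_time": "2020-01-01T11:00:00+00:00",
#  "class_id": 1, "type": "laptop", "count": 2, "user_id": 3, "safe_zone": "01:00:00"}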
start_time = date_parse(new_reservation['start_time']).astimezone(tz=timezone.utc)
end_time = date_parse(new_reservation['end_time']).astimezone(tz=timezone.utc)
# we now check safety
colliding_reservations = test_reservation(start_time, end_time, new_reservation['type'])
cursor.execute(""" SELECT COUNT(*) AS count FROM device WHERE type = %s AND is_active = 1""", (new_reservation['type'],))
total_devices = cursor.fetchone()['count']
total_reserved = sum([int(row['count']) for row in colliding_reservations])
log.info('Total reserved: {}'.format(total_reserved))
log.info('Total active Devices ({}): {}'.format(new_reservation['type'], total_devices))
remaining = total_devices - total_reserved - new_reservation['count']
log.info('Devices which may be left: {}'.format(remaining))
# there is likely not going to be enough devices of this type to go
# around at some point where the new reservation and current ones collide
if remaining < 0:
log.info('Failed. remaining devices will be <= 0')
return make_failed_response(error_message=1, data = colliding_reservations)
try:
cursor.execute(
""" INSERT INTO reservation (start_time,
end_time,
class_id,
type,
count,
user_id,
safe_zone)
VALUES (%s, %s, %s, %s, %s, %s, %s); """,
(
start_time,
end_time,
new_reservation['class_id'],
new_reservation['type'],
new_reservation['count'],
new_reservation['user_id'],
new_reservation.get('safe_zone', '01:00:00')
)
)
except Exception as e:
cnx.rollback()
return make_failed_response(str(e))
else:
log.info('Add success.')
cnx.commit()
new_id = cursor.lastrowid
data = dict(id=new_id)
return make_success_response(data)
finally:
cursor.close()
cnx.close()
@app.route('/api/v1/reservation/<int:id>', methods=['DELETE', 'GET'])
def one_reservation(id):
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
# delete (revoke) a reservation
if request.method == 'DELETE':
try:
cursor.execute(""" DELETE FROM reservation
WHERE id = %s """, (id,))
except Exception as e:
return make_failed_response(str(e))
else:
cnx.commit()
if not cursor.rowcount:
return make_failed_response("id not found")
else:
return make_success_response(dict(id=id))
finally:
cursor.close()
cnx.close()
if request.method == 'GET':
try:
cursor.execute(""" SELECT * FROM reservation
WHERE id = %s """, (id,))
except Exception as e:
return make_failed_response(str(e))
else:
rows = cursor.fetchall()
if not len(rows):
return make_failed_response("id not found")
else:
dict_dates_to_utc(rows)
return make_success_response(rows[0])
finally:
cursor.close()
cnx.close()
# -----------------------------------------------------------------------------
# Classes (Academic)
# -----------------------------------------------------------------------------
@app.route('/api/v1/class', methods=['GET', 'POST'])
def all_class ():
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
# get all classes
if request.method == 'GET':
try:
cursor.execute(""" SELECT * FROM class; """)
except Exception as e:
return make_failed_response(str(e))
else:
data = cursor.fetchall()
return make_success_response(data)
finally:
cursor.close()
cnx.close()
# add a new class
if request.method == "POST":
cnx, cursor = get_database()
new_class = request.get_json(force=True)
try:
cursor.execute(
""" INSERT INTO class (name)
VALUES (%s); """,
(
new_class["name"],
)
)
except Exception as e:
log.error('Attempted SQL: ' + SQL_one_line(cursor.statement))
cnx.rollback()
return make_failed_response(str(e))
else:
cnx.commit()
new_id = cursor.lastrowid
data = dict(id=new_id)
return make_success_response(data)
finally:
cursor.close()
cnx.close()
@app.route('/api/v1/class/<int:id>', methods=['GET', 'DELETE'])
def one_class (id):
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
# get a class
if request.method == "GET":
try:
cursor.execute(""" SELECT * FROM class
WHERE `class`.`id` = %s """, (id,))
classes = cursor.fetchall()
except Exception as e:
return make_failed_response(str(e))
else:
if not len(classes):
return make_failed_response("id not found")
else:
# now we get who is registered for this class
cursor.execute(""" SELECT user.id, user.email, user.fname,
user.lname, user.type, user.created_at
FROM class_registration, user
WHERE class_registration.class_id = %s
AND class_registration.user_id = user.id;""", (id,))
users = cursor.fetchall()
classes[0]['users'] = users
log.info (str(classes))
return make_success_response(classes[0])
finally:
cursor.close()
cnx.close()
# remove a class
if request.method == "DELETE":
cnx, cursor = get_database()
try:
cursor.execute(""" DELETE FROM class
WHERE id = %s """, (id,))
except Exception as e:
msg = 'Failed to delete class {}. There may still be a user part of it. ({})'.format(id, str(e))
log.info(msg)
return make_failed_response(msg)
else:
cnx.commit()
if not cursor.rowcount:
return make_failed_response("id not found")
else:
log.info('Deleted class {}.'.format(id))
return make_success_response(dict(id=id))
finally:
cursor.close()
cnx.close()
@app.route('/api/v1/class/<int:class_id>/user/<int:user_id>', methods=['PUT', 'DELETE'])
def class_register (class_id, user_id):
cnx, cursor = get_database()
# register user
if request.method == 'PUT':
try:
cursor.execute(""" INSERT INTO class_registration (class_id, user_id)
VALUES (%s, %s); """, (class_id,user_id))
except Exception as e:
return make_failed_response(str(e))
else:
cnx.commit()
return make_success_response(dict(id=cursor.lastrowid))
finally:
cursor.close()
cnx.close()
# deregister user
if request.method == 'DELETE':
try:
cursor.execute(""" DELETE FROM class_registration
WHERE class_id = %s
AND user_id = %s; """, (class_id,user_id))
except Exception as e:
return make_failed_response(str(e))
else:
cnx.commit()
if not cursor.rowcount:
return make_failed_response("ids not found")
else:
return make_success_response(dict(class_id=class_id, user_id=user_id))
finally:
cursor.close()
cnx.close()
@app.route('/api/v1/lateness', methods=['GET', 'POST'])
def lateness():
cnx = mysql.connector.connect(**config.db)
cursor = cnx.cursor(dictionary=True)
if request.method == 'POST':
new_lateness = request.get_json(force=True)
d = date_parse(new_lateness['datetime'])
try:
d = d.astimezone(tz=timezone.utc)
except ValueError as e:
return make_failed_response(str(e))
log.info(str(d))
try:
cursor.execute(""" INSERT INTO lateness (user_id, datetime)
VALUES (%s, %s) """,
(new_lateness['user_id'],
d))
except Exception as e:
cnx.rollback()
return make_failed_response(str(e))
else:
cnx.commit()
new_id = cursor.lastrowid
data = dict(id=new_id)
return make_success_response(data)
finally:
cursor.close()
cnx.close()
if request.method == 'GET':
cursor.execute (""" SELECT * FROM lateness; """)
rows = cursor.fetchall()
dict_dates_to_utc(rows)
cursor.close()
cnx.close()
return make_success_response(data = rows)
# -----------------------------------------------------------------------------
# Config
# -----------------------------------------------------------------------------
@app.route('/api/v1/config')
def app_config():
return jsonify (config.app)
if __name__ == "__main__":
DEBUG = True
app.run(debug=DEBUG, host='0.0.0.0', port=53455)
| |
#!/usr/bin/python2.4
"""Tests for python.util.protobuf.compare."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import textwrap
from tensorflow.python.platform import googletest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.protobuf import compare_test_pb2
import six
from google.protobuf import text_format
def LargePbs(*args):
"""Converts ASCII string Large PBs to messages."""
pbs = []
for arg in args:
pb = compare_test_pb2.Large()
text_format.Merge(arg, pb)
pbs.append(pb)
return pbs
class Proto2CmpTest(googletest.TestCase):
def assertGreater(self, a, b):
"""Asserts that Proto2Cmp says a > b."""
a, b = LargePbs(a, b)
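# Assert both orderings so the helper also guards against a Proto2Cmp that is not antisymmetric.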
googletest.TestCase.assertGreater(self, compare.Proto2Cmp(a, b), 0)
googletest.TestCase.assertLess(self, compare.Proto2Cmp(b, a), 0)
def assertEquals(self, a, b):
"""Asserts that Proto2Cmp says a == b."""
a, b = LargePbs(a, b)
googletest.TestCase.assertEquals(self, compare.Proto2Cmp(a, b), 0)
def testPrimitives(self):
googletest.TestCase.assertEqual(self, 0, compare.Proto2Cmp('a', 'a'))
googletest.TestCase.assertLess(self, 0, compare.Proto2Cmp('b', 'a'))
pb = compare_test_pb2.Large()
googletest.TestCase.assertEquals(self, cmp('a', pb), compare.Proto2Cmp('a', pb))
googletest.TestCase.assertEqual(self, cmp(pb, 'a'), compare.Proto2Cmp(pb, 'a'))
def testEmpty(self):
self.assertEquals('', '')
def testPrimitiveFields(self):
self.assertGreater('string_: "a"', '')
self.assertEquals('string_: "a"', 'string_: "a"')
self.assertGreater('string_: "b"', 'string_: "a"')
self.assertGreater('string_: "ab"', 'string_: "aa"')
self.assertGreater('int64_: 0', '')
self.assertEquals('int64_: 0', 'int64_: 0')
self.assertGreater('int64_: -1', '')
self.assertGreater('int64_: 1', 'int64_: 0')
self.assertGreater('int64_: 0', 'int64_: -1')
self.assertGreater('float_: 0.0', '')
self.assertEquals('float_: 0.0', 'float_: 0.0')
self.assertGreater('float_: -0.1', '')
self.assertGreater('float_: 3.14', 'float_: 0')
self.assertGreater('float_: 0', 'float_: -0.1')
self.assertEquals('float_: -0.1', 'float_: -0.1')
self.assertGreater('bool_: true', '')
self.assertGreater('bool_: false', '')
self.assertGreater('bool_: true', 'bool_: false')
self.assertEquals('bool_: false', 'bool_: false')
self.assertEquals('bool_: true', 'bool_: true')
self.assertGreater('enum_: A', '')
self.assertGreater('enum_: B', 'enum_: A')
self.assertGreater('enum_: C', 'enum_: B')
self.assertEquals('enum_: C', 'enum_: C')
def testRepeatedPrimitives(self):
self.assertGreater('int64s: 0', '')
self.assertEquals('int64s: 0', 'int64s: 0')
self.assertGreater('int64s: 1', 'int64s: 0')
self.assertGreater('int64s: 0 int64s: 0', '')
self.assertGreater('int64s: 0 int64s: 0', 'int64s: 0')
self.assertGreater('int64s: 1 int64s: 0', 'int64s: 0')
self.assertGreater('int64s: 0 int64s: 1', 'int64s: 0')
self.assertGreater('int64s: 1', 'int64s: 0 int64s: 2')
self.assertGreater('int64s: 2 int64s: 0', 'int64s: 1')
self.assertEquals('int64s: 0 int64s: 0', 'int64s: 0 int64s: 0')
self.assertEquals('int64s: 0 int64s: 1', 'int64s: 0 int64s: 1')
self.assertGreater('int64s: 1 int64s: 0', 'int64s: 0 int64s: 0')
self.assertGreater('int64s: 1 int64s: 0', 'int64s: 0 int64s: 1')
self.assertGreater('int64s: 1 int64s: 0', 'int64s: 0 int64s: 2')
self.assertGreater('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0')
self.assertGreater('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0 int64s: 2')
def testMessage(self):
self.assertGreater('small <>', '')
self.assertEquals('small <>', 'small <>')
self.assertGreater('small < strings: "a" >', '')
self.assertGreater('small < strings: "a" >', 'small <>')
self.assertEquals('small < strings: "a" >', 'small < strings: "a" >')
self.assertGreater('small < strings: "b" >', 'small < strings: "a" >')
self.assertGreater('small < strings: "a" strings: "b" >',
'small < strings: "a" >')
self.assertGreater('string_: "a"', 'small <>')
self.assertGreater('string_: "a"', 'small < strings: "b" >')
self.assertGreater('string_: "a"', 'small < strings: "b" strings: "c" >')
self.assertGreater('string_: "a" small <>', 'small <>')
self.assertGreater('string_: "a" small <>', 'small < strings: "b" >')
self.assertEquals('string_: "a" small <>', 'string_: "a" small <>')
self.assertGreater('string_: "a" small < strings: "a" >',
'string_: "a" small <>')
self.assertEquals('string_: "a" small < strings: "a" >',
'string_: "a" small < strings: "a" >')
self.assertGreater('string_: "a" small < strings: "a" >',
'int64_: 1 small < strings: "a" >')
self.assertGreater('string_: "a" small < strings: "a" >', 'int64_: 1')
self.assertGreater('string_: "a"', 'int64_: 1 small < strings: "a" >')
self.assertGreater('string_: "a" int64_: 0 small < strings: "a" >',
'int64_: 1 small < strings: "a" >')
self.assertGreater('string_: "a" int64_: 1 small < strings: "a" >',
'string_: "a" int64_: 0 small < strings: "a" >')
self.assertEquals('string_: "a" int64_: 0 small < strings: "a" >',
'string_: "a" int64_: 0 small < strings: "a" >')
def testNestedMessage(self):
self.assertGreater('medium <>', '')
self.assertEquals('medium <>', 'medium <>')
self.assertGreater('medium < smalls <> >', 'medium <>')
self.assertEquals('medium < smalls <> >', 'medium < smalls <> >')
self.assertGreater('medium < smalls <> smalls <> >', 'medium < smalls <> >')
self.assertEquals('medium < smalls <> smalls <> >',
'medium < smalls <> smalls <> >')
self.assertGreater('medium < int32s: 0 >', 'medium < smalls <> >')
self.assertGreater('medium < smalls < strings: "a"> >',
'medium < smalls <> >')
def testTagOrder(self):
"""Tests that different fields are ordered by tag number.
For reference, here are the relevant tag numbers from compare_test.proto:
optional string string_ = 1;
optional int64 int64_ = 2;
optional float float_ = 3;
optional Small small = 8;
optional Medium medium = 7;
"""
self.assertGreater('string_: "a" ',
' int64_: 1 ')
self.assertGreater('string_: "a" int64_: 2 ',
' int64_: 1 ')
self.assertGreater('string_: "b" int64_: 1 ',
'string_: "a" int64_: 2 ')
self.assertEquals( 'string_: "a" int64_: 1 ',
'string_: "a" int64_: 1 ')
self.assertGreater('string_: "a" int64_: 1 float_: 0.0',
'string_: "a" int64_: 1 ')
self.assertEquals( 'string_: "a" int64_: 1 float_: 0.0',
'string_: "a" int64_: 1 float_: 0.0')
self.assertGreater('string_: "a" int64_: 1 float_: 0.1',
'string_: "a" int64_: 1 float_: 0.0')
self.assertGreater('string_: "a" int64_: 2 float_: 0.0',
'string_: "a" int64_: 1 float_: 0.1')
self.assertGreater('string_: "a" ',
' int64_: 1 float_: 0.1')
self.assertGreater('string_: "a" float_: 0.0',
' int64_: 1 ')
self.assertGreater('string_: "b" float_: 0.0',
'string_: "a" int64_: 1 ')
self.assertGreater('string_: "a"',
'small < strings: "a" >')
self.assertGreater('string_: "a" small < strings: "a" >',
'small < strings: "b" >')
self.assertGreater('string_: "a" small < strings: "b" >',
'string_: "a" small < strings: "a" >')
self.assertEquals('string_: "a" small < strings: "a" >',
'string_: "a" small < strings: "a" >')
self.assertGreater('string_: "a" medium <>',
'string_: "a" small < strings: "a" >')
self.assertGreater('string_: "a" medium < smalls <> >',
'string_: "a" small < strings: "a" >')
self.assertGreater('medium <>', 'small < strings: "a" >')
self.assertGreater('medium <> small <>', 'small < strings: "a" >')
self.assertGreater('medium < smalls <> >', 'small < strings: "a" >')
self.assertGreater('medium < smalls < strings: "a" > >',
'small < strings: "b" >')
class NormalizeRepeatedFieldsTest(googletest.TestCase):
def assertNormalizes(self, orig, expected_no_dedupe, expected_dedupe):
"""Checks NormalizeRepeatedFields(orig) against the two expected results."""
orig, expected_no_dedupe, expected_dedupe = LargePbs(
orig, expected_no_dedupe, expected_dedupe)
actual = compare.NormalizeRepeatedFields(copy.deepcopy(orig), dedupe=False)
self.assertEqual(expected_no_dedupe, actual)
actual = compare.NormalizeRepeatedFields(copy.deepcopy(orig), dedupe=True)
self.assertEqual(expected_dedupe, actual)
def testIgnoreNonRepeatedFields(self):
orig = """string_: "a" int64_: 1 float_: 0.1 bool_: true enum_: A
medium: {} small: {}"""
self.assertNormalizes(orig, orig, orig)
def testRepeatedPrimitive(self):
self.assertNormalizes('int64s: 3 int64s: -1 int64s: 2 int64s: -1 int64s: 3',
'int64s: -1 int64s: -1 int64s: 2 int64s: 3 int64s: 3',
'int64s: -1 int64s: 2 int64s: 3')
def testRepeatedMessage(self):
self.assertNormalizes("""medium: { smalls: { strings: "c" }
smalls: { strings: "a" }
smalls: { strings: "b" }
smalls: { strings: "a" }
smalls: { strings: "c" } }
""",
"""medium: { smalls: { strings: "a" }
smalls: { strings: "a" }
smalls: { strings: "b" }
smalls: { strings: "c" }
smalls: { strings: "c" } }
""",
"""medium: { smalls: { strings: "a" }
smalls: { strings: "b" }
smalls: { strings: "c" } }
""")
def testNestedRepeatedGroup(self):
self.assertNormalizes("""medium { GroupA { GroupB { strings: "c" }
GroupB { strings: "a" }
GroupB { strings: "b" }
GroupB { strings: "a" }
GroupB { strings: "c" } } }
""",
"""medium { GroupA { GroupB { strings: "a" }
GroupB { strings: "a" }
GroupB { strings: "b" }
GroupB { strings: "c" }
GroupB { strings: "c" } } }
""",
"""medium { GroupA { GroupB { strings: "a" }
GroupB { strings: "b" }
GroupB { strings: "c" } } }
""")
def testMapNormalizes(self):
self.assertNormalizes(
"""with_map: { value_message: { key: 2, value: { strings: "k2v1",
strings: "k2v2",
strings: "k2v1" } },
value_message: { key: 1, value: { strings: "k1v2",
strings: "k1v1" } } }
""",
"""with_map: { value_message: { key: 1, value: { strings: "k1v1",
strings: "k1v2" } },
value_message: { key: 2, value: { strings: "k2v1",
strings: "k2v1",
strings: "k2v2" } } }
""",
"""with_map: { value_message: { key: 1, value: { strings: "k1v1",
strings: "k1v2" } },
value_message: { key: 2, value: { strings: "k2v1",
strings: "k2v2" } } }
""")
class NormalizeNumbersTest(googletest.TestCase):
"""Tests for NormalizeNumberFields()."""
def testNormalizesInts(self):
pb = compare_test_pb2.Large()
pb.int64_ = 4
compare.NormalizeNumberFields(pb)
self.assertTrue(isinstance(pb.int64_, six.integer_types))
pb.int64_ = 4
compare.NormalizeNumberFields(pb)
self.assertTrue(isinstance(pb.int64_, six.integer_types))
pb.int64_ = 9999999999999999
compare.NormalizeNumberFields(pb)
self.assertTrue(isinstance(pb.int64_, six.integer_types))
def testNormalizesRepeatedInts(self):
pb = compare_test_pb2.Large()
pb.int64s.extend([1, 400, 999999999999999])
compare.NormalizeNumberFields(pb)
self.assertTrue(isinstance(pb.int64s[0], six.integer_types))
self.assertTrue(isinstance(pb.int64s[1], six.integer_types))
self.assertTrue(isinstance(pb.int64s[2], six.integer_types))
def testNormalizesFloats(self):
pb1 = compare_test_pb2.Large()
pb1.float_ = 1.2314352351231
pb2 = compare_test_pb2.Large()
pb2.float_ = 1.231435
self.assertNotEqual(pb1.float_, pb2.float_)
compare.NormalizeNumberFields(pb1)
compare.NormalizeNumberFields(pb2)
self.assertEqual(pb1.float_, pb2.float_)
def testNormalizesRepeatedFloats(self):
pb = compare_test_pb2.Large()
pb.medium.floats.extend([0.111111111, 0.111111])
compare.NormalizeNumberFields(pb)
for value in pb.medium.floats:
self.assertAlmostEqual(0.111111, value)
def testNormalizesDoubles(self):
pb1 = compare_test_pb2.Large()
pb1.double_ = 1.2314352351231
pb2 = compare_test_pb2.Large()
pb2.double_ = 1.2314352
self.assertNotEqual(pb1.double_, pb2.double_)
compare.NormalizeNumberFields(pb1)
compare.NormalizeNumberFields(pb2)
self.assertEqual(pb1.double_, pb2.double_)
def testNormalizesMaps(self):
pb = compare_test_pb2.WithMap()
pb.value_message[4].strings.extend(['a', 'b', 'c'])
pb.value_string['d'] = 'e'
compare.NormalizeNumberFields(pb)
class AssertTest(googletest.TestCase):
"""Tests both assertProto2Equal() and assertProto2SameElements()."""
def assertProto2Equal(self, a, b, **kwargs):
if isinstance(a, basestring) and isinstance(b, basestring):
a, b = LargePbs(a, b)
compare.assertProto2Equal(self, a, b, **kwargs)
def assertProto2SameElements(self, a, b, **kwargs):
if isinstance(a, basestring) and isinstance(b, basestring):
a, b = LargePbs(a, b)
compare.assertProto2SameElements(self, a, b, **kwargs)
def assertAll(self, a, **kwargs):
"""Checks that all possible asserts pass."""
self.assertProto2Equal(a, a, **kwargs)
self.assertProto2SameElements(a, a, number_matters=False, **kwargs)
self.assertProto2SameElements(a, a, number_matters=True, **kwargs)
def assertSameNotEqual(self, a, b):
"""Checks that assertProto2SameElements() passes with number_matters=False
and number_matters=True but not assertProto2Equal().
"""
self.assertProto2SameElements(a, b, number_matters=False)
self.assertProto2SameElements(a, b, number_matters=True)
self.assertRaises(AssertionError, self.assertProto2Equal, a, b)
def assertSameExceptNumber(self, a, b):
"""Checks that assertProto2SameElements() passes with number_matters=False
but not number_matters=True or assertProto2Equal().
"""
self.assertProto2SameElements(a, b, number_matters=False)
self.assertRaises(AssertionError, self.assertProto2SameElements, a, b,
number_matters=True)
self.assertRaises(AssertionError, self.assertProto2Equal, a, b)
def assertNone(self, a, b, message, **kwargs):
"""Checks that all possible asserts fail with the given message."""
message = re.escape(textwrap.dedent(message))
self.assertRaisesRegexp(AssertionError, message,
self.assertProto2SameElements, a, b,
number_matters=False, **kwargs)
self.assertRaisesRegexp(AssertionError, message,
self.assertProto2SameElements, a, b,
number_matters=True, **kwargs)
self.assertRaisesRegexp(AssertionError, message,
self.assertProto2Equal, a, b, **kwargs)
def testCheckInitialized(self):
# neither is initialized
a = compare_test_pb2.Labeled()
a.optional = 1
self.assertNone(a, a, 'Initialization errors: ', check_initialized=True)
self.assertAll(a, check_initialized=False)
# a is initialized, b isn't
b = copy.deepcopy(a)
a.required = 2
self.assertNone(a, b, 'Initialization errors: ', check_initialized=True)
self.assertNone(a, b,
"""
- required: 2
optional: 1
""",
check_initialized=False)
# both are initialized
a = compare_test_pb2.Labeled()
a.required = 2
self.assertAll(a, check_initialized=True)
self.assertAll(a, check_initialized=False)
b = copy.deepcopy(a)
b.required = 3
message = """
- required: 2
? ^
+ required: 3
? ^
"""
self.assertNone(a, b, message, check_initialized=True)
self.assertNone(a, b, message, check_initialized=False)
def testAssertEqualWithStringArg(self):
pb = compare_test_pb2.Large()
pb.string_ = 'abc'
pb.float_ = 1.234
compare.assertProto2Equal(
self,
"""
string_: 'abc'
float_: 1.234
""",
pb)
def testAssertSameElementsWithStringArg(self):
pb = compare_test_pb2.Large()
pb.string_ = 'abc'
pb.float_ = 1.234
pb.int64s.extend([7, 3, 5])
compare.assertProto2SameElements(
self,
"""
string_: 'abc'
float_: 1.234
int64s: 3
int64s: 7
int64s: 5
""",
pb)
def testProto2ContainsString(self):
pb = compare_test_pb2.Large()
pb.string_ = 'abc'
pb.float_ = 1.234
pb.small.strings.append('xyz')
compare.assertProto2Contains(
self,
"""
small {
strings: "xyz"
}
""",
pb)
def testProto2ContainsProto(self):
pb = compare_test_pb2.Large()
pb.string_ = 'abc'
pb.float_ = 1.234
pb.small.strings.append('xyz')
pb2 = compare_test_pb2.Large()
pb2.small.strings.append('xyz')
compare.assertProto2Contains(
self, pb2, pb)
def testNormalizesNumbers(self):
pb1 = compare_test_pb2.Large()
pb1.int64_ = 4
pb2 = compare_test_pb2.Large()
pb2.int64_ = 4
compare.assertProto2Equal(self, pb1, pb2)
def testNormalizesFloat(self):
pb1 = compare_test_pb2.Large()
pb1.double_ = 4.0
pb2 = compare_test_pb2.Large()
pb2.double_ = 4
compare.assertProto2Equal(self, pb1, pb2, normalize_numbers=True)
pb1 = compare_test_pb2.Medium()
pb1.floats.extend([4.0, 6.0])
pb2 = compare_test_pb2.Medium()
pb2.floats.extend([6, 4])
compare.assertProto2SameElements(self, pb1, pb2, normalize_numbers=True)
def testPrimitives(self):
self.assertAll('string_: "x"')
self.assertNone('string_: "x"',
'string_: "y"',
"""
- string_: "x"
? ^
+ string_: "y"
? ^
""")
def testRepeatedPrimitives(self):
self.assertAll('int64s: 0 int64s: 1')
self.assertSameNotEqual('int64s: 0 int64s: 1', 'int64s: 1 int64s: 0')
self.assertSameNotEqual('int64s: 0 int64s: 1 int64s: 2',
'int64s: 2 int64s: 1 int64s: 0')
self.assertSameExceptNumber('int64s: 0', 'int64s: 0 int64s: 0')
self.assertSameExceptNumber('int64s: 0 int64s: 1',
'int64s: 1 int64s: 0 int64s: 1')
self.assertNone('int64s: 0',
'int64s: 0 int64s: 2',
"""
int64s: 0
+ int64s: 2
""")
self.assertNone('int64s: 0 int64s: 1',
'int64s: 0 int64s: 2',
"""
int64s: 0
- int64s: 1
? ^
+ int64s: 2
? ^
""")
def testMessage(self):
self.assertAll('medium: {}')
self.assertAll('medium: { smalls: {} }')
self.assertAll('medium: { int32s: 1 smalls: {} }')
self.assertAll('medium: { smalls: { strings: "x" } }')
self.assertAll('medium: { smalls: { strings: "x" } } small: { strings: "y" }')
self.assertSameNotEqual(
'medium: { smalls: { strings: "x" strings: "y" } }',
'medium: { smalls: { strings: "y" strings: "x" } }')
self.assertSameNotEqual(
'medium: { smalls: { strings: "x" } smalls: { strings: "y" } }',
'medium: { smalls: { strings: "y" } smalls: { strings: "x" } }')
self.assertSameExceptNumber(
'medium: { smalls: { strings: "x" strings: "y" strings: "x" } }',
'medium: { smalls: { strings: "y" strings: "x" } }')
self.assertSameExceptNumber(
'medium: { smalls: { strings: "x" } int32s: 0 }',
'medium: { int32s: 0 smalls: { strings: "x" } int32s: 0 }')
self.assertNone('medium: {}',
'medium: { smalls: { strings: "x" } }',
"""
medium {
+ smalls {
+ strings: "x"
+ }
}
""")
self.assertNone('medium: { smalls: { strings: "x" } }',
'medium: { smalls: {} }',
"""
medium {
smalls {
- strings: "x"
}
}
""")
self.assertNone('medium: { int32s: 0 }',
'medium: { int32s: 1 }',
"""
medium {
- int32s: 0
? ^
+ int32s: 1
? ^
}
""")
def testMsgPassdown(self):
self.assertRaisesRegexp(AssertionError, 'test message passed down',
self.assertProto2Equal,
'medium: {}',
'medium: { smalls: { strings: "x" } }',
msg='test message passed down')
def testRepeatedMessage(self):
self.assertAll('medium: { smalls: {} smalls: {} }')
self.assertAll('medium: { smalls: { strings: "x" } } medium: {}')
self.assertAll('medium: { smalls: { strings: "x" } } medium: { int32s: 0 }')
self.assertAll('medium: { smalls: {} smalls: { strings: "x" } } small: {}')
self.assertSameNotEqual('medium: { smalls: { strings: "x" } smalls: {} }',
'medium: { smalls: {} smalls: { strings: "x" } }')
self.assertSameExceptNumber('medium: { smalls: {} }',
'medium: { smalls: {} smalls: {} }')
self.assertSameExceptNumber('medium: { smalls: {} smalls: {} } medium: {}',
'medium: {} medium: {} medium: { smalls: {} }')
self.assertSameExceptNumber(
'medium: { smalls: { strings: "x" } smalls: {} }',
'medium: { smalls: {} smalls: { strings: "x" } smalls: {} }')
self.assertNone('medium: {}',
'medium: {} medium { smalls: {} }',
"""
medium {
+ smalls {
+ }
}
""")
self.assertNone('medium: { smalls: {} smalls: { strings: "x" } }',
'medium: { smalls: {} smalls: { strings: "y" } }',
"""
medium {
smalls {
}
smalls {
- strings: "x"
? ^
+ strings: "y"
? ^
}
}
""")
class MixinTests(compare.Proto2Assertions, googletest.TestCase):
def testAssertEqualWithStringArg(self):
pb = compare_test_pb2.Large()
pb.string_ = 'abc'
pb.float_ = 1.234
self.assertProto2Equal(
"""
string_: 'abc'
float_: 1.234
""",
pb)
def testAssertSameElements(self):
a = compare_test_pb2.Large()
a.string_ = 'abc'
a.float_ = 1.234
a.int64s[:] = [4, 3, 2]
b = compare_test_pb2.Large()
b.CopyFrom(a)
b.int64s[:] = [2, 4, 3]
self.assertProto2SameElements(a, b)
if __name__ == '__main__':
googletest.main()
| |
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum, auto
from typing import Any, Iterable, NamedTuple, cast
from lark import Lark, Transformer
from more_itertools import chunked
from pyrsistent import pvector, v
from pyrsistent.typing import PVector
import pytest
from aoc_common import load_puzzle_input
class ItemKind(Enum):
GENERATOR = auto()
MICROCHIP = auto()
def __repr__(self) -> str:
return self.name.lower() # pylint: disable=no-member
def __lt__(self, other: ItemKind) -> bool:
return self is ItemKind.GENERATOR and other is ItemKind.MICROCHIP
@dataclass(frozen=True)
class Item:
element: str
kind: ItemKind
def __repr__(self) -> str:
return f"<{self.element} {self.kind!r}>"
@dataclass(frozen=True)
class Floor:
number: int
items: PVector[Item]
def __str__(self) -> str:
return f"Floor {self.number}: {list(self.items)}"
@property
def is_valid(self) -> bool:
"""True when there are no generators or each chip has its generator."""
gen_elements = {i.element for i in self.items if i.kind is ItemKind.GENERATOR}
chip_elements = {i.element for i in self.items if i.kind is ItemKind.MICROCHIP}
return not gen_elements or chip_elements <= gen_elements
@pytest.mark.parametrize(
"floor,expected",
[
# Empty floor is valid.
(Floor(0, v()), True),
# Floors with only one item are valid.
(Floor(0, v(Item(element="a", kind=ItemKind.GENERATOR))), True),
(Floor(0, v(Item(element="a", kind=ItemKind.MICROCHIP))), True),
# A floor with a matching pair is valid.
(
Floor(
0,
v(
Item(element="a", kind=ItemKind.GENERATOR),
Item(element="a", kind=ItemKind.MICROCHIP),
),
),
True,
),
# A floor with a differing pair is invalid.
(
Floor(
0,
v(
Item(element="a", kind=ItemKind.GENERATOR),
Item(element="b", kind=ItemKind.MICROCHIP),
),
),
False,
),
# A floor with a matched pair is valid even
# if there are other generators present.
(
Floor(
0,
v(
Item(element="a", kind=ItemKind.GENERATOR),
Item(element="b", kind=ItemKind.GENERATOR),
Item(element="b", kind=ItemKind.MICROCHIP),
),
),
True,
),
# A floor with an unmatched chip is invalid if there
# are (matched or unmatched) generators present.
(
Floor(
0,
v(
Item(element="a", kind=ItemKind.GENERATOR),
Item(element="a", kind=ItemKind.MICROCHIP),
Item(element="b", kind=ItemKind.MICROCHIP),
),
),
False,
),
(
Floor(
0,
v(
Item(element="a", kind=ItemKind.GENERATOR),
Item(element="b", kind=ItemKind.MICROCHIP),
Item(element="c", kind=ItemKind.MICROCHIP),
),
),
False,
),
# Floors with only one kind of item are valid.
(
Floor(
0,
v(
Item(element="a", kind=ItemKind.MICROCHIP),
Item(element="b", kind=ItemKind.MICROCHIP),
Item(element="c", kind=ItemKind.MICROCHIP),
),
),
True,
),
(
Floor(
0,
v(
Item(element="a", kind=ItemKind.GENERATOR),
Item(element="b", kind=ItemKind.GENERATOR),
Item(element="c", kind=ItemKind.GENERATOR),
),
),
True,
),
],
)
def test_floor_validity(floor: Floor, expected: bool) -> None:
assert floor.is_valid == expected
class _T(Transformer[Floor]):
ELEMENT = str
@staticmethod
def nth(args: tuple[str]) -> int:
idx = ["first", "second", "third", "fourth"].index(args[0])
return idx
@staticmethod
def generator(args: tuple[str]) -> Item:
return Item(args[0], kind=ItemKind.GENERATOR)
@staticmethod
def microchip(args: tuple[str]) -> Item:
return Item(args[0], kind=ItemKind.MICROCHIP)
@staticmethod
def nothing(_args: list[str]) -> None:
return None
@staticmethod
def floor(args: tuple[Any, ...]) -> Floor:
if args[1] is None:
args = (args[0],)
floor = cast(int, args[0])
items = cast(list[Item], args[1:])
return Floor(floor, pvector(items))
with open("aoc_2016_11.lark") as grammar_file:
grammar = grammar_file.read()
_parse = Lark(grammar).parse
_transform = _T().transform
def parse(line: str) -> Floor:
return _transform(_parse(line))
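# Illustrative use (the element string produced depends on the Lark grammar,
# which is not shown here; "first" maps to floor number 0 via _T.nth):
# parse("The first floor contains a hydrogen-compatible microchip.")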
class LocationPair(NamedTuple):
generator: int
microchip: int
@dataclass(frozen=True)
class MinimumState:
elevator_floor: int
pairs: PVector[LocationPair]
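# Canonical, element-agnostic form of a State: only the sorted
# (generator_floor, microchip_floor) pairs and the elevator floor are kept,
# so states that differ only by swapping element names compare equal.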
@classmethod
def from_iterable(
cls, elevator_floor: int, iterable: Iterable[LocationPair]
) -> MinimumState:
return cls(elevator_floor, pvector(sorted(iterable)))
@dataclass(frozen=True)
class State:
elevator_floor: int
floors: PVector[Floor]
def _group(self) -> Iterable[LocationPair]:
nested = [
[(floor.number, item) for item in floor.items] for floor in self.floors
]
flat = [pair for floor in nested for pair in floor]
s = sorted(flat, key=lambda pair: (pair[1].element, pair[1].kind))
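# Sorting by (element, kind) puts each element's generator immediately before
# its microchip (ItemKind.GENERATOR sorts first), so chunked(s, 2) yields one
# (generator, chip) pair per element.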
for generator, chip in chunked(s, 2):
gen_floor, _ = generator
chip_floor, _ = chip
yield LocationPair(gen_floor, chip_floor)
@property
def is_valid(self) -> bool:
# A state is valid only when every one of its floors is valid.
return all(floor.is_valid for floor in self.floors)
def to_minimum_state(self) -> MinimumState:
return MinimumState.from_iterable(self.elevator_floor, self._group())
if __name__ == "__main__":
lines = load_puzzle_input(day=11).splitlines()
s = State(0, pvector([parse(line) for line in lines]))
s1 = State(0, pvector([parse(line) for line in lines]))
s2 = State(1, pvector([parse(line) for line in lines]))
seen = {s.to_minimum_state()}
print(seen)
seen.add(s1.to_minimum_state())
print(seen)
seen.add(s2.to_minimum_state())
print(seen)
| |
#!/usr/bin/env python
"""
Calculate elastic constants C_ij by least square fitting
using Green-Lagrange deformation tensors, which is the same
scheme as materialsproject.org does.
PREPARE mode creates directories and POSCAR files to be computed.
ANALYZE mode reads the stress values obtained by some code.
Usage:
elasticity.py prepare [options] POSCAR
elasticity.py analyze [options] POSCAR STRSFILE
Options:
-h, --help Shows this message and exit.
--delta1 DELTA1
Maximum strain value of diagonal elements. Available only with prepare. [default: 0.002]
--delta2 DELTA2
Maximum strain value of off-diagonal elements. Available only with prepare. [default: 0.01]
"""
from __future__ import print_function
import os
import numpy as np
from docopt import docopt
from scipy.optimize import curve_fit
import spglib
import yaml
import ase.io
from nappy.napsys import NAPSystem
from nappy.io import read, from_ase
import copy
__author__ = 'Ryo KOBAYASHI'
__version__ = '200521'
__licence__ = 'MIT'
_confname = 'conf.elast.yaml'
#...constants
_prefix = 'elast_'
def quad_func(x,a,b):
return a *x**2 +b
def get_erg(fname):
with open(fname) as f:
l = f.readline()
erg = float(l.split()[0])
return erg
def get_deformations(dlt1max,dlt2max):
dlt1s = [-dlt1max, -dlt1max/2, dlt1max/2, dlt1max]
dlt2s = [-dlt2max, -dlt2max/2, dlt2max/2, dlt2max]
fmats = []
for dlt1 in dlt1s:
for t in range(3):
fmat = np.identity(3,dtype=float)
fmat[t,t] += dlt1
fmats.append(fmat)
#elems = ((1,2),(0,2),(0,1),(2,1),(2,0),(1,0))
elems = ((1,2),(0,2),(0,1))
for dlt2 in dlt2s:
for e in elems:
fmat = np.identity(3,dtype=float)
fmat[e] = dlt2
fmats.append(fmat)
return fmats
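# get_deformations() returns 4*3 diagonal plus 4*3 off-diagonal deformation
# gradients, i.e. 24 matrices, so prepare() below creates elast_00 .. elast_23.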
def prepare(infname='POSCAR',dlt1max=0.01,dlt2max=0.06):
#...original system
#nsys0 = NAPSystem(fname=infname)
nsys0 = read(infname)
#orig_atoms = read(infname,format='vasp')
orig_atoms = nsys0.to_ase_atoms()
#...get deformations
fmats = get_deformations(dlt1max,dlt2max)
#...deform original system and save to _prefix_##/POSCAR
for i,fmat in enumerate(fmats):
atoms = orig_atoms.copy()
#nsys = copy.deepcopy(nsys0)
dname = _prefix +"{0:02d}".format(i)
os.system('mkdir -p {}'.format(dname))
print(dname)
cell0 = atoms.get_cell()
emat = 0.5 *(np.dot(fmat.T,fmat) -np.identity(3)) +np.identity(3)
cell = np.dot(emat,cell0)
#print(i,emat,cell)
# cell = np.dot(cell0,fmat.T)
atoms.set_cell(cell,scale_atoms=True)
atoms.write(dname+'/POSCAR',format='vasp',vasp5=True,direct=True,
sort=False)
print('prepare done')
print('')
print('After performing VASP or pmd calculations in these directories, '
+'run the following command:')
print(' $ python elasticity.py analyze POSCAR str.ref')
print('or')
print(' $ python elasticity.py analyze POSCAR strs.pmd')
print('')
def cdote(strns,*params):
"""
Compute C_ij*e_j to get s_i.
"""
ctnsr = params2ctnsr(params)
strss = np.zeros((len(strns),6),dtype=float)
for i,strn in enumerate(strns):
strs = np.dot(ctnsr,strn)
strss[i] = strs
return strss.flatten()
def func(x,*args):
"""
Objective function to be minimized in least square fitting.
.. math::
L= \sum_s 1/N_s \sum_i 1/6 [ S_i^s -\sum_j C_{ij} e_j^s]^2
Options
-------
x : N-dimensional array
Variable array to be optimized.
args : 2-dimensional array of parameter arrays
args[0] is the array of arrays of strains given by this script.
args[1] is the array of arrays of stresses obtained by the external program.
These arrays should not be flattened.
Returns
-------
val : float
Objective function value (scalar).
"""
ctnsr = params2ctnsr(x)
# print(args)
strns = args[0]
strss0 = args[1]
val = 0.0
n = 0
for i,strn in enumerate(strns):
strs = np.dot(ctnsr,strn)
strs0 = strss0[i]
for j in range(len(strs)):
val += (strs[j]-strs0[j])**2
n += 1
val /= n
print('val = ',val)
return val
def dfunc(x,*args):
"""
Derivative of the objective function to be minimized in least square fitting
with respect to the parameters.
Options
-------
x : N-dimensional float
Variable array to be optimized.
args : 2-dimensional array of parameter arrays
args[0] is the array of arrays of strains given by this script.
args[1] is the array of arrays of stresses obtained by the external program.
These arrays should not be flattened.
Returns
-------
df : N-dimensional float
Derivative of the objective function value.
"""
ctnsr = params2ctnsr(x)
strns = args[0]
strss0 = args[1]
print('ctnsr=',ctnsr)
# residue vector, (S_i -\sum_j C_{ij} e_j)
residue = np.zeros(6,dtype=float)
for i,strn in enumerate(strns):
strs = np.dot(ctnsr,strn)
strs0= strss0[i]
for j in range(len(strs)):
residue[j] += strs0[j] -strs[j]
print('residue = ',residue)
df = np.zeros(len(x),dtype=float)
n = 0
for i in range(6):
for j in range(i,6):
if i == j:
for istrn,strn in enumerate(strns):
df[n] += 2.0*(-strn[j])*residue[i]
else:
for istrn,strn in enumerate(strns):
df[n] += 2.0*(-strn[j])*residue[i] \
+2.0*(-strn[i])*residue[j]
n += 1
return df
def params2ctnsr(params):
"""
Create C_ij tensor from flat params vector assuring symmetry of C_ij.
"""
ctnsr = np.zeros((6,6),dtype=float)
n = 0
for i in range(6):
for j in range(i,6):
ctnsr[i,j] = params[n]
n += 1
for i in range(6-1):
for j in range(i+1,6):
ctnsr[j,i] = ctnsr[i,j]
return ctnsr
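# The upper triangle of a symmetric 6x6 tensor holds 6*7/2 = 21 entries, which
# is why analyze() below fits a 21-element parameter vector.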
def analyze(infname,strsfname,dlt1max=0.01,dlt2max=0.06):
#...original system
#atoms0 = read(infname,format='vasp')
atoms0 = ase.io.read(infname,format='vasp')
#...get deformations
fmats = get_deformations(dlt1max,dlt2max)
strns = []
for i,fmat in enumerate(fmats):
emat = np.zeros((3,3),dtype=float)
emat = 0.5 *(np.dot(fmat.T,fmat) -np.identity(3))
strn = np.zeros(6,dtype=float)
strn[0] = emat[0,0]
strn[1] = emat[1,1]
strn[2] = emat[2,2]
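# Shear components use the engineering (Voigt) convention, hence the factor of 2.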
strn[3] = emat[1,2] *2.0
strn[4] = emat[0,2] *2.0
strn[5] = emat[0,1] *2.0
strns.append(strn)
#...get stress values from external calculations
strss = np.zeros((len(fmats),6),dtype=float)
for i in range(len(fmats)):
dname = _prefix +"{0:02d}".format(i)
try:
with open(dname+'/'+strsfname,'r') as f:
data = f.readline().split()
strss[i] = np.array([ float(d) for d in data ])
except Exception as e:
raise
#...parameters 21 elements
params = np.zeros(21,dtype=float)
#...parameters 13 elements
#params = np.zeros(13,dtype=float)
#...fit
strss = strss.flatten()
opt,covar = curve_fit(cdote,strns,strss,p0=params)
ctnsr = params2ctnsr(opt)
# perr = np.sqrt(np.diag(covar))
# print('std dev = ',perr)
print(' delta1_max = {0:8.5f}'.format(dlt1max))
print(' delta2_max = {0:8.5f}'.format(dlt2max))
print('')
print(' C_ij [GPa]:')
for i in range(6):
for j in range(6):
print(' {0:10.3f}'.format(ctnsr[i,j]),end='')
print('')
cij = reduce_cij(atoms0,ctnsr)
print(' C_ij (reduced by crystal symmetry) [GPa]:')
for i in range(6):
for j in range(6):
print(' {0:10.3f}'.format(cij[i,j]),end='')
print('')
some_moduli(cij)
return cij
def some_moduli(cij):
sij = np.linalg.inv(cij)
c123 = cij[0,0]+cij[1,1]+cij[2,2]
c231 = cij[0,1]+cij[0,2]+cij[1,2]
c456 = cij[3,3]+cij[4,4]+cij[5,5]
s123 = sij[0,0]+sij[1,1]+sij[2,2]
s231 = sij[0,1]+sij[0,2]+sij[1,2]
s456 = sij[3,3]+sij[4,4]+sij[5,5]
kv = (c123 +2.0*c231)/9
kr = 1.0/(s123 +2.0*(s231))
gv = (c123 -c231 +3.0*c456)/15
gr = 15.0 /(4.0*s123 -4.0*s231 +3.0*s456)
kvrh = (kv+kr)/2
gvrh = (gv+gr)/2
prto2 = (3.0*kvrh -2.0*gvrh)/(6.0*kvrh +2.0*gvrh)
print('')
print(' Bulk modulus (K) = {0:10.3f} GPa'.format(kvrh))
print(' shear modulus (G) = {0:10.3f} GPa'.format(gvrh))
print(' Poisson\'s ratio (nu) = {0:10.3f}'.format(prto2))
print('')
print(' Definition of elastic moduli, see ' \
+'https://materialsproject.org/wiki/index.php/Elasticity_calculations')
txt = """ c123 = c11 +c22 +c33 = {0:10.3f}
c231 = c12 +c13 +c23 = {1:10.3f}
c456 = c44 +c55 +c66 = {2:10.3f}
s123 = s11 +s22 +s33 = {3:10.3f}
s231 = s12 +s13 +s23 = {4:10.3f}
s456 = s44 +s55 +s66 = {5:10.3f}
Kv = (c123 +2*c231)/9 = {6:10.3f}
Kr = 1.0 /(s123 +2*s231) = {7:10.3f}
Gv = (c123 -c231 +3*c456)/15 = {8:10.3f}
Gr = 15.0 /(4.0*s123 -4.0*s231 +3.0*s456) = {9:10.3f}
K = (Kv +Kr)/2
G = (Gv +Gr)/2
nu = (3*K -2*G)/(6*K +2*G)
""".format(c123,c231,c456,s123,s231,s456,kv,kr,gv,gr)
print(txt)
def reduce_cij(atoms0,cij0,eps=1.e-4):
"""
Reduce the number of independent Cij according to the crystal system of the original cell.
Note this is more than just enforcing Cij = Cji; plain symmetrization is applied separately at the end.
"""
cij = cij0
symdata = spglib.get_symmetry_dataset(atoms0)
#nsys = NAPSystem(ase_atoms=atoms0)
nsys = from_ase(atoms0)
sgnum = symdata['number']
a,b,c = nsys.get_lattice_lengths()
alpha,beta,gamma = nsys.get_lattice_angles()
aeqb = abs(a-b) < eps*min(a,b)
beqc = abs(b-c) < eps*min(b,c)
ceqa = abs(c-a) < eps*min(c,a)
aleqpi2 = abs(alpha-np.pi/2) < eps*np.pi/2
bteqpi2 = abs(beta -np.pi/2) < eps*np.pi/2
gmeqpi2 = abs(gamma-np.pi/2) < eps*np.pi/2
print('Spacegroup number = ',sgnum,' ==> ',end='')
if 0 < sgnum <= 2: # Triclinic
print('Triclinic')
pass
elif sgnum <= 15: # Monoclinic
print('Monoclinic')
pass
elif sgnum <= 74: # Orthorhombic
print('Orthorhombic')
pass
elif sgnum <= 142: # Tetragonal
print('Tetragonal')
pass
elif sgnum <= 194: # Hexagonal
print('Hexagonal')
print('Number of independent C_ij elements is reduced to 6.')
print('C_66 should be 1/2(C_11-C_12) but this is not enforced now.')
if not aleqpi2:
c22 = (cij[1,1] +cij[2,2])/2
c13 = (cij[0,1] +cij[0,2])/2
c55 = (cij[4,4] +cij[5,5])/2
cij[1,1] = cij[2,2] = c22
cij[0,1] = cij[0,2] = c13
cij[4,4] = cij[5,5] = c55
elif not bteqpi2:
c11 = (cij[0,0] +cij[2,2])/2
c12 = (cij[0,1] +cij[1,2])/2
c44 = (cij[3,3] +cij[5,5])/2
cij[0,0] = cij[2,2] = c11
cij[0,1] = cij[1,2] = c12
cij[3,3] = cij[5,5] = c44
elif not gmeqpi2:
c11 = (cij[0,0] +cij[1,1])/2
c12 = (cij[0,2] +cij[1,2])/2
c44 = (cij[3,3] +cij[4,4])/2
cij[0,0] = cij[1,1] = c11
cij[0,2] = cij[1,2] = c12
cij[3,3] = cij[4,4] = c44
elif sgnum <= 230: # Cubic
print('Cubic')
print('Number of independent C_ij elements is reduced to 3.')
c11 = (cij[0,0] +cij[1,1] +cij[2,2])/3
c12 = (cij[0,1] +cij[0,2] +cij[1,2])/3
c44 = (cij[3,3] +cij[4,4] +cij[5,5])/3
cij[0,0] = cij[1,1] = cij[2,2] = c11
cij[0,1] = cij[0,2] = cij[1,2] = c12
cij[3,3] = cij[4,4] = cij[5,5] = c44
else:
raise ValueError('Invalid space group number, ',sgnum)
# Just symmetrize Cij
for i in range(6):
for j in range(i,6):
cij[j,i] = cij[i,j]
return cij
if __name__ == '__main__':
args= docopt(__doc__,version=__version__)
dlt1max = float(args['--delta1'])
dlt2max = float(args['--delta2'])
infname = args['POSCAR']
if args['prepare']:
prepare(infname,dlt1max=dlt1max,dlt2max=dlt2max)
# overwrite conf.elast.yaml file
conf = {'delta1':dlt1max, 'delta2':dlt2max}
with open(_confname,'w') as f:
f.write(yaml.dump(conf,default_flow_style=False))
elif args['analyze']:
try:
with open(_confname,'r') as f:
conf = yaml.safe_load(f)
except Exception as e:
raise
#print('conf=',conf)
dlt1max = conf['delta1']
dlt2max = conf['delta2']
strsfname = args['STRSFILE']
analyze(infname,strsfname,dlt1max=dlt1max,dlt2max=dlt2max)
| |
import m5
from m5.objects import *
import os, optparse, sys
from m5.util import addToPath, fatal
addToPath('../common')
# --------------------
# Define Command Line Options
# ====================
parser = optparse.OptionParser()
parser.add_option("-d", "--detailed", action="store_true")
parser.add_option("-t", "--timing", action="store_true")
parser.add_option("-m", "--maxtick", type="int")
parser.add_option("-n", "--numcpus",
help="Number of cpus in total", type="int")
parser.add_option("-f", "--frequency",
default = "1GHz",
help="Frequency of each CPU")
#parser.add_option("-p", "--protocol",
# default="moesi",
# help="The coherence protocol to use for the L1'a (i.e. MOESI, MOSI)")
parser.add_option("--l1size",
default = "32kB")
#parser.add_option("--l1latency",
# default = 1)
parser.add_option("--l2size",
default = "256kB")
#parser.add_option("--l2latency",
# default = 10)
parser.add_option("--rootdir",
help="Root directory of Splash2",
default="/afs/cs.wisc.edu/u/n/i/nirvedh/private/ece757/benchmarks/parsec-x86/parsec-3.0/ext/splash2")
parser.add_option("-b", "--benchmark",
help="Splash 2 benchmark to run")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
if not options.numcpus:
print "Specify the number of cpus with -n"
sys.exit(1)
# --------------------
# Define Splash2 Benchmarks
# ====================
class Cholesky(LiveProcess):
cwd = options.rootdir + '/kernels/cholesky'
executable = options.rootdir + '/kernels/cholesky/CHOLESKY'
cmd = 'CHOLESKY -p' + str(options.numcpus) + ' '\
+ options.rootdir + '/kernels/cholesky/inputs/tk23.O'
class FFT(LiveProcess):
cwd = options.rootdir + '/kernels/fft'
executable = options.rootdir + '/kernels/fft/FFT'
cmd = 'FFT -p' + str(options.numcpus) + ' -m18'
class LU_contig(LiveProcess):
executable = options.rootdir + '/kernels/lu/contiguous_blocks/LU'
cmd = 'LU -p' + str(options.numcpus)
cwd = options.rootdir + '/kernels/lu/contiguous_blocks'
class LU_noncontig(LiveProcess):
executable = options.rootdir + '/kernels/lu/non_contiguous_blocks/LU'
cmd = 'LU -p' + str(options.numcpus)
cwd = options.rootdir + '/kernels/lu/non_contiguous_blocks'
class Radix(LiveProcess):
executable = options.rootdir + '/kernels/radix/RADIX'
cmd = 'RADIX -n524288 -p' + str(options.numcpus)
cwd = options.rootdir + '/kernels/radix'
class Barnes(LiveProcess):
executable = options.rootdir + '/apps/barnes/BARNES'
cmd = 'BARNES'
input = options.rootdir + '/apps/barnes/input.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/barnes'
class FMM(LiveProcess):
executable = options.rootdir + '/apps/fmm/FMM'
cmd = 'FMM'
if str(options.numcpus) == '1':
input = options.rootdir + '/apps/fmm/inputs/input.2048'
else:
input = options.rootdir + '/apps/fmm/inputs/input.2048.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/fmm'
class Ocean_contig(LiveProcess):
executable = options.rootdir + '/apps/ocean/contiguous_partitions/OCEAN'
cmd = 'OCEAN -p' + str(options.numcpus)
cwd = options.rootdir + '/apps/ocean/contiguous_partitions'
class Ocean_noncontig(LiveProcess):
executable = options.rootdir + '/apps/ocean/non_contiguous_partitions/OCEAN'
cmd = 'OCEAN -p' + str(options.numcpus)
cwd = options.rootdir + '/apps/ocean/non_contiguous_partitions'
class Raytrace(LiveProcess):
executable = options.rootdir + '/apps/raytrace/RAYTRACE'
cmd = 'RAYTRACE -p' + str(options.numcpus) + ' ' \
+ options.rootdir + '/apps/raytrace/inputs/teapot.env'
cwd = options.rootdir + '/apps/raytrace'
class Water_nsquared(LiveProcess):
executable = options.rootdir + '/apps/water-nsquared/WATER-NSQUARED'
cmd = 'WATER-NSQUARED'
if options.numcpus==1:
input = options.rootdir + '/apps/water-nsquared/input'
else:
input = options.rootdir + '/apps/water-nsquared/input.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/water-nsquared'
class Water_spatial(LiveProcess):
executable = options.rootdir + '/apps/water-spatial/WATER-SPATIAL'
cmd = 'WATER-SPATIAL'
if options.numcpus==1:
input = options.rootdir + '/apps/water-spatial/input'
else:
input = options.rootdir + '/apps/water-spatial/input.p' + str(options.numcpus)
cwd = options.rootdir + '/apps/water-spatial'
# --------------------
# Base L1 Cache Definition
# ====================
class L1(BaseCache):
# latency = options.l1latency
# block_size = 64
mshrs = 12
tgts_per_mshr = 8
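# mshrs: outstanding misses the cache can track (MSHR entries);
# tgts_per_mshr: requests to the same block folded into one outstanding miss.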
#protocol = CoherenceProtocol(protocol=options.protocol)
# ----------------------
# Base L2 Cache Definition
# ----------------------
class L2(BaseCache):
# block_size = 64
# latency = options.l2latency
mshrs = 92
tgts_per_mshr = 16
write_buffers = 8
# ----------------------
# Define the cpus
# ----------------------
busFrequency = Frequency(options.frequency)
if options.timing:
cpus = [TimingSimpleCPU(cpu_id = i)
for i in xrange(options.numcpus)]
elif options.detailed:
cpus = [DerivO3CPU(cpu_id = i)
for i in xrange(options.numcpus)]
else:
cpus = [AtomicSimpleCPU(cpu_id = i)
for i in xrange(options.numcpus)]
# ----------------------
# Create a system, and add system wide objects
# ----------------------
system = System(cpu = cpus, physmem = PhysicalMemory(),
membus = Bus(clock = busFrequency))
system.toL2bus = Bus(clock = busFrequency)
system.l2 = L2(size = options.l2size, assoc = 8)
# ----------------------
# Connect the L2 cache and memory together
# ----------------------
system.physmem.port = system.membus.port
system.l2.cpu_side = system.toL2bus.port
system.l2.mem_side = system.membus.port
# ----------------------
# Connect the L2 cache and clusters together
# ----------------------
for cpu in cpus:
cpu.addPrivateSplitL1Caches(L1(size = options.l1size, assoc = 1),
L1(size = options.l1size, assoc = 4))
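# addPrivateSplitL1Caches(icache, dcache): a direct-mapped I-cache and a 4-way
# D-cache of the same size (argument order assumed from the classic gem5 API).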
cpu.mem = cpu.dcache
# connect cpu level-1 caches to shared level-2 cache
cpu.connectMemPorts(system.toL2bus)
# ----------------------
# Define the root
# ----------------------
root = Root(system = system)
# --------------------
# Pick the correct Splash2 Benchmarks
# ====================
if options.benchmark == 'Cholesky':
root.workload = Cholesky()
elif options.benchmark == 'FFT':
root.workload = FFT()
elif options.benchmark == 'LUContig':
root.workload = LU_contig()
elif options.benchmark == 'LUNoncontig':
root.workload = LU_noncontig()
elif options.benchmark == 'Radix':
root.workload = Radix()
elif options.benchmark == 'Barnes':
root.workload = Barnes()
elif options.benchmark == 'FMM':
root.workload = FMM()
elif options.benchmark == 'OceanContig':
root.workload = Ocean_contig()
elif options.benchmark == 'OceanNoncontig':
root.workload = Ocean_noncontig()
elif options.benchmark == 'Raytrace':
root.workload = Raytrace()
elif options.benchmark == 'WaterNSquared':
root.workload = Water_nsquared()
elif options.benchmark == 'WaterSpatial':
root.workload = Water_spatial()
else:
panic("The --benchmark environment variable was set to something" \
+" improper.\nUse Cholesky, FFT, LUContig, LUNoncontig, Radix" \
+", Barnes, FMM, OceanContig,\nOceanNoncontig, Raytrace," \
+" WaterNSquared, or WaterSpatial\n")
# --------------------
# Assign the workload to the cpus
# ====================
for cpu in cpus:
cpu.workload = root.workload
# ----------------------
# Run the simulation
# ----------------------
if options.timing or options.detailed:
root.system.mem_mode = 'timing'
# instantiate configuration
m5.instantiate(root)
# simulate until program terminates
if options.maxtick:
exit_event = m5.simulate(options.maxtick)
else:
exit_event = m5.simulate(m5.MaxTick)
print 'Exiting <at> tick', m5.curTick(), 'because', exit_event.getCause()
| |
#!/usr/bin/env python
#title :dbif.py
#description :Top level file for db module in the SITAPT package
#author :aarora79
#date :20151003
#version :0.1
#usage :python dbif.py
#notes :
#python_version :2.7.10
#==============================================================================
import os
import sys
import argparse
import pkg_resources # part of setuptools
import wget
from bs4 import BeautifulSoup
import urlparse
from urlparse import urljoin
import shutil
import pickle
import pprint
import gzip
from pymongo import MongoClient
from bson.son import SON
import pandas as pd
#import submodules
from globals import globals
from utils import sa_logger
#global variables for this file
MONGODB_HOSTNAME = 'localhost'
MONGODB_PORT_NUMBER = 27017
REMOVE_IF_EXISTS = 'remove-if-exists'
DO_NOTHING_IF_EXISTS = 'do-nothing-if-collection-exists'
DBIF_OK = 0
DBIF_ERROR = -1
DBIF_NAME = globals.PACKAGE_NAME
logger = sa_logger.init(globals.PACKAGE_NAME)
db_parms = {'initted': False }
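# db_parms holds the shared MongoClient once db_init() has run; every helper
# below checks the 'initted' flag before touching the connection.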
def db_add_record_to_collection(db_name, collection_name, rec):
status = DBIF_OK
error_text = ''
try:
db_parms['client'][db_name][collection_name].insert_one(rec)
except Exception,e:
error_text = str(e)
logger.error('Exception while inserting record ' + str(e))
status = DBIF_ERROR
return status, error_text
def db_add_records_to_collection(db_name, collection_name, records):
status = DBIF_OK
error_text = ''
try:
db_parms['client'][db_name][collection_name].insert(records)
except Exception,e:
error_text = str(e)
logger.error('Exception while inserting records ' + str(e))
status = DBIF_ERROR
return status, error_text
def db_create_collection(db_name, collection_name, behavior_if_exists = REMOVE_IF_EXISTS):
status = DBIF_OK
error_text = ''
if db_parms['initted'] == False:
error_text = 'DB not yet initted, cannot create collection ' + collection_name
logger.error(error_text)
return DBIF_ERROR, error_text
try:
if behavior_if_exists == REMOVE_IF_EXISTS:
#check if collection exists
exists = collection_name in db_parms['client'][db_name].collection_names()
if exists:
db_parms['client'][db_name][collection_name].drop()
logger.info('dropped collection ' + collection_name + ', creating new one by the same name')
else:
logger.info('skip collection name check since behavior_if_exists is ' + behavior_if_exists)
#now create new collection
collection = db_parms['client'][db_name][collection_name]
except Exception, e:
error_text = str(e)
logger.error('Exception while creating collection: ' + collection_name + ' : ' + str(e))
status = DBIF_ERROR
if status == DBIF_OK:
logger.info('created collection ' + collection_name)
return status, error_text
def db_init():
status = DBIF_OK
error_text = ''
logger.info('begin with DB initialization..')
try:
client = MongoClient(MONGODB_HOSTNAME, MONGODB_PORT_NUMBER)
logger.info('Mongo client created')
db_parms['client'] = client
db_parms['initted'] = True
logger.info('successfully opened connection to DB..')
except Exception, e:
error_text = str(e)
logger.error('Exception occurred while db_init: ' + str(e))
status = DBIF_ERROR
return status, error_text
def db_get_collection_names(db_name, name_filter = None):
status = DBIF_OK
error_text = ''
#check if db initted correctly
if db_parms['initted'] == False:
error_text = 'DB not yet initted, cannot list collections for db ' + db_name
logger.error(error_text)
return DBIF_ERROR, error_text
if name_filter is None:
name_list = db_parms['client'][db_name].collection_names()
else:
name_list = [coll_name for coll_name in db_parms['client'][db_name].collection_names() if name_filter in coll_name]
return name_list
def db_collection_find_records(db_name, collection_name, filter_query = None):
status = DBIF_OK
error_text = ''
records_cursor = ''
#check if db initted correctly
if db_parms['initted'] == False:
error_text = 'DB not yet initted, cannot query collection ' + collection_name
logger.error(error_text)
return DBIF_ERROR, error_text, records_cursor
#check if collection exists
if collection_name not in db_parms['client'][db_name].collection_names():
error_text = 'Collection ' + collection_name + ' does not exist in db, exiting..'
logger.error(error_text)
return DBIF_ERROR, error_text, records_cursor
#ready to query
collection_to_query = db_parms['client'][db_name][collection_name]
#find records
records_cursor = collection_to_query.find(filter_query)
num_records = records_cursor.count()
logger.info('found ' + str(num_records) + ' records in the collection that match the search criteria')
return status, error_text, records_cursor
def db_get_collection_count(db_name, coll_name):
count = 0
#check if db initted correctly
if db_parms['initted'] == False:
error_text = 'DB not yet initted, cannot query collection ' + coll_name
logger.error(error_text)
return DBIF_ERROR, error_text, count
#check if collection exists
if coll_name not in db_parms['client'][db_name].collection_names():
error_text = 'Collection ' + coll_name + ' does not exist in db, exiting..'
logger.error(error_text)
return DBIF_ERROR, error_text, count
count = db_parms['client'][db_name][coll_name].count()
return DBIF_OK, '', count
def db_run_pipeline(db_name, coll_name, ppln):
cursor = ''
#check if db initted correctly
if db_parms['initted'] == False:
error_text = 'DB not yet initted, cannot query collection ' + coll_name
logger.error(error_text)
return DBIF_ERROR, error_text, cursor
#check if collection exists
if coll_name not in db_parms['client'][db_name].collection_names():
error_text = 'Collection ' + coll_name + ' does not exist in db, exiting..'
logger.error(error_text)
return DBIF_ERROR, error_text, cursor
#ok ready to run pipeline
cursor = db_parms['client'][db_name][coll_name].aggregate(ppln)
return DBIF_OK, '', cursor
def db_get_all_records_in_collection(db_name, coll_name):
cursor = ''
#check if db initted correctly
if db_parms['initted'] == False:
error_text = 'DB not yet initted, cannot query collection ' + coll_name
logger.error(error_text)
return DBIF_ERROR, error_text, cursor
#check if collection exists
if coll_name not in db_parms['client'][db_name].collection_names():
error_text = 'Collection ' + coll_name + ' does not exist in db, exiting..'
logger.error(error_text)
return DBIF_ERROR, error_text, cursor
#ok ready to fetch all records
cursor = db_parms['client'][db_name][coll_name].find()
return DBIF_OK, '', cursor
def db_do_mapreduce(db_name, coll_name, mapper, reducer, output):
result_coll = ''
#check if db initted correctly
if db_parms['initted'] == False:
error_text = 'DB not yet initted, cannot query collection ' + coll_name
logger.error(error_text)
return DBIF_ERROR, error_text, result_coll
#check if collection exists
if coll_name not in db_parms['client'][db_name].collection_names():
error_text = 'Collection ' + coll_name + ' does not exist in db, exiting..'
logger.error(error_text)
return DBIF_ERROR, error_text, result_coll
#ok ready to run map-reduce
result_coll = db_parms['client'][db_name][coll_name].map_reduce(mapper, reducer, output)
return DBIF_OK, '', result_coll
def db_is_doc_in_coll(db_name, coll_name, query):
found = False
#check if db initted correctly
if db_parms['initted'] == False:
error_text = 'DB not yet initted, cannot query collection ' + coll_name
logger.error(error_text)
return found
#check if collection exists
if coll_name not in db_parms['client'][db_name].collection_names():
error_text = 'Collection ' + coll_name + ' does not exist in db, exiting..'
logger.error(error_text)
return found
#ok ready to run the query
doc = db_parms['client'][db_name][coll_name].find_one(query)
if doc is not None:
found = True
return found
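# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module API). The database and
# collection names below are hypothetical, chosen only to show the intended
# call sequence of the helpers above.
if __name__ == '__main__':
    status, error_text = db_init()
    if status == DBIF_OK:
        # create (or recreate) a collection and insert a record into it
        db_create_collection('sitapt_db', 'packages', REMOVE_IF_EXISTS)
        db_add_record_to_collection('sitapt_db', 'packages',
                                    {'name': 'example', 'count': 1})
        # run a simple aggregation pipeline over the same collection
        ppln = [{'$group': {'_id': '$name', 'total': {'$sum': '$count'}}}]
        status, error_text, cursor = db_run_pipeline('sitapt_db', 'packages', ppln)
        if status == DBIF_OK:
            for doc in cursor:
                print doc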
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import xdot
from cairis.core.Borg import Borg
from cairis.core.ARM import *
import gtk
import math
import cairo
import pangocairo
from cairis.core.armid import *
from ComponentModel import ComponentModel
__author__ = 'Shamal Faily'
class ComponentTextShape(xdot.TextShape):
def __init__(self, pen, x, y, j, w, t,dim):
xdot.TextShape.__init__(self,pen,x,y,j,w,t)
def draw(self, cr, highlight=False,zoom_ratio=-1):
xdot.TextShape.draw(self,cr,highlight,zoom_ratio)
class InterfaceLabelShape(xdot.TextShape):
def __init__(self, pen, x, y, j, w, objtName):
ifLbls = objtName.split('_')
xdot.TextShape.__init__(self,pen,x,y,j,w,ifLbls[1])
def draw(self, cr, highlight=False,zoom_ratio=-1):
xdot.TextShape.draw(self,cr,highlight,zoom_ratio)
class AttackerShape(xdot.Shape):
def __init__(self, pen, x0, y0, w, h, filled=False):
xdot.Shape.__init__(self)
self.pen = pen.copy()
self.x0 = x0
self.y0 = y0
self.w = w
self.h = h
self.filled = filled
def draw(self, cr, highlight=False,zoom_ratio=-1):
if (zoom_ratio == -1) or (zoom_ratio > HIGH_ZOOM_RATIO):
cr.save()
cr.set_source_rgb(0,0,0)
cr.translate(self.x0, self.y0)
cr.scale(self.w, self.h)
cr.rectangle(-0.25,-1.05,0.5,0.25)
cr.fill()
#hat
cr.new_path()
cr.move_to(-0.5,-0.8)
cr.line_to(0.5,-0.8)
cr.move_to(0.0,0.0)
cr.line_to(0.0,-0.5)
cr.new_sub_path()
cr.arc(0.0,-0.75,0.25,0,2.0 * math.pi)
cr.move_to(0.0,0.0)
cr.line_to(0.0,0.25)
cr.line_to(-0.4,0.9)
cr.move_to(0.0,0.25)
cr.line_to(0.4,0.9)
cr.move_to(-0.4,-0.2)
cr.line_to(0.4,-0.2)
cr.restore()
pen = self.select_pen(highlight)
if self.filled:
cr.set_source_rgba(*pen.fillcolor)
cr.fill()
else:
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
class PersonaShape(xdot.Shape):
def __init__(self, pen, x0, y0, w, h, filled=False):
xdot.Shape.__init__(self)
self.pen = pen.copy()
self.x0 = x0
self.y0 = y0
self.w = w
self.h = h
self.filled = filled
def draw(self, cr, highlight=False,zoom_ratio=-1):
if (zoom_ratio == -1) or (zoom_ratio > HIGH_ZOOM_RATIO):
cr.save()
cr.translate(self.x0, self.y0)
cr.scale(self.w, self.h)
cr.move_to(0.0,0.0)
cr.line_to(0.0,-0.5)
cr.new_sub_path()
cr.arc(0.0,-0.75,0.25,0,2.0 * math.pi)
cr.move_to(0.0,0.0)
cr.line_to(0.0,0.25)
cr.line_to(-0.4,0.9)
cr.move_to(0.0,0.25)
cr.line_to(0.4,0.9)
cr.move_to(-0.4,-0.2)
cr.line_to(0.4,-0.2)
cr.restore()
pen = self.select_pen(highlight)
if self.filled:
cr.set_source_rgba(*pen.fillcolor)
cr.fill()
else:
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
class RequiredInterfaceShape(xdot.EllipseShape):
def __init__(self, pen, x0, y0, w, h, objtName, filled=False):
xdot.EllipseShape.__init__(self,pen,x0,y0,w,h,filled)
self.theInterfaceName = objtName
def draw(self, cr, highlight=False,zoom_ratio=-1):
cr.save()
cr.translate(self.x0, self.y0)
cr.scale(self.w, self.h)
cr.rotate(0.5 * math.pi)
cr.move_to(1.0, 0.0)
cr.arc(0.0, 0.0, 1.0, 0, math.pi)
cr.restore()
pen = self.select_pen(highlight)
if self.filled:
cr.set_source_rgba(*pen.fillcolor)
cr.fill()
else:
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
class ProvidedInterfaceShape(xdot.EllipseShape):
def __init__(self, pen, x0, y0, w, h, objtName, filled=False):
xdot.EllipseShape.__init__(self,pen,x0,y0,w,h,filled)
def draw(self, cr, highlight=False,zoom_ratio=-1):
xdot.EllipseShape.draw(self,cr,highlight,zoom_ratio)
class MisuseCaseShape(xdot.EllipseShape):
def __init__(self, pen, x0, y0, w, h, filled=False):
xdot.EllipseShape.__init__(self,pen,x0,y0,w,h,filled)
def draw(self, cr, highlight=False,zoom_ratio=-1):
if (zoom_ratio == -1) or (zoom_ratio > HIGH_ZOOM_RATIO):
xdot.EllipseShape.draw(self,cr,highlight,-1)
class RoleShape(xdot.Shape):
def __init__(self, pen, x0, y0, w, h, filled=False):
xdot.Shape.__init__(self)
self.pen = pen.copy()
self.x0 = x0
self.y0 = y0
self.w = w
self.h = h
self.filled = filled
def draw(self, cr, highlight=False,zoom_ratio=-1):
if (zoom_ratio == -1) or (zoom_ratio > HIGH_ZOOM_RATIO):
cr.save()
cr.translate(self.x0, self.y0)
cr.scale(self.w, self.h)
cr.move_to(-0.5,-0.25)
cr.rectangle(-0.25,-0.25,0.5,0.75)
cr.move_to(0.0,-0.5)
cr.new_sub_path()
cr.arc(0.0,-0.5,0.25,0,2.0 * math.pi)
cr.restore()
pen = self.select_pen(highlight)
if self.filled:
cr.set_source_rgba(*pen.fillcolor)
cr.fill()
else:
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
class GoalShape(xdot.Shape):
def __init__(self, pen, x0, y0, w, h, filled=False):
xdot.Shape.__init__(self)
self.pen = pen.copy()
self.x0 = x0
self.y0 = y0
self.w = w
self.h = h
self.filled = filled
def draw(self, cr, highlight=False,zoom_ratio=-1):
if (zoom_ratio == -1) or (zoom_ratio > HIGH_ZOOM_RATIO):
cr.save()
cr.translate(self.x0, self.y0)
cr.scale(self.w, self.h)
cr.move_to(0.0,-0.5)
cr.new_sub_path()
cr.arc(0.0,-0.5,0.5,180 * (math.pi / 180),0 * (math.pi / 180))
cr.move_to(0.5,0.0)
cr.new_sub_path()
cr.arc(0.5,0.0,0.5,270 * (math.pi / 180),90 * (math.pi / 180))
cr.move_to(0.0,0.5)
cr.new_sub_path()
cr.arc(0.0,0.5,0.5,0 * (math.pi / 180),180 * (math.pi / 180))
cr.move_to(-0.5,0.0)
cr.new_sub_path()
cr.arc(-0.5,0.0,0.5,90 * (math.pi / 180),270 * (math.pi / 180))
cr.restore()
pen = self.select_pen(highlight)
if self.filled:
cr.set_source_rgba(*pen.fillcolor)
cr.fill()
else:
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
#class RequirementShape(xdot.Shape,ChernoffFace):
# def __init__(self, pen, x0, y0, w, h,objtName,dp,filled=False):
# xdot.Shape.__init__(self)
# ChernoffFace.__init__(self,objtName,dp)
# self.pen = pen.copy()
# self.x0 = x0
# self.y0 = y0
# self.w = w
# self.h = h
# self.filled = filled
# def draw(self, cr, highlight=False,zoom_ratio=-1):
# ChernoffFace.draw(self,cr,self.x0,self.y0,self.w,self.h,highlight,zoom_ratio)
class ComponentPolygonShape(xdot.PolygonShape):
def __init__(self, pen, points, dim, objt,filled=False):
xdot.PolygonShape.__init__(self,pen,points,filled)
b = Borg()
self.dbProxy = b.dbProxy
self.dim = dim
self.objt = objt
def draw(self, cr, highlight=False,zoom_ratio=-1):
# level-of-detail check: draw at full detail when printing (zoom_ratio == -1)
# or when the current zoom level makes this node's dimension legible
if (zoom_ratio == -1) or (self.dim == 'risk') or (zoom_ratio > LOW_ZOOM_RATIO and (self.dim == 'asset' or self.dim == 'threat' or self.dim == 'vulnerability')) or (zoom_ratio > HIGH_ZOOM_RATIO):
if (self.dim != 'role'):
x0, y0 = self.points[-1]
cr.move_to(x0, y0)
for x, y in self.points:
cr.line_to(x, y)
cr.close_path()
pen = self.select_pen(highlight)
if self.filled:
cr.set_source_rgba(*pen.fillcolor)
cr.fill_preserve()
cr.fill()
else:
cr.set_dash(pen.dash)
cr.set_line_width(pen.linewidth)
cr.set_source_rgba(*pen.color)
cr.stroke()
if (len(self.points) == 4):
if (self.dim != 'role') and ((zoom_ratio == -1) or (zoom_ratio > HIGH_ZOOM_RATIO)):
if (self.points[0][0] == self.points[1][0]):
cr.set_source_rgba(*pen.color)
xs = self.points[0][0]
ys = self.points[1][1]
xe = self.points[3][0]
ye = self.points[2][1]
cr.move_to(xs,ys + 7.5)
cr.line_to(xe,ys + 7.5)
cr.stroke()
if (zoom_ratio > LOW_ZOOM_RATIO):
if (self.dim == 'asset'):
self.decorateAssetNode(cr,zoom_ratio)
elif (self.dim == 'threat'):
self.decorateThreatNode(cr,zoom_ratio)
elif (self.dim == 'vulnerability'):
self.decorateVulnerabilityNode(cr,zoom_ratio)
if (zoom_ratio > HIGH_ZOOM_RATIO and self.dim == 'role'):
self.decorateRoleNode(cr)
def decorateRoleNode(self,cr):
xs = self.points[0][0]
ys = self.points[0][1]
width = self.points[3][0] - self.points[0][0]
height = self.points[0][1] - self.points[1][1]
b = Borg()
iconPath = b.iconDir + '/roleNode.png'
roleSurface = cairo.ImageSurface.create_from_png(iconPath)
imageWidth = roleSurface.get_width()
imageHeight = roleSurface.get_height()
cr.save()
cr.set_source_surface(roleSurface,xs + (width/4),ys - height)
cr.paint()
cr.restore()
def decorateAssetNode(self,cr,zoom_ratio):
if (zoom_ratio == -1) or (zoom_ratio > HIGH_ZOOM_RATIO):
xs = self.points[0][0]
ye = self.points[2][1]
ys = self.points[1][1]
xe = self.points[3][0]
cr.move_to(xs + 30,ye + 7.5)
cr.line_to(xs + 30,self.points[0][1])
cr.stroke()
asset = self.dbProxy.dimensionObject(self.objt,self.dim)
syProps = asset.securityProperties(self.environment.name(),self.environment.duplicateProperty(),self.environment.overridingEnvironment())
cx = xs
cy = ys + 10
cr.set_line_width(0.1)
cr.set_source_rgb(0,0,0)
cr.rectangle(cx,cy,syProps[0] * 10,3)
cr.fill()
ix = xs
iy = cy + 6
cr.set_line_width(0.1)
cr.set_source_rgb(1,0,0)
cr.rectangle(ix,iy,syProps[1] * 10,3)
cr.fill()
ax = xs
ay = iy + 6
cr.set_line_width(0.1)
cr.set_source_rgb(0,1,0)
cr.rectangle(ax,ay,syProps[2] * 10,3)
cr.fill()
avx = xs
avy = ay + 6
cr.set_line_width(0.1)
cr.set_source_rgb(0,0,1)
cr.rectangle(avx,avy,syProps[3] * 10,3)
cr.fill()
def decorateThreatNode(self,cr,zoom_ratio):
if (zoom_ratio > LOW_ZOOM_RATIO) and (zoom_ratio < HIGH_ZOOM_RATIO):
threat = self.dbProxy.dimensionObject(self.objt,self.dim)
likelihood = threat.likelihood(self.environment.name(),self.environment.duplicateProperty(),self.environment.overridingEnvironment())
if (likelihood == 'Incredible'):
lhoodScore = INCREDIBLE_COLOUR
elif (likelihood == 'Improbable'):
lhoodScore = IMPROBABLE_COLOUR
elif (likelihood == 'Remote'):
lhoodScore = REMOTE_COLOUR
elif (likelihood == 'Occasional'):
lhoodScore = OCCASIONAL_COLOUR
else:
lhoodScore = FREQUENT_COLOUR
x0, y0 = self.points[-1]
cr.move_to(x0, y0)
for x, y in self.points:
cr.line_to(x, y)
cr.close_path()
cr.set_source_rgb(lhoodScore[0],lhoodScore[1],lhoodScore[2])
cr.fill_preserve()
cr.fill()
elif (zoom_ratio > HIGH_ZOOM_RATIO):
xs = self.points[0][0]
ye = self.points[2][1]
ys = self.points[1][1]
xe = self.points[3][0]
cr.move_to(xe - 30,ye + 7.5)
cr.line_to(xe - 30,self.points[0][1])
cr.stroke()
threat = self.dbProxy.dimensionObject(self.objt,self.dim)
syProps = threat.securityProperties(self.environment.name(),self.environment.duplicateProperty(),self.environment.overridingEnvironment())
lx = xs
ly = ye + 7.5
w = xe - xs - 30
h = self.points[0][1] - (ye + 7.5)
lhoodScore = (0,0,0)
likelihood = threat.likelihood(self.environment.name(),self.environment.duplicateProperty(),self.environment.overridingEnvironment())
if (likelihood == 'Incredible'):
lhoodScore = INCREDIBLE_COLOUR
elif (likelihood == 'Improbable'):
lhoodScore = IMPROBABLE_COLOUR
elif (likelihood == 'Remote'):
lhoodScore = REMOTE_COLOUR
elif (likelihood == 'Occasional'):
lhoodScore = OCCASIONAL_COLOUR
else:
lhoodScore = FREQUENT_COLOUR
cr.set_source_rgb(lhoodScore[0],lhoodScore[1],lhoodScore[2])
cr.rectangle(lx,ly,w,h)
cr.fill()
cLength = syProps[0] * 10
cx = xe - cLength
cy = ys + 10
cr.set_line_width(0.1)
cr.set_source_rgb(0,0,0)
cr.rectangle(cx,cy,cLength,3)
cr.fill()
iLength = syProps[1] * 10
ix = xe - iLength
iy = cy + 6
cr.set_line_width(0.1)
cr.set_source_rgb(1,0,0)
cr.rectangle(ix,iy,iLength,3)
cr.fill()
avLength = syProps[2] * 10
avx = xe - avLength
avy = iy + 6
cr.set_line_width(0.1)
cr.set_source_rgb(0,1,0)
cr.rectangle(avx,avy,avLength,3)
cr.fill()
acLength = syProps[3] * 10
acx = xe - acLength
acy = avy + 6
cr.set_line_width(0.1)
cr.set_source_rgb(0,0,1)
cr.rectangle(acx,acy,acLength,3)
cr.fill()
def decorateVulnerabilityNode(self,cr,zoom_ratio):
if (zoom_ratio > LOW_ZOOM_RATIO) and (zoom_ratio < HIGH_ZOOM_RATIO):
vulnerability = self.dbProxy.dimensionObject(self.objt,self.dim)
severity = vulnerability.severity(self.environment.name(),self.environment.duplicateProperty(),self.environment.overridingEnvironment())
if (severity == 'Negligible'):
sevCol = NEGLIGIBLE_COLOUR
elif (severity == 'Marginal'):
sevCol = MARGINAL_COLOUR
elif (severity == 'Critical'):
sevCol = CRITICAL_COLOUR
else:
sevCol = CATASTROPHIC_COLOUR
x0, y0 = self.points[-1]
cr.move_to(x0, y0)
for x, y in self.points:
cr.line_to(x, y)
cr.close_path()
cr.set_source_rgb(sevCol[0],sevCol[1],sevCol[2])
cr.fill_preserve()
cr.fill()
elif (zoom_ratio > HIGH_ZOOM_RATIO):
xs = self.points[0][0]
ye = self.points[2][1]
ys = self.points[1][1]
xe = self.points[3][0]
vulnerability = self.dbProxy.dimensionObject(self.objt,self.dim)
sx = xs
sy = ye + 7.5
w = xe - xs
h = self.points[0][1] - (ye + 7.5)
sevCol = (0,0,0)
severity = vulnerability.severity(self.environment.name(),self.environment.duplicateProperty(),self.environment.overridingEnvironment())
if (severity == 'Negligible'):
sevCol = NEGLIGIBLE_COLOUR
elif (severity == 'Marginal'):
sevCol = MARGINAL_COLOUR
elif (severity == 'Critical'):
sevCol = CRITICAL_COLOUR
else:
sevCol = CATASTROPHIC_COLOUR
cr.set_source_rgb(sevCol[0],sevCol[1],sevCol[2])
cr.rectangle(sx,sy,w,h)
cr.fill()
class XDotAttrParser(xdot.XDotAttrParser):
def __init__(self, parser, buf,dim='',objt=''):
xdot.XDotAttrParser.__init__(self,parser,buf)
self.dim = dim
self.objt = objt
b = Borg()
self.dbProxy = b.dbProxy
def handle_text(self,x,y,j,w,t):
self.shapes.append(ComponentTextShape(self.pen, x, y, j, w, t,self.dim))
def handle_ellipse(self, x0, y0, w, h, filled=False):
if (self.dim == 'required_interface'):
if filled:
self.shapes.append(RequiredInterfaceShape(self.pen, x0, y0, w, h,self.objt,filled=True))
self.shapes.append(RequiredInterfaceShape(self.pen, x0, y0, w, h,self.objt))
self.shapes.append(InterfaceLabelShape(self.pen, x0, y0 + 13, 0, w * 2,self.objt))
elif (self.dim == 'provided_interface'):
if filled:
self.shapes.append(ProvidedInterfaceShape(self.pen, x0, y0, w, h,self.objt,filled=True))
self.shapes.append(ProvidedInterfaceShape(self.pen, x0, y0, w, h,self.objt))
self.shapes.append(InterfaceLabelShape(self.pen, x0, y0 + 13, 0, w * 2,self.objt))
elif (self.dim == ''):
if filled:
self.shapes.append(AttackerShape(self.pen, x0, y0, w, h,filled=True))
self.shapes.append(AttackerShape(self.pen, x0, y0, w, h))
elif (self.dim == 'persona'):
if filled:
self.shapes.append(PersonaShape(self.pen, x0, y0, w, h,filled=True))
self.shapes.append(PersonaShape(self.pen, x0, y0, w, h))
elif (self.dim == 'goal'):
if filled:
self.shapes.append(GoalShape(self.pen, x0, y0, w, h,filled=True))
self.shapes.append(GoalShape(self.pen, x0, y0, w, h))
elif (self.dim == 'role'):
if filled:
self.shapes.append(RoleShape(self.pen, x0, y0, w, h,filled=True))
self.shapes.append(RoleShape(self.pen, x0, y0, w, h))
elif (self.dim == 'task'):
if filled:
self.shapes.append(TaskShape(self.pen, x0, y0, w, h,filled=True))
self.shapes.append(TaskShape(self.pen, x0, y0, w, h))
else:
if filled:
self.shapes.append(MisuseCaseShape(self.pen, x0, y0, w, h,filled=True))
self.shapes.append(MisuseCaseShape(self.pen, x0, y0, w, h))
def handle_polygon(self,points,filled=False):
if filled:
self.shapes.append(ComponentPolygonShape(self.pen,points,self.dim, self.objt,filled=True))
self.shapes.append(ComponentPolygonShape(self.pen,points,self.dim, self.objt))
class ComponentXDotParser(xdot.XDotParser):
def __init__(self, xdotcode):
xdot.XDotParser.__init__(self,xdotcode)
b = Borg()
self.dbProxy = b.dbProxy
def handle_node(self, id, attrs):
try:
pos = attrs['pos']
except KeyError:
return
x, y = self.parse_node_pos(pos)
w = float(attrs['width'])*72
h = float(attrs['height'])*72
shapes = []
for attr in ("_draw_", "_ldraw_"):
if attr in attrs:
dimObjt = (attrs['URL']).split('#')
parser = XDotAttrParser(self, attrs[attr],dimObjt[0],dimObjt[1])
shapes.extend(parser.parse())
url = attrs.get('URL', None)
node = xdot.Node(x, y, w, h, shapes, url)
self.node_by_name[id] = node
if shapes:
self.nodes.append(node)
class ComponentDotWidget(xdot.DotWidget):
def __init__(self):
xdot.DotWidget.__init__(self)
def set_xdotcode(self, xdotcode):
parser = ComponentXDotParser(xdotcode)
self.graph = parser.parse()
self.zoom_image(self.zoom_ratio, center=True)
def printToFile(self,fileName,fileFormat):
s = None
if (fileFormat == 'svg'):
s = cairo.SVGSurface(fileName,self.graph.width,self.graph.height)
else:
s = cairo.PDFSurface(fileName,self.graph.width,self.graph.height)
c1 = cairo.Context(s)
c2 = pangocairo.CairoContext(c1)
c2.set_line_cap(cairo.LINE_CAP_BUTT)
c2.set_line_join(cairo.LINE_JOIN_MITER)
self.graph.draw(c2)
s.finish()
class ComponentDotWindow(gtk.Window):
ui = '''
<ui>
<toolbar name="ToolBar">
<toolitem action="Refresh"/>
<separator/>
<toolitem action="Print"/>
<separator/>
<toolitem action="ZoomIn"/>
<toolitem action="ZoomOut"/>
<toolitem action="ZoomFit"/>
<toolitem action="Zoom100"/>
</toolbar>
</ui>
'''
def __init__(self,windowTitle,cvName):
gtk.Window.__init__(self)
self.graph = xdot.Graph()
self.theViewName = cvName
self.traceModel = None
window = self
b = Borg()
self.dbProxy = b.dbProxy
window.set_title(windowTitle)
window.set_default_size(512, 512)
vbox = gtk.VBox()
window.add(vbox)
self.widget = ComponentDotWidget()
# Create a UIManager instance
uimanager = self.uimanager = gtk.UIManager()
# Add the accelerator group to the toplevel window
accelgroup = uimanager.get_accel_group()
window.add_accel_group(accelgroup)
# Create an ActionGroup
actiongroup = gtk.ActionGroup('Actions')
self.actiongroup = actiongroup
# Create actions
actiongroup.add_actions((
('Refresh', gtk.STOCK_REFRESH, None, None, None, self.on_refresh),
('Print', gtk.STOCK_PRINT, None, None, None, self.on_print),
('ZoomIn', gtk.STOCK_ZOOM_IN, None, None, None, self.widget.on_zoom_in),
('ZoomOut', gtk.STOCK_ZOOM_OUT, None, None, None, self.widget.on_zoom_out),
('ZoomFit', gtk.STOCK_ZOOM_FIT, None, None, None, self.widget.on_zoom_fit),
('Zoom100', gtk.STOCK_ZOOM_100, None, None, None, self.widget.on_zoom_100),
))
# Add the actiongroup to the uimanager
uimanager.insert_action_group(actiongroup, 0)
# Add a UI description
uimanager.add_ui_from_string(self.ui)
# Create a Toolbar
toolbar = uimanager.get_widget('/ToolBar')
vbox.pack_start(toolbar, False)
vbox.pack_start(self.widget)
lBox = gtk.HBox()
vbox.pack_start(lBox,False)
lFrame = gtk.Frame()
lFrame.set_label("Layout")
lFrame.set_shadow_type(gtk.SHADOW_ETCHED_IN)
lBox.pack_start(lFrame)
self.layoutCombo = gtk.ComboBoxEntry()
lFrame.add(self.layoutCombo)
layoutModel = gtk.ListStore(str)
layoutModel.append(['Hierarchical'])
layoutModel.append(['Spring'])
layoutModel.append(['Radial'])
layoutModel.append(['Circular'])
self.layoutCombo.set_model(layoutModel)
self.layoutCombo.set_text_column(0)
self.layoutCombo.set_active(1)
self.layoutHandlerId = self.layoutCombo.connect('changed',self.onLayoutChange)
self.set_focus(self.widget)
self.show_all()
def onLayoutChange(self,action):
layoutName = self.layoutCombo.get_active_text()
renderer = 'fdp'
if (layoutName == 'Hierarchical'):
renderer = 'dot'
elif (layoutName == 'Radial'):
renderer = 'twopi'
elif (layoutName == 'Circular'):
renderer = 'circo'
if (self.traceModel is None):
interfaces,connectors = self.dbProxy.componentView(self.theViewName)
self.traceModel = ComponentModel(interfaces,connectors)
self.set_xdotcode(self.traceModel.layout(renderer))
def set_filter(self, filter):
self.widget.set_filter(filter)
def set_dotcode(self, dotcode, filename='<stdin>'):
if self.widget.set_dotcode(dotcode, filename):
self.set_title(os.path.basename(filename) + ' - Dot Viewer')
self.widget.zoom_to_fit()
def set_xdotcode(self, xdotcode, filename='<stdin>'):
if self.widget.set_xdotcode(xdotcode):
self.set_title(os.path.basename(filename) + ' - Dot Viewer')
self.widget.zoom_to_fit()
def open_file(self, filename):
try:
fp = file(filename, 'rt')
self.set_dotcode(fp.read(), filename)
fp.close()
except IOError, ex:
dlg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
message_format=str(ex),
buttons=gtk.BUTTONS_OK)
dlg.set_title('Dot Viewer')
dlg.run()
dlg.destroy()
def on_refresh(self, action):
self.refreshModel()
def refreshModel(self):
try:
interfaces,connectors = self.dbProxy.componentView(self.theViewName)
self.traceModel = ComponentModel(interfaces,connectors)
self.set_xdotcode(self.traceModel.graph())
self.widget.zoom_to_fit()
except ARMException, ex:
dlg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,message_format=str(ex),buttons=gtk.BUTTONS_OK)
dlg.set_title('Component Model Viewer')
dlg.run()
dlg.destroy()
def on_print(self, action):
chooser = gtk.FileChooserDialog(title="Print to file",action = gtk.FILE_CHOOSER_ACTION_SAVE,buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
chooser.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name('Images')
filter.add_mime_type('image/svg+xml')
filter.add_mime_type('application/pdf')
filter.add_pattern('*.svg')
filter.add_pattern('*.pdf')
chooser.add_filter(filter)
if chooser.run() == gtk.RESPONSE_OK:
fileName = chooser.get_filename()
chooser.destroy()
fileNameComponents = fileName.split('.')
fileType = fileNameComponents[-1]
if ((fileType != 'svg') and (fileType != 'pdf')):
dlg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,message_format='Unknown file type',buttons=gtk.BUTTONS_OK)
dlg.set_title('Environment Model Viewer')
dlg.run()
dlg.destroy()
else:
self.widget.printToFile(fileName,fileType)
else:
chooser.destroy()
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aqt.jax.flax_attention."""
import unittest
from absl.testing import absltest
from absl.testing import parameterized
from flax import jax_utils
import jax
from jax import random
from jax.nn import initializers
import jax.numpy as jnp
import numpy as onp
import tensorflow as tf
from aqt import flax_attention
from aqt import flax_layers
from aqt import get_bounds
from aqt import quant_config
from aqt import test_utils
from aqt.flax_attention import DotProductAttnHParams
from aqt.flax_attention import ExpHParams
from aqt.flax_attention import ReciprocalHParams
from aqt.flax_attention import SoftmaxHParams
from aqt.quantization import QuantOps
from aqt.quantization import QuantType
test_utils.configure_jax()
# Expressions to approximate Softmax
# Softmax with params sum_high_bound=1, low_bound=-200, clip_and_subtract=True
def mod_softmax_clipped(sum_high_bound, low_bound, x, xs):
exp_max_sub = lambda x: onp.exp(max(x, low_bound)) - onp.exp(low_bound)
return exp_max_sub(x) / min(sum_high_bound, sum(exp_max_sub(x) for x in xs))
# Linearized exponential
def softmax_exp_lin(a, x, xs):
exp_lin = lambda x: max(0, a * x + 1)
return exp_lin(x) / sum(exp_lin(x) for x in xs)
# Linearized reciprocal
def softmax_recip_lin(a, x, xs):
recip_lin = lambda x: a + 1 - a * x
return onp.exp(x) * recip_lin(sum(onp.exp(x) for x in xs))
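# For reference, the exact softmax these helpers approximate is
#   softmax(x_i) = exp(x_i) / sum_j exp(x_j),
# and the test fixtures below first subtract the per-column maximum, the
# usual numerical-stability rewrite that leaves the softmax value unchanged.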
class SoftmaxTest(parameterized.TestCase, tf.test.TestCase):
# Test mock input and output of modified versions of softmax.
rand_2x2_tensor = onp.random.rand(2, 2)
# Subtract the maximum from each column
norm_dims = (0,)
tensor_sub_max = rand_2x2_tensor - onp.max(rand_2x2_tensor, norm_dims[0])
[[a11, a12], [a21, a22]] = tensor_sub_max
@parameterized.named_parameters(
dict(
testcase_name='original_softmax',
input_tensor=rand_2x2_tensor,
norm_dims=norm_dims,
softmax_hparams=SoftmaxHParams(None, None, None),
expected_output=onp.array(
[[
onp.exp(a11) / (onp.exp(a11) + onp.exp(a21)),
onp.exp(a12) / (onp.exp(a12) + onp.exp(a22))
],
[
onp.exp(a21) / (onp.exp(a11) + onp.exp(a21)),
onp.exp(a22) / (onp.exp(a12) + onp.exp(a22))
]])),
dict(
testcase_name='exponential_clip=-200_clip_and_subtract=True_' +
'sum_high_bound=1.0',
input_tensor=rand_2x2_tensor,
norm_dims=norm_dims,
softmax_hparams=SoftmaxHParams(
exp_hparams=ExpHParams(
sum_high_bound=1.0,
low_bound=-200.0,
clip_and_subtract=True,
linear_gradient=None),
reciprocal_hparams=None,
quant_hparams=None),
expected_output=onp.array(
[[
mod_softmax_clipped(1.0, -200.0, a11, [a11, a21]),
mod_softmax_clipped(1.0, -200.0, a12, [a12, a22])
],
[
mod_softmax_clipped(1.0, -200.0, a21, [a11, a21]),
mod_softmax_clipped(1.0, -200.0, a22, [a12, a22])
]])),
dict(
testcase_name='linear_gradient=1.0',
input_tensor=rand_2x2_tensor,
norm_dims=norm_dims,
softmax_hparams=SoftmaxHParams(
exp_hparams=ExpHParams(
sum_high_bound=0.0,
low_bound=0.0,
clip_and_subtract=False,
linear_gradient=1.0),
reciprocal_hparams=None,
quant_hparams=None),
expected_output=onp.array([[
softmax_exp_lin(1.0, a11, [a11, a21]),
softmax_exp_lin(1.0, a12, [a12, a22])
],
[
softmax_exp_lin(1.0, a21, [a11, a21]),
softmax_exp_lin(1.0, a22, [a12, a22])
]])),
dict(
testcase_name='reciprocal_linear_gradient=1.0',
input_tensor=rand_2x2_tensor,
norm_dims=norm_dims,
softmax_hparams=SoftmaxHParams(
exp_hparams=ExpHParams(
sum_high_bound=0.0,
low_bound=0.0,
clip_and_subtract=False,
linear_gradient=0.0),
reciprocal_hparams=ReciprocalHParams(
linear_gradient=1.0, low_bound=0.0),
quant_hparams=None),
expected_output=onp.array(
[[
softmax_recip_lin(1.0, a11, [a11, a21]),
softmax_recip_lin(1.0, a12, [a12, a22])
],
[
softmax_recip_lin(1.0, a21, [a11, a21]),
softmax_recip_lin(1.0, a22, [a12, a22])
]]),
),
)
def test_custom_softmax_vs_mock(self, input_tensor, norm_dims,
softmax_hparams, expected_output):
dtype = jnp.float32
output = flax_attention.softmax(
input_tensor, norm_dims, dtype, softmax_hparams,
quant_config.QuantContext(update_bounds=False, quantize_acts=False))
self.assertAllClose(expected_output, output, atol=1e-6)
# Test modified softmax vs. original softmax.
random_input = onp.random.rand(16, 16, 2, 2)
@parameterized.named_parameters(
dict(
testcase_name='modified_softmax_with_exponential_low_bound_at_-10',
input_tensor=random_input,
softmax_hparams=SoftmaxHParams(
exp_hparams=ExpHParams(
sum_high_bound=None,
low_bound=-10.0,
clip_and_subtract=False,
linear_gradient=None),
reciprocal_hparams=None,
quant_hparams=None,
)),
dict(
testcase_name='modified_softmax_with_exponential_low_bound_=-200_' +
'clip_subtract=True',
input_tensor=random_input,
softmax_hparams=SoftmaxHParams(
exp_hparams=ExpHParams(
sum_high_bound=None,
low_bound=-200.0,
clip_and_subtract=True,
linear_gradient=None),
reciprocal_hparams=None,
quant_hparams=None),
),
# Test that 'downcasting' intermediate activations to a floating-point
# format similar to IEEE fp32 produces almost the same results as
# unquantized softmax.
dict(
testcase_name='fp_downcast_fp32',
input_tensor=random_input,
softmax_hparams=SoftmaxHParams(
exp_hparams=None,
reciprocal_hparams=None,
quant_hparams=flax_attention.SoftmaxQuantHParams(
reduction_prec=None,
prec=QuantOps.FloatQuant.FloatPrec(
exp_min=-2**7, exp_max=2**7, sig_bits=23))),
))
def test_softmax_vs_original(self, input_tensor, softmax_hparams):
dtype = jnp.float32
norm_dims = (0,)
input_tensor = jnp.array(input_tensor)
output = flax_attention.softmax(
input_tensor, norm_dims, dtype, softmax_hparams,
quant_config.QuantContext(update_bounds=False, quantize_acts=True))
expected_output = flax_attention.softmax(
input_tensor, norm_dims, dtype, SoftmaxHParams(None, None, None),
quant_config.QuantContext(update_bounds=False, quantize_acts=True))
self.assertAllClose(expected_output, output, atol=1e-8)
class AttentionTest(parameterized.TestCase):
@classmethod
def construct_hparams(cls, weight_prec):
dense = flax_layers.DenseAqt.HParams(
weight_prec=weight_prec,
quant_act=None,
quant_type=QuantType.fake_quant,
weight_quant_granularity=quant_config.QuantGranularity.per_channel,
weight_half_shift=False)
return flax_attention.MultiHeadDotProductAttentionAqt.HParams(
dense_kqv=dense,
dense_out=dense,
attn_acts=DotProductAttnHParams(
attn_act_q=None,
attn_act_k=None,
attn_act_probs=None,
attn_act_v=None,
quant_type=QuantType.fake_quant,
softmax=SoftmaxHParams(None, None, None)))
@parameterized.named_parameters(
dict(testcase_name='float', weight_prec=None),
dict(testcase_name='quant_8bit', weight_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4),
)
def test_multihead_self_attention(self, weight_prec):
rng = random.PRNGKey(0)
x = jnp.ones((4, 3, 5))
hparams = self.construct_hparams(weight_prec)
sa_module = flax_attention.SelfAttentionAqt(
hparams=hparams,
num_heads=8,
attention_axis=(1,),
qkv_features=16,
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
train=False,
paxis_name=None,
kernel_init=initializers.ones,
bias_init=initializers.zeros,
dtype=jnp.float32,
causal_mask=False,
dropout_rate=0.0,
deterministic=False,
decode=False)
y, _ = sa_module.init_with_output(rng, x, padding_mask=None)
self.assertEqual(y.shape, x.shape)
@parameterized.named_parameters(
dict(testcase_name='float', weight_prec=None),
dict(testcase_name='quant_8bit', weight_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4),
)
def test_multihead_encoder_decoder_attention(self, weight_prec):
rng = random.PRNGKey(0)
q = jnp.ones((4, 3, 5))
kv = jnp.ones((4, 3, 5))
sa_module = flax_attention.MultiHeadDotProductAttentionAqt(
num_heads=8,
hparams=self.construct_hparams(weight_prec),
attention_axis=(1,),
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
train=False,
paxis_name=None,
qkv_features=16,
kernel_init=initializers.ones,
bias_init=initializers.zeros,
dtype=jnp.float32,
causal_mask=False,
dropout_rate=0.0,
deterministic=False,
decode=False)
y, _ = sa_module.init_with_output(
rng, q, kv, padding_mask=None, key_padding_mask=None)
self.assertEqual(y.shape, q.shape)
@parameterized.named_parameters(
dict(testcase_name='float', weight_prec=None),
dict(testcase_name='quant_8bit', weight_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4),
)
def test_multihead_self_attention_w_dropout(self, weight_prec):
rng = random.PRNGKey(0)
x = jnp.ones((4, 3, 5))
sa_module = flax_attention.SelfAttentionAqt(
num_heads=8,
hparams=self.construct_hparams(weight_prec),
attention_axis=(1,),
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
train=False,
paxis_name=None,
qkv_features=16,
kernel_init=initializers.ones,
bias_init=initializers.zeros,
dropout_rate=0.1,
dtype=jnp.float32,
causal_mask=False,
deterministic=False,
decode=False)
rng_dropout, rng_params = random.split(rng)
y, _ = sa_module.init_with_output(
{
'dropout': rng_dropout,
'params': rng_params
}, x, padding_mask=None)
self.assertEqual(y.shape, x.shape)
@parameterized.named_parameters(
dict(
testcase_name='float_spatial_shape_5_attn_dim_1',
weight_prec=None,
spatial_shape=(5,),
attn_dims=(1,)),
dict(
testcase_name='quant_8bit_spatial_shape_5_attn_dim_1',
weight_prec=8,
spatial_shape=(5,),
attn_dims=(1,)),
dict(
testcase_name='quant_4bit_spatial_shape_5_attn_dim_1',
weight_prec=4,
spatial_shape=(5,),
attn_dims=(1,)),
)
def test_decoding(self, weight_prec, spatial_shape, attn_dims):
bs = 2
num_heads = 3
num_features = 4
rng = random.PRNGKey(0)
key1, key2 = random.split(rng)
inputs = random.normal(key1,
(bs,) + spatial_shape + (num_heads * num_features,))
module = flax_attention.SelfAttentionAqt(
num_heads=num_heads,
hparams=self.construct_hparams(weight_prec),
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
train=False,
paxis_name=None,
qkv_features=num_heads * num_features,
attention_axis=attn_dims,
decode=False,
causal_mask=True,
dtype=jnp.float32,
dropout_rate=0.0,
deterministic=False)
initial_vars = module.init(key2, inputs, padding_mask=None)
y_ref = module.apply(initial_vars, inputs, padding_mask=None)
module.decode = True
initial_vars_decode = module.init(key2, inputs, padding_mask=None)
cache0 = initial_vars_decode['cache']
def body_fn(cache, x):
y, new_vars = module.apply({
**initial_vars, 'cache': cache
},
x,
mutable='cache',
padding_mask=None)
return new_vars['cache'], y
# scan_in_dim supports scanning multiple dims
_, y = jax_utils.scan_in_dim(
body_fn, cache0, inputs, axis=attn_dims, keepdims=True)
onp.testing.assert_allclose(y_ref, y, atol=1e-5)
@parameterized.named_parameters(
dict(testcase_name='float', weight_prec=None),
dict(testcase_name='quant_8bit', weight_prec=8),
dict(testcase_name='quant_4bit', weight_prec=4),
)
def test_autoregressive_receptive_field_1d(self, weight_prec):
"""Tests the autoregressive self-attention receptive field."""
rng = random.PRNGKey(0)
rng1, rng2 = random.split(rng, num=2)
def model_loss(inputs, pos):
out = module.apply(initial_vars, inputs, padding_mask=None)
assert out.shape == input_shape
assert len(out.shape) == 3
return out[0, pos, :].sum()
grad_fn = jax.jit(jax.grad(model_loss))
def get_receptive_field_1d(pos):
g = grad_fn(inputs, pos)[0, :, :]
return jnp.any((jnp.abs(g) > 1e-5).astype(jnp.uint32), axis=-1)
length = 10
dim = 1
num_heads = 1
input_shape = (1, length, dim)
inputs = random.normal(rng2, input_shape)
module = flax_attention.SelfAttentionAqt(
num_heads=num_heads,
hparams=self.construct_hparams(weight_prec),
quant_context=quant_config.QuantContext(
update_bounds=False, collect_acts_stats=False),
train=False,
paxis_name=None,
causal_mask=True,
kernel_init=initializers.ones,
dtype=jnp.float32,
qkv_features=None,
attention_axis=None,
dropout_rate=0.0,
deterministic=False,
decode=False)
initial_vars = module.init(
rng1, jnp.ones((1,) + (length, dim), jnp.float32), padding_mask=None)
for i in range(length):
deps = get_receptive_field_1d(i)
assert (deps[:i] == 1).all(), ('Receptive Field Error: Some of the '
'previous positions are not reachable '
'in autoregressive self-attention.')
if i != length - 1:
k = i + 1
assert (deps[k:] == 0).all(), ('Receptive Field Error: Some of the '
'future positions are reachable in '
'autoregressive self-attention.')
def test_padding_mask(self):
"""Test that the activation stats respect masking."""
# This test's strategy is to change the value of a channel of a padding
# token and make sure the stats don't change. Because the attention
# calculation is fairly involved, this is more robust and less tedious than
# trying to directly test numeric expected values.
# Construct HParams with dynamic bounds.
# Exact values don't matter, just need bounds to be dynamic so stats are
# collected.
bounds = get_bounds.GetBounds.Hyper(
initial_bound=0.0,
stddev_coeff=0.4,
absdev_coeff=0.6,
mix_coeff=0.4,
reset_stats=False,
granularity=quant_config.QuantGranularity.per_channel)
quant_act = flax_layers.QuantOps.ActHParams(
input_distribution=flax_layers.QuantOps.ActHParams.InputDistribution
.symmetric,
prec=8,
bounds=bounds,
half_shift=False)
attn_quant_act = flax_layers.QuantOps.ActHParams(
input_distribution=flax_layers.QuantOps.ActHParams.InputDistribution
.positive,
prec=8,
bounds=1.0,
half_shift=False)
dense_hparams = flax_layers.DenseAqt.HParams(
quant_type=flax_layers.QuantType.fake_quant,
weight_prec=8,
quant_act=quant_act,
weight_quant_granularity=quant_config.QuantGranularity.per_channel,
weight_half_shift=False)
dotproduct_attn_hparams = flax_attention.DotProductAttnHParams(
attn_act_q=quant_act,
attn_act_k=quant_act,
attn_act_v=quant_act,
attn_act_probs=attn_quant_act,
quant_type=QuantType.fake_quant,
softmax=SoftmaxHParams(None, None, None))
attn_hparams = flax_attention.MultiHeadDotProductAttentionAqt.HParams(
dense_kqv=dense_hparams,
dense_out=dense_hparams,
attn_acts=dotproduct_attn_hparams)
module = flax_attention.SelfAttentionAqt(
hparams=attn_hparams,
num_heads=2,
paxis_name=None,
train=True,
quant_context=quant_config.QuantContext(
update_bounds=True, collect_acts_stats=False),
dtype=jnp.float32,
qkv_features=None,
attention_axis=None,
causal_mask=False,
dropout_rate=0.0,
deterministic=False,
decode=False)
# Simulate an input of a batch size of 1 with two tokens, each with four
# features
x = onp.arange(8).astype(onp.float32).reshape((1, 2, 4))
initial_state = module.init(random.PRNGKey(0), x, padding_mask=None)
padding_mask = onp.full((1, 2, 1), True)
padding_mask[0, 1, 0] = False # Mask out the second token
_, state1 = module.apply(
initial_state, x, padding_mask=padding_mask, mutable=True)
# Now we adjust the input for the masked token and recompute the mean. It
# should be the same as before.
x[0, 1, 0] = 100
_, state2 = module.apply(
initial_state, x, padding_mask=padding_mask, mutable=True)
test_utils.assert_stats_are_equal(state1, state2)
# Now we adjust the input for an unmasked token and verify that the stats
# have changed.
x[0, 0, 0] = 200
_, state3 = module.apply(
initial_state, x, padding_mask=padding_mask, mutable=True)
test_utils.assert_stats_are_unequal(state1, state3)
class AttnActsMatmulQuantTest(parameterized.TestCase):
def construct_hparams(self, attn_act_q, attn_act_k, attn_act_probs,
attn_act_v):
dense = flax_layers.DenseAqt.HParams(
weight_prec=None,
quant_act=None,
quant_type=QuantType.fake_quant,
weight_quant_granularity=quant_config.QuantGranularity.per_channel,
weight_half_shift=False)
return flax_attention.MultiHeadDotProductAttentionAqt.HParams(
dense_kqv=dense,
dense_out=dense,
attn_acts=flax_attention.DotProductAttnHParams(
attn_act_q=attn_act_q,
attn_act_k=attn_act_k,
attn_act_probs=attn_act_probs,
attn_act_v=attn_act_v,
quant_type=QuantType.fake_quant,
softmax=SoftmaxHParams(None, None, None)))
@parameterized.named_parameters(
dict(
testcase_name='float',
attn_act_q=None,
attn_act_k=None,
attn_act_probs=None,
attn_act_v=None,
update_bounds=False,
paxis_name=None,
train=False),
dict(
testcase_name='quant_q',
attn_act_q=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution
.symmetric,
prec=8,
bounds=1,
half_shift=False),
attn_act_k=None,
attn_act_probs=None,
attn_act_v=None,
update_bounds=False,
paxis_name='batch',
train=True),
dict(
testcase_name='quant_qk',
attn_act_q=None,
attn_act_k=None,
attn_act_probs=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution
.symmetric,
prec=8,
bounds=1.0,
half_shift=False),
attn_act_v=None,
update_bounds=False,
paxis_name='batch',
train=True),
dict(
testcase_name='quant_k',
attn_act_q=None,
attn_act_k=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution
.symmetric,
prec=4,
bounds=2,
half_shift=False),
attn_act_probs=None,
attn_act_v=None,
update_bounds=False,
paxis_name=None,
train=True),
dict(
testcase_name='quant_v',
attn_act_q=None,
attn_act_k=None,
attn_act_probs=None,
attn_act_v=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution
.symmetric,
prec=2,
bounds=3,
half_shift=False),
update_bounds=True,
paxis_name='batch',
train=False),
dict(
testcase_name='quant_all_aa',
attn_act_q=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution
.symmetric,
prec=8,
bounds=1,
half_shift=False),
attn_act_k=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution
.symmetric,
prec=4,
bounds=2,
half_shift=False),
attn_act_probs=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution
.symmetric,
prec=8,
bounds=1.0,
half_shift=False),
attn_act_v=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution
.symmetric,
prec=2,
bounds=3,
half_shift=False),
update_bounds=True,
paxis_name=None,
train=True),
)
@unittest.mock.patch.object(QuantOps, 'create_inputs_fake_quant')
def test_self_attention_act_quant_should_call_quant_ops(
self, mock_inputs_fake_quant, attn_act_q, attn_act_k, attn_act_probs,
attn_act_v, update_bounds, paxis_name, train):
mock_inputs_fake_quant.side_effect = (
lambda inputs, hparams, get_bounds_params: inputs)
rng = random.PRNGKey(0)
x = jnp.ones((4, 3, 7))
hparams = self.construct_hparams(attn_act_q, attn_act_k, attn_act_probs,
attn_act_v)
sa_module = flax_attention.SelfAttentionAqt(
hparams=hparams,
num_heads=4,
quant_context=quant_config.QuantContext(
update_bounds=update_bounds, collect_acts_stats=False),
train=train,
paxis_name=paxis_name,
attention_axis=None,
qkv_features=8,
kernel_init=initializers.ones,
bias_init=initializers.zeros,
causal_mask=False,
dtype=jnp.float32,
dropout_rate=0.0,
deterministic=False,
decode=False)
sa_module.init(rng, x, padding_mask=None)
calls = []
for hparam in [attn_act_q, attn_act_k, attn_act_probs, attn_act_v]:
if hparam is not None:
calls.append(
unittest.mock.call(
unittest.mock.ANY,
hparams=hparam,
get_bounds_params=get_bounds.GetBounds.Params(
update_stats=train,
update_bounds=update_bounds,
paxis_name=paxis_name,
mask=unittest.mock.ANY,
module_name=unittest.mock.ANY)))
mock_inputs_fake_quant.assert_has_calls(calls, any_order=True)
self.assertLen(calls, mock_inputs_fake_quant.call_count)
if __name__ == '__main__':
absltest.main()
| |
"""
Useful form fields for use with SQLAlchemy ORM.
"""
import operator
from wtforms.fields import SelectFieldBase, StringField
from wtforms.validators import ValidationError
try:
from wtforms.fields import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from .tools import get_primary_key
from flask_admin._compat import text_type, string_types, iteritems
from flask_admin.contrib.sqla.widgets import CheckboxListInput
from flask_admin.form import FormOpts, BaseForm, Select2Widget
from flask_admin.model.fields import InlineFieldList, InlineModelFormField
from flask_admin.babel import lazy_gettext
try:
from sqlalchemy.orm.util import identity_key
has_identity_key = True
except ImportError:
has_identity_key = False
class QuerySelectField(SelectFieldBase):
"""
Will display a select drop-down field to choose between ORM results in a
sqlalchemy `Query`. The `data` property actually will store/keep an ORM
model instance, not the ID. Submitting a choice which is not in the query
will result in a validation error.
This field only works for queries on models whose primary key column(s)
have a consistent string representation. This means it mostly only works
for those composed of string, unicode, and integer types. For the most
part, the primary keys will be auto-detected from the model, alternately
pass a one-argument callable to `get_pk` which can return a unique
comparable key.
The `query` property on the field can be set from within a view to assign
a query per-instance to the field. If the property is not set, the
`query_factory` callable passed to the field constructor will be called to
obtain a query.
Specify `get_label` to customize the label associated with each option. If
a string, this is the name of an attribute on the model object to use as
the label text. If a one-argument callable, this callable will be passed
model instance and expected to return the label text. Otherwise, the model
object's `__str__` or `__unicode__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for this blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = Select2Widget()
def __init__(self, label=None, validators=None, query_factory=None,
get_pk=None, get_label=None, allow_blank=False,
blank_text=u'', **kwargs):
super(QuerySelectField, self).__init__(label, validators, **kwargs)
self.query_factory = query_factory
if get_pk is None:
if not has_identity_key:
raise Exception(u'The sqlalchemy identity_key function could not be imported.')
self.get_pk = get_pk_from_identity
else:
self.get_pk = get_pk
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, string_types):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self.query = None
self._object_list = None
def _get_data(self):
if self._formdata is not None:
for pk, obj in self._get_object_list():
if pk == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _get_object_list(self):
if self._object_list is None:
query = self.query or self.query_factory()
get_pk = self.get_pk
self._object_list = [(text_type(get_pk(obj)), obj) for obj in query]
return self._object_list
def iter_choices(self):
if self.allow_blank:
yield (u'__None', self.blank_text, self.data is None)
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj == self.data)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == u'__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank or self.data is not None:
for pk, obj in self._get_object_list():
if self.data == obj:
break
else:
raise ValidationError(self.gettext(u'Not a valid choice'))
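# Example (illustrative only -- ``User`` and ``db.session`` below are
# hypothetical application objects, not part of this module):
#
#     def user_query():
#         return db.session.query(User)
#
#     class PostForm(BaseForm):
#         author = QuerySelectField(query_factory=user_query,
#                                   get_label='username',
#                                   allow_blank=True,
#                                   blank_text=u'(no author)')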
class QuerySelectMultipleField(QuerySelectField):
"""
Very similar to QuerySelectField with the difference that this will
display a multiple select. The data property will hold a list with ORM
model instances and will be an empty list when no value is selected.
If any of the items in the data list or submitted form data cannot be
found in the query, this will result in a validation error.
"""
widget = Select2Widget(multiple=True)
def __init__(self, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super(QuerySelectMultipleField, self).__init__(label, validators, default=default, **kwargs)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata is not None:
data = []
for pk, obj in self._get_object_list():
if not formdata:
break
elif pk in formdata:
formdata.remove(pk)
data.append(obj)
if formdata:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj in self.data)
def process_formdata(self, valuelist):
self._formdata = set(valuelist)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext(u'Not a valid choice'))
elif self.data:
obj_list = list(x[1] for x in self._get_object_list())
for v in self.data:
if v not in obj_list:
raise ValidationError(self.gettext(u'Not a valid choice'))
class CheckboxListField(QuerySelectMultipleField):
"""
Alternative field for many-to-many relationships.
Can be used instead of `QuerySelectMultipleField`.
Appears as the list of checkboxes.
Example::
class MyView(ModelView):
form_columns = (
'languages',
)
form_args = {
'languages': {
'query_factory': Language.query,
},
}
form_overrides = {
'languages': CheckboxListField,
}
"""
widget = CheckboxListInput()
class HstoreForm(BaseForm):
""" Form used in InlineFormField/InlineHstoreList for HSTORE columns """
key = StringField(lazy_gettext('Key'))
value = StringField(lazy_gettext('Value'))
class KeyValue(object):
""" Used by InlineHstoreList to simulate a key and a value field instead of
the single HSTORE column. """
def __init__(self, key=None, value=None):
self.key = key
self.value = value
class InlineHstoreList(InlineFieldList):
""" Version of InlineFieldList for use with Postgres HSTORE columns """
def process(self, formdata, data=unset_value):
""" SQLAlchemy returns a dict for HSTORE columns, but WTForms cannot
process a dict. This overrides `process` to convert the dict
returned by SQLAlchemy to a list of classes before processing. """
if isinstance(data, dict):
data = [KeyValue(k, v) for k, v in iteritems(data)]
super(InlineHstoreList, self).process(formdata, data)
def populate_obj(self, obj, name):
""" Combines each FormField key/value into a dictionary for storage """
_fake = type(str('_fake'), (object, ), {})
output = {}
for form_field in self.entries:
if not self.should_delete(form_field):
fake_obj = _fake()
fake_obj.data = KeyValue()
form_field.populate_obj(fake_obj, 'data')
output[fake_obj.data.key] = fake_obj.data.value
setattr(obj, name, output)
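# Sketch of the dict-to-KeyValue round trip handled by this class (the
# HSTORE value shown is hypothetical):
#
#     hstore_value = {'colour': 'red', 'size': 'large'}
#     entries = [KeyValue(k, v) for k, v in iteritems(hstore_value)]
#     # process() renders each KeyValue as an HstoreForm key/value pair,
#     # and populate_obj() folds the pairs back into a single dict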
class InlineModelFormList(InlineFieldList):
"""
Customized inline model form list field.
"""
form_field_type = InlineModelFormField
"""
Form field type. Override to use custom field for each inline form
"""
def __init__(self, form, session, model, prop, inline_view, **kwargs):
"""
Default constructor.
:param form:
Form for the related model
:param session:
SQLAlchemy session
:param model:
Related model
:param prop:
Related property name
:param inline_view:
Inline view
"""
self.form = form
self.session = session
self.model = model
self.prop = prop
self.inline_view = inline_view
self._pk = get_primary_key(model)
# Generate inline form field
form_opts = FormOpts(widget_args=getattr(inline_view, 'form_widget_args', None),
form_rules=inline_view._form_rules)
form_field = self.form_field_type(form, self._pk, form_opts=form_opts)
super(InlineModelFormList, self).__init__(form_field, **kwargs)
def display_row_controls(self, field):
return field.get_pk() is not None
def populate_obj(self, obj, name):
values = getattr(obj, name, None)
if values is None:
return
# Create primary key map
pk_map = dict((get_obj_pk(v, self._pk), v) for v in values)
# Handle request data
for field in self.entries:
field_id = get_field_id(field)
is_created = field_id not in pk_map
if not is_created:
model = pk_map[field_id]
if self.should_delete(field):
self.session.delete(model)
continue
else:
model = self.model()
values.append(model)
field.populate_obj(model, None)
self.inline_view._on_model_change(field, model, is_created)
class InlineModelOneToOneField(InlineModelFormField):
def __init__(self, form, session, model, prop, inline_view, **kwargs):
self.form = form
self.session = session
self.model = model
self.prop = prop
self.inline_view = inline_view
self._pk = get_primary_key(model)
# Generate inline form field
form_opts = FormOpts(
widget_args=getattr(inline_view, 'form_widget_args', None),
form_rules=inline_view._form_rules
)
super().__init__(form, self._pk, form_opts=form_opts, **kwargs)
@staticmethod
def _looks_empty(field):
"""
Check whether the supplied field value is empty (None or an empty string)
"""
if field is None:
return True
if isinstance(field, str) and not field:
return True
return False
def populate_obj(self, model, field_name):
inline_model = getattr(model, field_name, None)
is_created = False
form_is_empty = True
if not inline_model:
is_created = True
inline_model = self.model()
# iterate over all inline form fields and fill the model
for name, field in iteritems(self.form._fields):
if name != self._pk:
field.populate_obj(inline_model, name)
if form_is_empty and not self._looks_empty(field.data):
form_is_empty = False
        # don't create or attach the inline model if none of its fields were filled in
if form_is_empty:
return
        # attach the new/updated inline model to the parent model
setattr(model, field_name, inline_model)
# save results
self.inline_view.on_model_change(self.form, model, is_created)
def get_pk_from_identity(obj):
# TODO: Remove me
key = identity_key(instance=obj)[1]
return u':'.join(text_type(x) for x in key)
def get_obj_pk(obj, pk):
"""
get and format pk from obj
:rtype: text_type
"""
if isinstance(pk, tuple):
return tuple(text_type(getattr(obj, k)) for k in pk)
return text_type(getattr(obj, pk))
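# Illustrative sketch (not part of flask-admin): get_obj_pk normalises both scalar
# and composite primary keys to text so they can be compared with the ids coming
# back from the form. The fake row below exists only for demonstration.
class _FakeRowSketch(object):
    id = 7
    tenant = 'acme'
assert get_obj_pk(_FakeRowSketch(), 'id') == u'7'
assert get_obj_pk(_FakeRowSketch(), ('tenant', 'id')) == (u'acme', u'7')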
def get_field_id(field):
"""
get and format id from field
:rtype: text_type
"""
field_id = field.get_pk()
if isinstance(field_id, tuple):
return tuple(text_type(_) for _ in field_id)
return text_type(field_id)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for batch_norm related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops
class BatchNormalizationTest(tf.test.TestCase):
def _npBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) / np.sqrt(v + epsilon)
y = y * gamma if scale_after_normalization else y
return y + beta if shift_after_normalization else y
def _opsBatchNorm(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
y = (x - m) * tf.rsqrt(v + epsilon)
if scale_after_normalization:
y = gamma * y
return y + beta if shift_after_normalization else y
def _tfBatchNormV1(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Original implementation."""
# _batch_norm_with_global_normalization is deprecated in v9
tf.get_default_graph().graph_def_versions.producer = 8
# pylint: disable=protected-access
return gen_nn_ops._batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
# pylint: enable=protected-access
def _tfBatchNormV1BW(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization):
"""Re-implementation of the original kernel for backward compatibility."""
return tf.nn.batch_norm_with_global_normalization(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
def _tfBatchNormV2(self, x, m, v, beta, gamma, epsilon,
scale_after_normalization, shift_after_normalization):
"""New implementation."""
return tf.nn.batch_normalization(
x, m, v, beta if shift_after_normalization else None,
gamma if scale_after_normalization else None, epsilon)
def testBatchNorm(self):
x_shape = [3, 5, 4, 2]
param_shape = [2]
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn2 = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
bn1bw = self._tfBatchNormV1BW(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
bn1 = self._tfBatchNormV1(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
on = self._opsBatchNorm(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
np_bn = self._npBatchNorm(
x_val, m_val, v_val, beta_val, gamma_val, epsilon,
scale_after_normalization, shift_after_normalization)
tf_bn_v2, tf_bn_v1bw, tf_bn_v1, ops_bn = sess.run(
[bn2, bn1bw, bn1, on])
self.assertAllClose(np_bn, ops_bn, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v2, atol=0.00001)
self.assertAllClose(tf_bn_v2, ops_bn, atol=0.00001)
# shift_after_normalization=False is not supported in v1.
if shift_after_normalization:
self.assertAllClose(np_bn, tf_bn_v1bw, atol=0.00001)
self.assertAllClose(np_bn, tf_bn_v1, atol=0.00001)
self.assertAllClose(tf_bn_v1, ops_bn, atol=0.00001)
self.assertAllClose(tf_bn_v1bw, ops_bn, atol=0.00001)
def _testBatchNormGradient(self, param_index, tag, scale_after_normalization,
shift_after_normalization, version,
err_tolerance=1e-11):
x_shape = [3, 5, 4, 5]
param_shape = [5]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
m_val = np.random.random_sample(param_shape).astype(np.float64)
v_val = np.random.random_sample(param_shape).astype(np.float64)
beta_val = np.random.random_sample(param_shape).astype(np.float64)
gamma_val = np.random.random_sample(param_shape).astype(np.float64)
with self.test_session():
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
epsilon = 0.001
if version == 1:
output = self._tfBatchNormV1(
x, m, v, beta, gamma, epsilon, scale_after_normalization)
elif version == 2:
output = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
else:
print("Invalid version", version)
raise ValueError()
all_params = [x, m, v, beta, gamma]
all_shapes = [x_shape, param_shape, param_shape, param_shape, param_shape]
err = tf.test.compute_gradient_error(
all_params[param_index], all_shapes[param_index], output, x_shape)
print("Batch normalization v%d %s gradient %s scale and %s shift err = " %
(version, tag, "with" if scale_after_normalization else "without",
"with" if shift_after_normalization else "without"),
err)
self.assertLess(err, err_tolerance)
def _testBatchNormGradientInAllNeedConfigs(
self, param_index, tag, err_tolerance=1e-11):
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
# shift_after_normalization=False is not supported in version 1.
for v in ([1, 2] if shift_after_normalization else [2]):
self._testBatchNormGradient(
param_index, tag, scale_after_normalization,
shift_after_normalization, v, err_tolerance)
def testBatchNormInputGradient(self):
self._testBatchNormGradientInAllNeedConfigs(0, "x")
def testBatchNormMeanGradient(self):
self._testBatchNormGradientInAllNeedConfigs(1, "mean")
def testBatchNormVarianceGradient(self):
self._testBatchNormGradientInAllNeedConfigs(2, "variance",
err_tolerance=1e-03)
def testBatchNormBetaGradient(self):
    # Since beta does not exist when shift_after_normalization=False, we only
    # test for shift_after_normalization=True.
for scale_after_normalization in [True, False]:
for v in [1, 2]:
self._testBatchNormGradient(3, "beta", scale_after_normalization, True,
v)
def testBatchNormGammaGradient(self):
# If scale_after_normalization is False, backprop for gamma in v1
# will be 0. In version 2 of the API, if scale_after_normalization is False,
# gamma is not used at all, and the gradient is None, which displeases the
# gradient checker.
for scale_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", scale_after_normalization, True,
1)
for shift_after_normalization in [True, False]:
self._testBatchNormGradient(4, "gamma", True, shift_after_normalization,
2)
def testBatchNormGradImpl(self):
x_shape = [7, 5, 4, 6]
param_shape = [6]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
backprop_val = np.random.random_sample(x_shape).astype(np.float32)
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
backprop = tf.constant(backprop_val, name="backprop")
epsilon = 0.001
for scale_after_normalization in [True, False]:
# _batch_norm_with_global_normalization_grad is deprecated in v9
tf.get_default_graph().graph_def_versions.producer = 8
grad = gen_nn_ops._batch_norm_with_global_normalization_grad(
x, m, v, gamma, backprop, epsilon, scale_after_normalization)
dx, dm, dv, db, dg = grad
self.assertEqual(grad.dx, dx)
self.assertEqual(grad.dm, dm)
self.assertEqual(grad.dv, dv)
self.assertEqual(grad.db, db)
self.assertEqual(grad.dg, dg)
on = self._opsBatchNorm(
x, m, v, beta, gamma, epsilon, scale_after_normalization, True)
odx, odm, odv, odb, odg = tf.gradients(
[on], [x, m, v, beta, gamma], [backprop])
if scale_after_normalization:
all_grads = sess.run([dx, dm, dv, db, dg, odx, odm, odv, odb, odg])
to_check = ["dx", "dm", "dv", "db", "dg"]
else:
all_grads = sess.run([dx, dm, dv, db, odx, odm, odv, odb])
to_check = ["dx", "dm", "dv", "db"]
for i, _ in enumerate(to_check):
self.assertAllClose(
all_grads[i + len(to_check)], all_grads[i], atol=0.000001)
def testBatchNormKeepDims(self):
"""Test for tf.nn.moments(..., keep_dims=True / False).
Make sure that parameters with shape (1, 1, 1, depth) yield the same
result as parameters with shape (depth)
"""
x_shape = (3, 5, 4, 2)
    param_shape = (2,)
keep_dims_param_shape = (1, 1, 1, 2)
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
keep_dims_m = tf.reshape(m, keep_dims_param_shape, name="keep_dims_m")
keep_dims_v = tf.reshape(v, keep_dims_param_shape, name="keep_dims_v")
keep_dims_beta = tf.reshape(
beta, keep_dims_param_shape, name="keep_dims_beta")
keep_dims_gamma = tf.reshape(
gamma, keep_dims_param_shape, name="keep_dims_gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
keep_dims_bn = self._tfBatchNormV2(
x, keep_dims_m, keep_dims_v, keep_dims_beta,
keep_dims_gamma, epsilon, scale_after_normalization,
shift_after_normalization)
tf_batch_norm, keep_dims_tf_batch_norm = sess.run(
[bn, keep_dims_bn])
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertEquals(x_shape, keep_dims_tf_batch_norm.shape)
self.assertAllClose(
tf_batch_norm, keep_dims_tf_batch_norm, atol=0.000001)
def _testBatchNormArbitraryShapes(self, x_shape, param_shape, atol=0.0001):
x_val = np.random.random_sample(x_shape).astype(np.float32)
m_val = np.random.random_sample(param_shape).astype(np.float32)
v_val = np.random.random_sample(param_shape).astype(np.float32)
beta_val = np.random.random_sample(param_shape).astype(np.float32)
gamma_val = np.random.random_sample(param_shape).astype(np.float32)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = tf.constant(x_val, name="x")
m = tf.constant(m_val, name="m")
v = tf.constant(v_val, name="v")
beta = tf.constant(beta_val, name="beta")
gamma = tf.constant(gamma_val, name="gamma")
epsilon = 0.001
for scale_after_normalization in [True, False]:
for shift_after_normalization in [True, False]:
bn = self._tfBatchNormV2(
x, m, v, beta, gamma, epsilon, scale_after_normalization,
shift_after_normalization)
np_batch_norm = self._npBatchNorm(
x_val, m_val, v_val, beta_val, gamma_val, epsilon,
scale_after_normalization, shift_after_normalization)
[tf_batch_norm] = sess.run([bn])
self.assertEquals(x_shape, np_batch_norm.shape)
self.assertEquals(x_shape, tf_batch_norm.shape)
self.assertAllClose(np_batch_norm, tf_batch_norm, atol=atol)
def testBatchNormArbitraryShapes(self):
"""Test for a variety of shapes and moments.
Batch normalization is expected to work regardless of the position and
dimensionality of the 'depth' axis/axes.
"""
self._testBatchNormArbitraryShapes((3, 3), (1, 3))
self._testBatchNormArbitraryShapes((3, 3), (3, 1))
self._testBatchNormArbitraryShapes((3, 2, 4, 5), (1, 2, 1, 1))
self._testBatchNormArbitraryShapes((2, 3, 2, 4, 5), (1, 1, 1, 4, 5),
atol=0.005)
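# Illustrative sketch (not part of the test suite): the reference formula the numpy
# helper above implements, y = gamma * (x - mean) / sqrt(var + eps) + beta. With the
# batch statistics themselves and no scale/shift, the output is standardized.
def _sketch_batch_norm(x, mean, var, beta, gamma, eps):
  return gamma * (x - mean) / np.sqrt(var + eps) + beta
_x_sketch = np.random.normal(loc=3.0, scale=2.0, size=1000)
_y_sketch = _sketch_batch_norm(_x_sketch, _x_sketch.mean(), _x_sketch.var(), 0.0, 1.0, 0.0)
assert abs(_y_sketch.mean()) < 1e-6 and abs(_y_sketch.var() - 1.0) < 1e-6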
class SufficientStatisticsTest(tf.test.TestCase):
def _npSuffStats(self, x, axes, shift, keep_dims):
axis = tuple(axes)
if shift is not None:
m_ss = np.sum(x - shift, axis=axis, keepdims=keep_dims)
v_ss = np.sum((x - shift) * (x - shift), axis=axis, keepdims=keep_dims)
else:
m_ss = np.sum(x, axis=axis, keepdims=keep_dims)
v_ss = np.sum(x * x, axis=axis, keepdims=keep_dims)
count = 1.0
for d in xrange(x.ndim):
if d in set(axes):
count *= x.shape[d]
if not keep_dims:
shift = np.squeeze(shift, axis=axis)
return count, m_ss, v_ss, shift
def _opSuffStats(self, x, axes, shift, keep_dims):
return tf.nn.sufficient_statistics(x, axes, shift, keep_dims)
def _testSuffStats(self, x_shape, axes, shift, keep_dims, has_shape):
x_val = np.random.random_sample(x_shape).astype(np.float32)
np_c, np_m, np_v, np_s = self._npSuffStats(x_val, axes, shift, keep_dims)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
if has_shape:
x = tf.constant(x_val, name="x")
x.set_shape(x_shape)
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run([op_c, op_m, op_v, op_s])
else:
tf_c, tf_m, tf_v = sess.run([op_c, op_m, op_v])
else:
x = tf.placeholder(dtype=tf.float32,
shape=[None] * len(x_shape),
name="x")
op_c, op_m, op_v, op_s = self._opSuffStats(x, axes, shift, keep_dims)
if shift:
tf_c, tf_m, tf_v, tf_s = sess.run(
[op_c, op_m, op_v, op_s],
feed_dict={x: x_val})
else:
tf_c, tf_m, tf_v = sess.run(
[op_c, op_m, op_v],
feed_dict={x: x_val})
self.assertAllClose(np_c, tf_c, atol=0.000001)
self.assertAllClose(np_m, tf_m, atol=0.000001)
self.assertAllClose(np_v, tf_v, atol=0.000001)
if shift:
self.assertAllClose(np_s, tf_s, atol=0.000001)
def testSuffStats(self):
for has_shape in [True, False]:
for keep_dims in [True, False]:
for shift in [None, 1.0]:
self._testSuffStats([2, 3], [1], shift, keep_dims, has_shape)
self._testSuffStats([2, 3], [0], shift, keep_dims, has_shape)
self._testSuffStats([1, 2, 3], [0, 2], shift, keep_dims, has_shape)
class NormalizeMomentsTest(tf.test.TestCase):
def _npNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
mean = mean_ss / counts
variance = variance_ss / counts - mean * mean
if shift is not None:
mean += shift
return mean, variance
def _opNormalizeMoments(self, counts, mean_ss, variance_ss, shift):
return tf.nn.normalize_moments(counts, mean_ss, variance_ss, shift)
def _testNormalizeMoments(self, shape, shift):
counts = np.ones([1]).astype(np.float32)
mean_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss = np.random.random_sample(shape).astype(np.float32)
variance_ss *= variance_ss
if shift:
shift_v = np.random.random_sample(shape).astype(np.float32)
else:
shift_v = None
npm, npv = self._npNormalizeMoments(counts, mean_ss, variance_ss, shift_v)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
tf_counts = tf.constant(counts, name="counts")
tf_mean_ss = tf.constant(mean_ss, name="mean_ss")
tf_variance_ss = tf.constant(variance_ss, name="variance_ss")
if shift:
tf_shift_v = tf.constant(shift_v, name="shift")
else:
tf_shift_v = None
opm, opv = self._opNormalizeMoments(tf_counts, tf_mean_ss,
tf_variance_ss, tf_shift_v)
tfm, tfv = sess.run([opm, opv])
self.assertAllClose(npm, tfm, atol=0.000001)
self.assertAllClose(npv, tfv, atol=0.000001)
def testNormalizeMoments(self):
for shift in [None, 4.0]:
self._testNormalizeMoments([3], shift)
self._testNormalizeMoments([2, 3], shift)
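# Illustrative sketch (not part of the test suite): normalizing the sufficient
# statistics (count, sum, sum of squares) recovers the ordinary mean and variance,
# which is the relationship tf.nn.normalize_moments is exercised against above.
def _sketch_normalize_moments(x, axes):
  ax = tuple(axes)
  count = np.prod([x.shape[d] for d in axes])
  mean = np.sum(x, axis=ax) / count
  variance = np.sum(x * x, axis=ax) / count - mean * mean
  return mean, variance
_xs = np.random.random_sample((4, 3)).astype(np.float64)
_ms, _vs = _sketch_normalize_moments(_xs, [0])
assert np.allclose(_ms, _xs.mean(axis=0)) and np.allclose(_vs, _xs.var(axis=0))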
class MomentsTest(tf.test.TestCase):
def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
# Method to compute moments of `x` wrt `axes`.
#
# This is exposed so WeightedMomentsTest can inherit the tests and
# assertions from MomentsTest; the extra_out_grads argument allows
# its inherited gradient tests to assert gradients against the
# weights as well as the input values.
return tf.nn.moments(x, axes, keep_dims=keep_dims)
def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = tf.placeholder(dtype, shape=[None] * len(shape))
mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(
x_numpy, axis=ax, keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(
np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllCloseAccordingToType(expected_mean,
mean.eval(feed_dict={x: x_numpy}))
self.assertAllCloseAccordingToType(expected_variance,
var.eval(feed_dict={x: x_numpy}))
def RunMomentTest(self, shape, axes, keep_dims, dtype):
with self.test_session():
# shape = [batch, width, height, depth]
assert len(shape) == 4
x_numpy = np.random.normal(size=shape).astype(np.float32)
x = tf.cast(tf.constant(x_numpy), dtype=dtype)
# Compute the expected values at high precision since the method
# is prone to catastrophic cancellation:
x_numpy = x_numpy.astype(np.float128)
mean, var = self._unweighted_moments(x, axes, keep_dims=keep_dims)
num_elements = np.prod([shape[i] for i in axes])
ax = tuple(axes)
expected_mean = np.sum(
x_numpy, axis=ax, keepdims=keep_dims) / num_elements
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = np.sum(
np.multiply(x_numpy, x_numpy),
axis=ax,
keepdims=keep_dims) / num_elements
expected_variance = expected_x_squared - expected_mean_squared
# Check that the moments are correct.
self.assertAllCloseAccordingToType(expected_mean, mean.eval())
self.assertAllCloseAccordingToType(expected_variance, var.eval())
def testBasic(self):
for keep_dims in [False, True]:
for dtype in [tf.float32, tf.float16]:
self.RunMomentTest(shape=[2, 3, 5, 4],
axes=[0],
keep_dims=keep_dims,
dtype=dtype)
self.RunMomentTestWithDynamicShape(shape=[2, 3, 5, 4],
axes=[0],
keep_dims=keep_dims,
dtype=dtype)
def testGlobalNormalization(self):
for keep_dims in [False, True]:
for dtype in [tf.float32, tf.float16]:
self.RunMomentTest(shape=[2, 3, 5, 4],
axes=[0, 1, 2],
keep_dims=keep_dims,
dtype=dtype)
self.RunMomentTestWithDynamicShape(shape=[2, 3, 5, 4],
axes=[0, 1, 2],
keep_dims=keep_dims,
dtype=dtype)
def testAxes(self):
for keep_dims in [False, True]:
for dtype in [tf.float32, tf.float16]:
self.RunMomentTest(shape=[2, 3, 5, 4],
axes=[1, 2, 3],
keep_dims=keep_dims,
dtype=dtype)
self.RunMomentTestWithDynamicShape(shape=[2, 3, 5, 4],
axes=[1, 2, 3],
keep_dims=keep_dims,
dtype=dtype)
def _testGlobalGradient(self, from_y="mean"):
with self.test_session():
x_shape = [3, 5, 4, 2]
x_val = np.random.random_sample(x_shape).astype(np.float64)
x = tf.constant(x_val)
x.set_shape(x_shape)
axes = [0, 1, 2]
y_shape = [2] # Depth of x
inputs_to_compute_gradients_for = [x]
out_mean, out_var = self._unweighted_moments(
x, axes, extra_out_grads=inputs_to_compute_gradients_for)
if from_y == "mean":
y = out_mean
elif from_y == "var":
y = out_var
for (i, v) in enumerate(inputs_to_compute_gradients_for):
err = tf.test.compute_gradient_error(v, v.get_shape().as_list(),
y, y_shape)
print("Moments %s gradient err vs input %d = %g" % (from_y, i, err))
self.assertLess(err, 1e-11)
def testMeanGlobalGradient(self):
self._testGlobalGradient(from_y="mean")
def testVarGlobalGradient(self):
self._testGlobalGradient(from_y="var")
class WeightedMomentsTest(MomentsTest):
"""Tests for nn.weighted_moments.
Note that this test inherits from MomentsTest, inheriting all its
test methods!
It modifies MomentsTest in two ways:
a) By overriding _unweighted_moments, all the codepaths in
MomentsTest are executed, but with calls to tf.nn.moments()
replaced by calls to tf.nn.weighted_moments() with a constant
weight of 1.
b) By overriding RunMomentTest and RunMomentTestWithDynamicShape,
this test adds multiple additional calls to
RunWeightedMomentsTest() to exercise correctness with
non-constant weights and varying broadcasting situations. (It
also continues to call MomentsTest.Run(Weighted)?MomentsTest as
well.)
"""
def _unweighted_moments(self, x, axes, keep_dims=False, extra_out_grads=None):
weights = tf.constant(1, dtype=x.dtype)
if extra_out_grads is not None:
# We want to assert gradients WRT weights as well as X!
extra_out_grads.append(weights)
return tf.nn.weighted_moments(
x, axes, weights, keep_dims=keep_dims)
def RunMomentTest(self, shape, axes, keep_dims, dtype, dynshapes=False):
if not dynshapes:
super(WeightedMomentsTest, self).RunMomentTest(
shape, axes, keep_dims, dtype)
else:
super(WeightedMomentsTest, self).RunMomentTestWithDynamicShape(
shape, axes, keep_dims, dtype)
# 1:1 weights and inputs
self.RunWeightedMomentTest(shape, shape, axes, keep_dims, dtype)
# Various broadcasting combinations
for idx in range(len(shape)):
# try broadcasting weights in all positions
weight_shape = [1] * len(shape)
weight_shape[idx] = shape[idx]
self.RunWeightedMomentTest(shape, weight_shape, axes, keep_dims, dtype)
# Also try broadcasting with a suffix of length n
weight_shape = shape[-(idx+1):]
self.RunWeightedMomentTest(
shape, weight_shape, axes, keep_dims, dtype, dynshapes=dynshapes)
def RunMomentTestWithDynamicShape(self, shape, axes, keep_dims, dtype):
self.RunMomentTest(shape, axes, keep_dims, dtype, dynshapes=True)
def RunWeightedMomentTest(
self, shape, weights_shape, axes, keep_dims, dtype, dynshapes=False):
with self.test_session() as s:
x_numpy = np.random.normal(size=shape).astype(np.float32)
weights_numpy = np.absolute( # weights must be positive
np.random.normal(size=weights_shape, loc=1.0).astype(np.float32))
# Expand the numpy version to higher precision
x_numpy = x_numpy.astype(np.float128)
weights_numpy = weights_numpy.astype(np.float128)
x_shape = [None] * len(shape) if dynshapes else shape
weights_shape = (
[None] * len(weights_shape) if dynshapes else weights_shape)
x = tf.placeholder(dtype, shape=x_shape)
weights = tf.placeholder(dtype, shape=weights_shape)
mean, var = tf.nn.weighted_moments(x, axes, weights, keep_dims=keep_dims)
ax = tuple(axes)
def _np_weighted_sum(v):
return np.sum(weights_numpy * v, axis=ax, keepdims=keep_dims)
weight_sum = _np_weighted_sum(np.ones_like(x_numpy))
expected_mean = _np_weighted_sum(x_numpy) / weight_sum
expected_mean_squared = np.multiply(expected_mean, expected_mean)
expected_x_squared = (
_np_weighted_sum(np.multiply(x_numpy, x_numpy)) / weight_sum)
expected_variance = expected_x_squared - expected_mean_squared
mean_v, var_v = s.run([mean, var],
feed_dict={x: x_numpy, weights: weights_numpy})
self.assertAllCloseAccordingToType(expected_mean, mean_v)
self.assertAllCloseAccordingToType(expected_variance, var_v)
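# Illustrative sketch (not part of the test suite): the weighted moments the tests
# above compare tf.nn.weighted_moments against. With all-ones weights this reduces
# to the unweighted mean and variance.
def _sketch_weighted_moments(x, weights, axes):
  ax = tuple(axes)
  weight_sum = np.sum(weights * np.ones_like(x), axis=ax)
  mean = np.sum(weights * x, axis=ax) / weight_sum
  variance = np.sum(weights * x * x, axis=ax) / weight_sum - mean * mean
  return mean, variance
_xw = np.random.normal(size=(5, 3)).astype(np.float64)
_mw, _vw = _sketch_weighted_moments(_xw, np.ones_like(_xw), [0])
assert np.allclose(_mw, _xw.mean(axis=0)) and np.allclose(_vw, _xw.var(axis=0))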
if __name__ == "__main__":
tf.test.main()
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
VideoIntelligenceServiceAsyncClient,
)
from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
VideoIntelligenceServiceClient,
)
from google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service import (
transports,
)
from google.cloud.videointelligence_v1p2beta1.types import video_intelligence
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import duration_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert VideoIntelligenceServiceClient._get_default_mtls_endpoint(None) is None
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
VideoIntelligenceServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
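# Illustrative sketch (not the client library's implementation): one way to realise
# the mapping asserted above -- insert ".mtls" after the first label of a
# *.googleapis.com host, and leave mtls and non-Google endpoints untouched.
def _sketch_mtls_endpoint(api_endpoint):
    if not api_endpoint:
        return api_endpoint
    if not api_endpoint.endswith("googleapis.com") or ".mtls." in api_endpoint:
        return api_endpoint
    name, _, rest = api_endpoint.partition(".")
    return "{}.mtls.{}".format(name, rest)
assert _sketch_mtls_endpoint("example.googleapis.com") == "example.mtls.googleapis.com"
assert _sketch_mtls_endpoint("example.sandbox.googleapis.com") == "example.mtls.sandbox.googleapis.com"
assert _sketch_mtls_endpoint("api.example.com") == "api.example.com"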
@pytest.mark.parametrize(
"client_class",
[VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient,],
)
def test_video_intelligence_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "videointelligence.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.VideoIntelligenceServiceGrpcTransport, "grpc"),
(transports.VideoIntelligenceServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_video_intelligence_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient,],
)
def test_video_intelligence_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "videointelligence.googleapis.com:443"
def test_video_intelligence_service_client_get_transport_class():
transport = VideoIntelligenceServiceClient.get_transport_class()
available_transports = [
transports.VideoIntelligenceServiceGrpcTransport,
]
assert transport in available_transports
transport = VideoIntelligenceServiceClient.get_transport_class("grpc")
assert transport == transports.VideoIntelligenceServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
VideoIntelligenceServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
VideoIntelligenceServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
def test_video_intelligence_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(
VideoIntelligenceServiceClient, "get_transport_class"
) as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(
VideoIntelligenceServiceClient, "get_transport_class"
) as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
"true",
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
"false",
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
VideoIntelligenceServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
VideoIntelligenceServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_video_intelligence_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class",
[VideoIntelligenceServiceClient, VideoIntelligenceServiceAsyncClient],
)
@mock.patch.object(
VideoIntelligenceServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceClient),
)
@mock.patch.object(
VideoIntelligenceServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(VideoIntelligenceServiceAsyncClient),
)
def test_video_intelligence_service_client_get_mtls_endpoint_and_cert_source(
client_class,
):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
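# Illustrative sketch (not the client library's implementation): the decision table
# the assertions above encode for get_mtls_endpoint_and_cert_source, keyed on the
# GOOGLE_API_USE_CLIENT_CERTIFICATE / GOOGLE_API_USE_MTLS_ENDPOINT env vars. All
# names below are hypothetical placeholders.
def _sketch_mtls_decision(use_client_cert, use_mtls_endpoint, client_cert_source,
                          default_endpoint, mtls_endpoint, explicit_endpoint=None):
    cert = client_cert_source if use_client_cert == "true" else None
    if explicit_endpoint is not None:
        return explicit_endpoint, cert
    if use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and cert):
        return mtls_endpoint, cert
    return default_endpoint, cert
assert _sketch_mtls_decision("true", "auto", None, "default", "mtls") == ("default", None)
assert _sketch_mtls_decision("true", "auto", "cert", "default", "mtls") == ("mtls", "cert")
assert _sketch_mtls_decision("false", "always", "cert", "default", "mtls") == ("mtls", None)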
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_video_intelligence_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_video_intelligence_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_video_intelligence_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = VideoIntelligenceServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
"grpc",
grpc_helpers,
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_video_intelligence_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"videointelligence.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="videointelligence.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type", [video_intelligence.AnnotateVideoRequest, dict,]
)
def test_annotate_video(request_type, transport: str = "grpc"):
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.annotate_video(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == video_intelligence.AnnotateVideoRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_annotate_video_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
client.annotate_video()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == video_intelligence.AnnotateVideoRequest()
@pytest.mark.asyncio
async def test_annotate_video_async(
transport: str = "grpc_asyncio",
request_type=video_intelligence.AnnotateVideoRequest,
):
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.annotate_video(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == video_intelligence.AnnotateVideoRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_annotate_video_async_from_dict():
await test_annotate_video_async(request_type=dict)
def test_annotate_video_flattened():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.annotate_video(
input_uri="input_uri_value",
features=[video_intelligence.Feature.LABEL_DETECTION],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].input_uri
mock_val = "input_uri_value"
assert arg == mock_val
arg = args[0].features
mock_val = [video_intelligence.Feature.LABEL_DETECTION]
assert arg == mock_val
def test_annotate_video_flattened_error():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.annotate_video(
video_intelligence.AnnotateVideoRequest(),
input_uri="input_uri_value",
features=[video_intelligence.Feature.LABEL_DETECTION],
)
@pytest.mark.asyncio
async def test_annotate_video_flattened_async():
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.annotate_video), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.annotate_video(
input_uri="input_uri_value",
features=[video_intelligence.Feature.LABEL_DETECTION],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].input_uri
mock_val = "input_uri_value"
assert arg == mock_val
arg = args[0].features
mock_val = [video_intelligence.Feature.LABEL_DETECTION]
assert arg == mock_val
@pytest.mark.asyncio
async def test_annotate_video_flattened_error_async():
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.annotate_video(
video_intelligence.AnnotateVideoRequest(),
input_uri="input_uri_value",
features=[video_intelligence.Feature.LABEL_DETECTION],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
client_options=options, transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = VideoIntelligenceServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = VideoIntelligenceServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.VideoIntelligenceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.VideoIntelligenceServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport, transports.VideoIntelligenceServiceGrpcTransport,
)
def test_video_intelligence_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.VideoIntelligenceServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_video_intelligence_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.VideoIntelligenceServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = ("annotate_video",)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_video_intelligence_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VideoIntelligenceServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_video_intelligence_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.videointelligence_v1p2beta1.services.video_intelligence_service.transports.VideoIntelligenceServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.VideoIntelligenceServiceTransport()
adc.assert_called_once()
def test_video_intelligence_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
VideoIntelligenceServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_video_intelligence_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.VideoIntelligenceServiceGrpcTransport, grpc_helpers),
(transports.VideoIntelligenceServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_video_intelligence_service_transport_create_channel(
transport_class, grpc_helpers
):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"videointelligence.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="videointelligence.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_video_intelligence_service_grpc_transport_client_cert_source_for_mtls(
transport_class,
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_video_intelligence_service_host_no_port():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="videointelligence.googleapis.com"
),
)
assert client.transport._host == "videointelligence.googleapis.com:443"
def test_video_intelligence_service_host_with_port():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="videointelligence.googleapis.com:8000"
),
)
assert client.transport._host == "videointelligence.googleapis.com:8000"
def test_video_intelligence_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VideoIntelligenceServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_video_intelligence_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.VideoIntelligenceServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_video_intelligence_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.VideoIntelligenceServiceGrpcTransport,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
],
)
def test_video_intelligence_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_video_intelligence_service_grpc_lro_client():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_video_intelligence_service_grpc_lro_async_client():
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = VideoIntelligenceServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = VideoIntelligenceServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = VideoIntelligenceServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = VideoIntelligenceServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = VideoIntelligenceServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = VideoIntelligenceServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = VideoIntelligenceServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = VideoIntelligenceServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = VideoIntelligenceServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = VideoIntelligenceServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = VideoIntelligenceServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.VideoIntelligenceServiceTransport, "_prep_wrapped_messages"
) as prep:
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.VideoIntelligenceServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = VideoIntelligenceServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = VideoIntelligenceServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = VideoIntelligenceServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(
VideoIntelligenceServiceClient,
transports.VideoIntelligenceServiceGrpcTransport,
),
(
VideoIntelligenceServiceAsyncClient,
transports.VideoIntelligenceServiceGrpcAsyncIOTransport,
),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| |
# -*- coding: utf-8 -*-
"""Store messages from an AMQP server into a SQL database."""
import argparse
import logging
import os
import sys
import threading
import time
import traceback
from pprint import pformat
import six
import yaml
from typing import ( # noqa
Any,
Dict,
List,
)
from rabbithole.amqp import Consumer
from rabbithole.sql import Database
from rabbithole.batcher import Batcher
LOGGER = logging.getLogger(__name__)
BLOCK_CLASSES = {
'amqp': Consumer,
'sql': Database,
}
def main(argv=None):
# type: (List[str]) -> int
"""Console script for rabbithole.
:param argv: Command line arguments
:type argv: list(str)
"""
if argv is None:
argv = sys.argv[1:]
args = parse_arguments(argv)
config = args['config']
configure_logging(args['log_level'], args['log_file'])
    LOGGER.debug('Configuration:\n%s', pformat(config))
namespace = {
block['name']: create_block_instance(block)
for block in config['blocks']
}
batcher_config = {
'size_limit': config.get('size_limit'),
'time_limit': config.get('time_limit'),
}
for flow in config['flows']:
create_flow(flow, namespace, batcher_config)
run_input_blocks(namespace)
try:
# Loop needed to be able to catch KeyboardInterrupt
while True:
time.sleep(1)
except KeyboardInterrupt:
LOGGER.info('Interrupted by user')
return 0
def create_block_instance(block):
# type: (Dict[str, Any]) -> object
"""Create block instance from its configuration.
:param block: Block configuration
:type block: dict(str)
:return: Block instance
:rtype: instance
"""
LOGGER.info('Creating %r block instance...', block['type'])
LOGGER.debug(
'%r block instance arguments: (args: %s, kwargs: %s)',
block['type'],
block.get('args'),
block.get('kwargs'),
)
block_class = BLOCK_CLASSES[block['type']]
try:
block_instance = block_class(
*block.get('args', []),
**block.get('kwargs', {})
)
except Exception: # pylint:disable=broad-except
LOGGER.error(traceback.format_exc())
LOGGER.error(
'Unable to create %r block: (type: %r, args: %r, kwargs: %r)',
block['name'],
block['type'],
block.get('args', []),
block.get('kwargs', {}),
)
sys.exit(1)
return block_instance
def create_flow(flow, namespace, batcher_config):
# type: (List[Dict[str, Any]], Dict[str, Any], Dict[str, int]) -> None
"""Create flow by connecting block signals.
:param flow: Flow configuration
:type flow: dict(str)
:param namespace: Block instances namespace
:type namespace: dict(str, instance)
:param batcher_config: Configuration to be passed to batcher objects
:type batcher_config: dict(str)
"""
input_block, output_block = flow
input_block_instance = namespace[input_block['name']]
try:
LOGGER.info('Getting %r input block signal...', input_block['name'])
LOGGER.debug(
'%r input block signal arguments: (args: %s, kwargs: %s)',
input_block['name'],
input_block.get('args'),
input_block.get('kwargs'),
)
input_signal = input_block_instance(
*input_block.get('args', []),
**input_block.get('kwargs', {})
)
except Exception: # pylint:disable=broad-except
LOGGER.error(traceback.format_exc())
LOGGER.error(
'Unable to get signal from %r block: (args: %r, kwargs: %r)',
input_block['name'],
input_block.get('args', []),
input_block.get('kwargs', {}),
)
sys.exit(1)
output_block_instance = namespace[output_block['name']]
try:
output_cb = output_block_instance(
*output_block.get('args', []),
**output_block.get('kwargs', {})
)
except Exception: # pylint:disable=broad-except
LOGGER.error(traceback.format_exc())
LOGGER.error(
'Unable to get callback from %r block: (args: %r, kwargs: %r)',
output_block['name'],
output_block.get('args', []),
output_block.get('kwargs', {}),
)
sys.exit(1)
batcher = Batcher(**batcher_config)
input_signal.connect(batcher.message_received_cb, weak=False)
batcher.batch_ready.connect(output_cb, weak=False)
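# A minimal sketch of the expected configuration shape, inferred from the
# keys read in main() and create_flow() above; the block names, args and
# kwargs below are illustrative only:
_EXAMPLE_CONFIG = {
    'size_limit': 5,
    'time_limit': 15,
    'blocks': [
        {'name': 'input', 'type': 'amqp', 'kwargs': {'server': 'localhost'}},
        {'name': 'output', 'type': 'sql', 'kwargs': {'url': 'sqlite://'}},
    ],
    'flows': [
        [{'name': 'input', 'args': ['my_exchange']},
         {'name': 'output', 'args': ['my_query']}],
    ],
}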
def run_input_blocks(namespace):
# type: (Dict[str, object]) -> List[threading.Thread]
"""Run inputs blocks and start receiving messages from them.
:param namespace: Block instances namespace
:type namespace: dict(str, instance)
"""
threads = []
for block_name, block_instance in six.iteritems(namespace):
run_method = getattr(block_instance, 'run', None)
if run_method:
thread = threading.Thread(name=block_name, target=run_method)
thread.daemon = True
thread.start()
threads.append(thread)
return threads
def parse_arguments(argv):
# type: (List[str]) -> Dict[str, Any]
"""Parse command line arguments.
:param argv: Command line arguments
:type argv: list(str)
:returns: Parsed arguments
:rtype: argparse.Namespace
"""
parser = argparse.ArgumentParser(description=__doc__)
def yaml_file(path):
# type: (str) -> str
"""Yaml file argument.
:param path: Path to the yaml file
:type path: str
"""
if not os.path.isfile(path):
raise argparse.ArgumentTypeError('File not found')
with open(path) as file_:
try:
                data = yaml.safe_load(file_)
except yaml.YAMLError:
raise argparse.ArgumentTypeError('YAML parsing error')
return data
parser.add_argument(
'config',
type=yaml_file,
help='Configuration file',
)
log_levels = ['debug', 'info', 'warning', 'error', 'critical']
parser.add_argument(
'-l', '--log-level',
dest='log_level',
choices=log_levels,
default='debug',
help=('Log level. One of {0} or {1} '
'(%(default)s by default)'
.format(', '.join(log_levels[:-1]), log_levels[-1])))
parser.add_argument(
'-f', '--log-file',
dest='log_file',
help='Path to log file',
)
args = vars(parser.parse_args(argv))
args['log_level'] = getattr(logging, args['log_level'].upper())
return args
def configure_logging(log_level, log_file):
# type: (int, str) -> None
"""Configure logging based on command line argument.
:param log_level: Log level passed form the command line
:type log_level: int
:param log_file: Path to log file
    :type log_file: str
"""
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
# Log to sys.stderr using log level
# passed through command line
formatter = logging.Formatter(
'%(asctime)s %(threadName)s %(levelname)s: %(message)s')
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(log_level)
root_logger.addHandler(stream_handler)
# Log to file if available
if log_file is not None:
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
file_handler.setLevel(log_level)
root_logger.addHandler(file_handler)
# Disable pika extra verbose logging
logging.getLogger('pika').setLevel(logging.WARNING)
if __name__ == "__main__":
sys.exit(main())
| |
"""
Tools to use with the simulation's synthetic spectra
"""
import numpy as np
from scipy import ndimage
import scipy.interpolate as interp
def psf_diffr(ang, wave=777, D=1., pix=True):
''' Returns the diffraction PSF (Airy disc) for a circular
aperture telescope. See R. Wilson, Reflecting Telescope Optics I,
pp. 287-289.
IN:
ang - angular coordinate in arcsecs, unless pix is True (then in
simulation pixels)
wave - wavelength (in nanometers)
D - telescope diameter (in meters)
pix - if true, then angular coordinate is read in simulation pixels
(assuming 50x50x82: 50 pixels are 6 Mm)
'''
from scipy import special as sp
a = ang * 1.
if pix:
solr = 696. # solar radius in Mm
sold = 959.5 # solar angular radius in arcsec
a *= 6 / 50. * sold / solr
# Put x in rad, then in normalized angle
a *= np.pi / 648000
a *= np.pi * D / (wave * 1e-9)
# Remove zeros, both array and non array cases
try:
if a == 0:
a = 1e-20
except ValueError:
a[np.where(a == 0)] = 1e-20
# Airy disk function
return (2 * sp.j1(a) / a)**2
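# A minimal usage sketch (values illustrative): sample the Airy pattern at a
# few angular offsets given in simulation pixels, as the docstring assumes.
def _psf_diffr_example():
    ang = np.linspace(0., 3., 7)  # offsets in simulation pixels
    return psf_diffr(ang, wave=777., D=1., pix=True)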
def psf_atm(x, a=1, b=1.):
'''Atmospheric PSF, similar to the one used in Shelyag et al. (2003),
with influences from Nordlund (1984), Collados & Vazquez (1987).
IN:
x - angular coordinate
a - parameter defining the width of the distribution
b - height of the distribution
'''
# return b * a**3/(np.sqrt(x**2+a**2))**3 # initial function
    return a / (x**2 + a**2) + b / (x**2 + b**2)  # sum of two Lorentzian terms
def psf_kernel(a, b=0.1, n=100, mu=1., phi=0., norm=True, threshold=1e-2,
minpts=11):
    ''' Returns a Lorentzian-shaped centered 2D matrix kernel
    to use in convolutions.
    IN:
    a - Lorentzian fwhm
    b - if different from zero, kernel won't conserve intensity
    n - size of the square matrix (should be even, so that the kernel,
        of size n-1, is odd and therefore properly centered)
    mu - to be used in simulations with different mu values
    threshold - fraction below which elements are trimmed from the psf
    --Tiago, 20080130
    '''
a = float(a)
b = float(b)
kernel = np.zeros((n - 1, n - 1))
kernel_atm = np.zeros((n - 1, n - 1))
kernel_dif = np.zeros((n - 1, n - 1))
for i in range(n - 1): # This is the axis where mu dist will occur
for j in range(n - 1):
# mu acts on the x axis, convention of phi=0 is along x axis
r = np.sqrt(mu * (i - n / 2 + 1)**2 + (j - n / 2 + 1)**2)
#kernel[i,j] = psf_diffr(r) + psf_atm(r,a,b)
# new way, separate both components and then convolve them
kernel_atm[i, j] = psf_atm(r, a, b)
kernel_dif[i, j] = psf_diffr(r)
#kernel = ndimage.convolve(kernel_atm,kernel_dif)
    if norm:
        # kernel itself is only assembled from the convolution below, so
        # normalize just the two components here (avoids a 0/0 on the zeros)
        kernel_atm /= np.sum(kernel_atm)
        kernel_dif /= np.sum(kernel_dif)
# if phi is nonzero, rotate the matrix:
if phi != 0 and mu != 1:
# phi in degrees
kernel = ndimage.rotate(kernel, phi, reshape=False)
kernel_atm = ndimage.rotate(kernel_atm, phi, reshape=False)
kernel_dif = ndimage.rotate(kernel_dif, phi, reshape=False)
# Select elements contributing less than 0.1% to the middle row integral
# first for kernel_atm
kernel_atm = psf_trim(kernel_atm, threshold, minpts=minpts)
# second for kernel_dif
# kernel_dif = psf_trim(kernel_dif,threshold,5) # not essential
# convolve with diffraction PSF
kernel = ndimage.convolve(kernel_atm, kernel_dif)
if norm:
kernel /= np.sum(kernel)
return kernel
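# A minimal usage sketch (parameters illustrative): build the combined
# atmospheric + diffraction kernel and smear a stand-in intensity map.
def _psf_kernel_example():
    kernel = psf_kernel(a=0.5, b=0.1, n=50)
    image = np.random.random((100, 100))  # stand-in for a simulated map
    return ndimage.convolve(image, kernel)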
def psf_trim(psf, threshold, minpts=11):
''' Trims PSF by removing elements contributing less than the threshold
fraction to the middle row integral. The minimum number of points for
the resulting psf is minpts.'''
n = psf.shape[0]
(stix, stiy) = (0, 0)
# find x cutoff value
uu = np.cumsum(psf[:, n // 2]) / np.sum(psf[:, n // 2])
if np.any(np.where(uu < threshold)):
stix = np.max(np.where(uu < threshold))
# leave at least minpts elements
if stix > n // 2 - minpts // 2:
stix = n // 2 - minpts // 2
# find y cutoff value
uu = np.cumsum(psf[n // 2, :]) / np.sum(psf[n // 2, :])
if np.any(np.where(uu < threshold)):
stiy = np.max(np.where(uu < threshold))
# leave at least minpts elements
if stiy > n // 2 - minpts // 2:
stiy = n // 2 - minpts // 2
return psf[stix:n - stix, stiy:n - stiy]
def gaussconv(spec, wave, resolution, fixed=False):
''' Convolves spectra with a gaussian, given a resolution
and wavelength array.
IN:
spec - spectrum array (can be 1D, 2D or nD as long as last dimension is wave)
wave - wavelength array
resolution - resolving power in dw/w
fixed - if true, will treat resolution as fixed FWHM (in wave units)
OUT:
convolved spectrum
--Tiago, 20080201
'''
ishp = spec.shape
if len(ishp) > 1:
# Make the spectrum a 2D array [spatial point,wave point]
a = ishp[0]
for i in range(1, len(ishp) - 1):
a *= ishp[i]
nspec = np.reshape(spec, (a, ishp[-1]))
else:
nspec = np.array([spec])
out = np.zeros(nspec.shape)
# mean wavelengh step
step = abs(np.mean(wave[1:] - wave[:-1]))
# note: this fwhm and sigma are in 'pixels' (or array units),
# hence the need to divide by step
if not fixed:
fwhm = np.mean(wave) / (resolution * step)
else: # use a fixed fwhm, given by resolution in wavelength units
fwhm = resolution / step
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
for n in range(nspec.shape[0]):
out[n] = ndimage.gaussian_filter1d(nspec[n], sigma)
if len(ishp) > 1:
out.shape = ishp
else:
out = out[0]
return out
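# A minimal usage sketch (numbers illustrative): degrade a synthetic Gaussian
# line profile to R = 1e5; note sigma = fwhm / (2*sqrt(2*ln 2)) above.
def _gaussconv_example():
    wave = np.linspace(776.8, 777.2, 400)  # nm
    spec = 1. - 0.5 * np.exp(-(wave - 777.)**2 / 2e-4)
    return gaussconv(spec, wave, resolution=1e5)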
def sincconv(spec, wavein, resolution, fine=True, lobes=100):
''' Convolves spectra with a sinc function, given a resolution
and wavelength array.
IN:
spec - spectrum array (can be 1D, 2D or 3D)
wave - wavelength array
resolution - resolving power in dw/w
fine - if True, will interpolate the spectrum to a finer wavelength grid,
in the end interpolating back to the original grid.
    lobes - number of zeros of sinc. Must be an even number!
            At least 80 recommended.
OUT:
convolved spectrum
--Tiago, 20090127
'''
ishp = spec.shape
if len(ishp) > 1:
# Make the spectrum a 2D array [spatial point,wave point]
a = ishp[0]
for i in range(1, len(ishp) - 1):
a *= ishp[i]
nspec = np.reshape(spec, (a, ishp[-1]))
else:
nspec = np.array([spec])
out = np.zeros(nspec.shape)
if fine:
# interpolate to wavelength grid 5x higher than required resolution
res = np.mean(wavein) / resolution
wave = np.arange(wavein[0], wavein[-1], res / 5.)
nspec2 = np.zeros((nspec.shape[0], len(wave)))
for i in range(nspec.shape[0]):
nspec2[i] = interp.splev(wave, interp.splrep(
wavein, nspec[i], k=3, s=0), der=0)
nspec = nspec2
else:
wave = wavein.copy()
# mean wavelengh step
step = abs(np.mean(wave[1:] - wave[:-1]))
# note: this fwhm is in wavelength units! (later divided by step)
fwhm = np.mean(wave) / (resolution)
    # Make sinc function out to the lobes-th zero-crossing on either side.
    # (For 20 lobes, the error due to ignoring additional lobes is less than
    # 0.2% of continuum; reducing the extent to 10 lobes doubles that error.)
hwhm = fwhm / 2. # half width at half maximum
# lobes = nr of zeros of sinc (radians)
xxrange = lobes * np.pi
nhalf = int(xxrange / np.pi * fwhm / step +
0.999) # nr. points in half sinc
nsinc = 2 * nhalf + 1 # nr. points in sinc (odd!)
wsinc = (np.arange(nsinc) - nhalf) * step # abcissa (wavelength)
xsinc = wsinc / (hwhm) * np.pi # abcissa (radians)
xsinc[nhalf] = 1.0 # avoid divide by zero
    sinc = np.sin(xsinc) / xsinc          # calculate sinc
sinc[nhalf] = 1.0 # insert midpoint
sinc /= np.sum(sinc) # normalize sinc
# convolve
for n in range(nspec.shape[0]):
result = ndimage.convolve1d(nspec[n], sinc)
if fine: # interpolate back to original wave grid
out[n] = interp.splev(wavein, interp.splrep(
wave, result, k=3, s=0), der=0)
else:
out[n] = result
if len(ishp) > 1:
out.shape = ishp
else:
out = out[0]
return out
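# A minimal usage sketch (numbers illustrative): same call pattern as
# gaussconv(), with the spectrum resampled on a 5x finer grid first.
def _sincconv_example():
    wave = np.linspace(776.8, 777.2, 400)  # nm
    spec = 1. - 0.5 * np.exp(-(wave - 777.)**2 / 2e-4)
    return sincconv(spec, wave, resolution=1e5, fine=True, lobes=100)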
######################################
### INPUT FILE GENERATING PROGRAMS ###
######################################
def buildltein(line, mu, phi, sim='fsun201', nts=20, nta=3, multphi_nt=False):
# Get initial data
f = open('lte.in.' + line + '.source', 'r')
ll_ini = f.readlines()
f.close()
phi = np.array([phi]).ravel()
for p in range(len(phi)):
ll = ll_ini
ll[0] = "'scr0/%s_nopack.int','linetab/oxyobs/%s.%s.tab','lineprof/oxyobs/%s.%s_phi%s_mu%s.I'\n" % \
(sim, line, sim, line, sim, str(p), str(mu))
        if multphi_nt:
# Advance nta snapshots in each different phi
if len(phi) * nta > nts:
print('(EEE) buildltein: nts not big enough to cover '
'separation from all phi angles.')
return
            ll[2] = ' %i, 98, %i, 1, .ns1,ns2,ns3,ns4 (snapshots)\n' % (
                (p + 1) * nta, nts)
else:
ll[2] = ' 1, 98, %i, 1, .ns1,ns2,ns3,ns4 (snapshots)\n' % nts
ll[9] = ' 1, 1, 1,%.3f,%.2f, .nfl,nmy,nphi,xmu1,phi1\n' % (mu, phi[p])
outfile = 'lte.in.%s_phi%s_mu%s' % (line, phi[p], mu)
out = open(outfile, 'w')
out.writelines(ll)
out.close()
        print('*** Wrote ' + outfile)
return
| |
'''
Created on Aug 16, 2011
@author: jklo
'''
import couchdb
import sys, os, gnupg, json, getpass
import traceback
from uuid import uuid4
import lrnodetemplate as t
from pprint import pprint
import urlparse
#Default url to the couchdb server.
_DEFAULT_COUCHDB_URL = "http://127.0.0.1:5984"
_DEFAULT_AUTH_COUCHDB_URL = "http://admin:password@127.0.0.1:5984"
class ResponseFile(object):
def __init__(self, filename=None):
self._response_file = None
ResponseFile.set(self, filename)
def set(self, path):
self._path = path
try:
self._response_file = open(path, "w")
self._response_file.truncate()
self._response_file.flush()
except:
pass
def write(self, response):
if self._response_file:
self._response_file.write("{0}{1}".format(response, os.linesep))
self._response_file.flush()
def close(self):
if self._response_file:
self._response_file.close()
self._response_file = None
self._path = None
response_file = ResponseFile()
def publishService(nodeUrl, server, dbname, serviceType, serviceName):
service = {}
service.update(t.service_description)
    service['service_type'] = serviceType
service['service_id'] = uuid4().hex
# service['service_name'] = serviceName+" service"
service['service_name'] = serviceName
service["service_endpoint"] = urlparse.urljoin(nodeUrl, serviceName)
service['service_description']= "{0} {1} service".format(serviceType, serviceName)
PublishDoc(server, dbname, "{0}:{1} service".format(serviceType, serviceName), service)
def CreateDB(couchServer = _DEFAULT_COUCHDB_URL, dblist=[], deleteDB=False):
'''Creates a DB in Couch based upon config'''
for db in dblist:
if deleteDB:
try:
del couchServer[db]
except couchdb.http.ResourceNotFound as rnf:
print("DB '{0}' doesn't exist on '{1}', creating".format(db, couchServer))
else:
try:
existingDB = couchServer[db]
print("Using existing DB '{0}' on '{1}'\n".format(db, couchServer))
continue
except:
pass
try:
couchServer.create(db)
print("Created DB '{0}' on '{1}'\n".format(db, couchServer))
except Exception as e:
print("Exception while creating database: {0}\n".format(e) )
def PublishDoc(couchServer, dbname, name, doc_data):
try:
#delete existing document.
db = couchServer[dbname]
if "_rev" in doc_data:
del doc_data["_rev"]
try:
del db[name]
except:
pass
db[name] = doc_data
print("Added config document '{0}' to '{1}".format(name, dbname))
except Exception as ex:
print("Exception when add config document:\n")
exc_type, exc_value, exc_tb = sys.exc_info()
pprint(traceback.format_exception(exc_type, exc_value, exc_tb))
def testCouchServer(serverURL):
try:
couchServer = couchdb.Server(url=serverURL)
        # Try to get the server version to ensure that the server is up
        # and running. There may be a better way of doing this.
couchServer.version()
except Exception as e:
print(e)
print("Cannot connect to couchDB server '{0}'\n".format(serverURL))
return False
return True
def testAuthCouchServer(serverURL):
try:
couchServer = couchdb.Server(url=serverURL)
        # Try to get the server configuration (admin-only) to ensure that
        # the server is up and running. There may be a better way of doing this.
couchServer.config()
except Exception as e:
print(e)
print("Cannot connect to couchDB server '{0}'\n".format(serverURL))
return False
return True
def getInput(question, defaultInput=None, validateFunc=None, hide_input=False):
ques = question+': '
if defaultInput is not None:
ques = question+' [{0}]: '.format(defaultInput)
while True:
if not hide_input:
userInput = raw_input(ques)
else:
userInput = getpass.getpass(ques)
inputLen = len(userInput.strip())
if inputLen == 0:
if defaultInput is not None:
userInput = defaultInput
else:
continue
if validateFunc is not None and validateFunc(userInput) == False:
continue
response_file.write(userInput)
return userInput
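# A minimal usage sketch (prompt text illustrative): keep asking until
# isInt() accepts the input, falling back to the default on empty input.
def _get_port_example():
    return getInput("Enter the couchDB port", defaultInput="5984",
                    validateFunc=isInt)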
_DEFAULT_ENDPOINT = "http://www.example.com"
def isValidKey(userInput):
pass
def isURL(userInput):
if userInput.lower() == _DEFAULT_ENDPOINT:
return False
import re
return re.match("^https?://[^/]+", userInput.lower()) is not None
YES = ['t', 'true', 'yes', 'y']
NO = ['f', 'false', 'no', 'n']
def isBoolean(userInput):
    if userInput.lower() in YES or userInput.lower() in NO:
        return True
    return False
def isInt(userInput):
try:
int(userInput)
return True
except ValueError:
return False
def getDefaultEndpoint():
import socket
hostname = socket.gethostname()
    if hostname is not None:
parts = list(urlparse.urlsplit(_DEFAULT_ENDPOINT))
parts[1] = hostname
return urlparse.urlunsplit(parts)
else:
return _DEFAULT_ENDPOINT
def getSetupInfo(response_file=None):
"""Get the user node info"""
nodeSetup = {}
nodeUrl = getInput("Enter the node service endpoint URL", getDefaultEndpoint(), isURL)
nodeSetup['nodeUrl'] = nodeUrl
couchDBUrl = getInput("Enter your unauthenticated couchDB server URL",
_DEFAULT_COUCHDB_URL, testCouchServer)
nodeSetup['couchDBUrl'] = couchDBUrl
couchDBUrlDBA = getInput("Enter your AUTHENTICATED CouchDB server DBA URL",
_DEFAULT_AUTH_COUCHDB_URL, testAuthCouchServer)
nodeSetup['couchDBUrlDBA'] = couchDBUrlDBA
nodeName = getInput("Enter your node name", "Node@{0}".format(nodeUrl))
nodeSetup['node_name'] = nodeName
nodeDescription = getInput("Enter your node description", nodeName)
nodeSetup['node_description'] = nodeDescription
adminUrl = getInput("Enter node admin indentity",
"admin@learningregistry.org".format(nodeUrl))
nodeSetup['node_admin_identity'] = adminUrl
distributeTargets = getInput("Enter the URLs of nodes that you wish to distribute to",
"")
nodeSetup['connections'] = distributeTargets.split()
isGatewayNode = getInput('Is the node a gateway node" (T/F)', 'F')
nodeSetup['gateway_node'] = (isGatewayNode == 'T')
isNodeOpen = getInput('Is the node "open" (T/F)', 'T')
nodeSetup['open_connect_source'] = (isNodeOpen=='T')
'''
nodeSetup['distributeResourceDataUrl'] = getInput("\nEnter distribute/replication "+
"resource_data destination URL \n(this is the resource_data URL that another node couchdb "+
"will use to replicate/distribute to this node)", "{0}/resource_data".format(nodeUrl))
'''
nodeSetup['distributeResourceDataUrl'] = "{0}/resource_data".format(nodeUrl)
nodeSetup['distributeIncomingUrl'] = getInput("\nEnter distribute/replication "+
"incoming destination URL \n(this is the incoming URL that another node couchdb "+
"will use to replicate/distribute to this node)", "{0}/incoming".format(nodeUrl))
isDistributeDest = getInput("Does the node want to be the destination for replication (T/F)", 'T')
    nodeSetup['open_connect_dest'] = (isDistributeDest == 'T')
return nodeSetup
def getDefaultGnuPGHome():
return os.path.join(os.path.expanduser('~'), ".gnupg")
def getGPG(gpgbin, gnupghome):
return gnupg.GPG(gpgbin, gnupghome)
def checkKey(gpg):
def checkKeyID(userInput):
try:
if len(userInput.strip()) == 0:
return False
privateKey = gpg.export_keys(userInput, True)
            publicKey = gpg.export_keys(userInput, False)
foundKey = len(privateKey) > 0 and len(publicKey) > 0
if not foundKey:
print("Invalid Private Key ID. Ensure key public and private key exists in keyring. Please try again.\n")
return foundKey
except:
pass
return False
return checkKeyID
def checkPassphrase(gpg, keyID):
def check(userInput):
try:
sign = gpg.sign("hello learning registry", keyid=keyID, passphrase=userInput)
if len(sign.data) > 0 and len(sign.fingerprint) > 0:
return True
else:
print("Bad passphrase! Please try again.\n")
except:
pass
return False
return check
def getDefaultSigner(gpg, keyID):
try:
for key in gpg.list_keys(True):
if key['keyid'] == keyID.strip():
return key['uids'][0]
except:
pass
return None
def setNodeSigning(server, config, setupInfo):
if "oauth" in setupInfo and setupInfo["oauth"]:
from services.service_template import getCouchAppPath
import oauth2 as oauth, time
gpgbin = getInput("Path to GnuPG executable", "gpg")
setupInfo["lr.publish.signing.gpgbin"] = gpgbin
config.set("app:main","lr.publish.signing.gpgbin",gpgbin)
gnupghome = getInput("Path to GnuPG Home", getDefaultGnuPGHome())
setupInfo["lr.publish.signing.gnupghome"] = gnupghome
config.set("app:main","lr.publish.signing.gnupghome",gnupghome)
gpg = getGPG(gpgbin, gnupghome)
privateKeyId = getInput("Private Key Id for Signing", "", checkKey(gpg)).strip()
setupInfo["lr.publish.signing.privatekeyid"] = privateKeyId
config.set("app:main","lr.publish.signing.privatekeyid",privateKeyId)
publickeylocations = [ "%s/pubkey" % setupInfo['nodeUrl']]
setupInfo["lr.publish.signing.publickeylocations"] = json.dumps(publickeylocations)
config.set("app:main","lr.publish.signing.publickeylocations",json.dumps(publickeylocations))
signer = getInput("Signer for Resource Data Identity", getDefaultSigner(gpg, privateKeyId))
setupInfo["lr.publish.signing.signer"] = signer
config.set("app:main","lr.publish.signing.signer",signer)
show_pass = not setupInfo["show_pass"]
passphrase = getInput("Passphrase for Signing with Private Key [typing is concealed]", "", checkPassphrase(gpg, privateKeyId), hide_input=show_pass)
setupInfo["lr.publish.signing.passphrase"] = passphrase
config.set("app:main","lr.publish.signing.passphrase",passphrase)
server.resource("_config","couch_httpd_oauth").put('use_users_db', '"true"')
server.resource("_config","httpd").put('WWW-Authenticate', '"OAuth"')
server.resource("_config","browserid").put('enabled', '"true"')
apps = config.get("app:main", "couchdb.db.apps", "apps")
try:
server.create(apps)
except:
pass
oauthCouchApp = os.path.join(getCouchAppPath(),apps,"kanso","oauth-key-management.json")
with open(oauthCouchApp) as f:
ddoc = json.load(f)
try:
del server[apps]["_design/%s"%ddoc['kanso']['config']['name']]
except:
pass
ddoc["_id"] = "_design/%s"%ddoc['kanso']['config']['name']
server[apps].save(ddoc)
setupInfo["oauth.app.name"] = ddoc['kanso']['config']['name']
setupInfo["lr.oauth.signup"] = "{0}/apps/{1}".format(setupInfo["nodeUrl"],ddoc['kanso']['config']['name'])
config.set("app:main","lr.oauth.signup",setupInfo["lr.oauth.signup"])
## TODO: Need to make an initial OAuth call to get the oauth view installed.
users = config.get("app:main", "couchdb.db.users", "_users")
couch_url = config.get("app:main", "couchdb.url", "http://localhost:5984")
dummy_user = {
"_id": "org.couchdb.user:tempuser",
"name": "tempuser",
"type": "user",
"roles": [],
"oauth": {
"consumer_keys":
{
"localhost": "walt_2.0"
},
"tokens":
{
"temptoken": "learningregistry"
}
}
}
server[users].save(dummy_user)
# Create your consumer with the proper key/secret.
consumer = oauth.Consumer(key="localhost",
secret=dummy_user["oauth"]["consumer_keys"]["localhost"])
token = oauth.Token(key="temptoken",
secret=dummy_user["oauth"]["tokens"]["temptoken"])
# Create our client.
client = oauth.Client(consumer, token=token)
client.disable_ssl_certificate_validation=True
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time())
}
resp, content = client.request("{0}/_session".format(couch_url), "GET", headers={"Content-Type": "application/json"})
del server[users][dummy_user["_id"]]
return True
return False
| |
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.contrib.sites import models
from django.contrib.sites.management import create_default_site
from django.contrib.sites.middleware import CurrentSiteMiddleware
from django.contrib.sites.models import Site, clear_site_cache
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.signals import post_migrate
from django.http import HttpRequest
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import captured_stdout
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class SitesFrameworkTests(TestCase):
multi_db = True
def setUp(self):
self.site = Site(
id=settings.SITE_ID,
domain="example.com",
name="example.com",
)
self.site.save()
def test_site_manager(self):
# Make sure that get_current() does not return a deleted Site object.
s = Site.objects.get_current()
self.assertIsInstance(s, Site)
s.delete()
self.assertRaises(ObjectDoesNotExist, Site.objects.get_current)
def test_site_cache(self):
# After updating a Site object (e.g. via the admin), we shouldn't return a
# bogus value from the SITE_CACHE.
site = Site.objects.get_current()
self.assertEqual("example.com", site.name)
s2 = Site.objects.get(id=settings.SITE_ID)
s2.name = "Example site"
s2.save()
site = Site.objects.get_current()
self.assertEqual("Example site", site.name)
def test_delete_all_sites_clears_cache(self):
# When all site objects are deleted the cache should also
# be cleared and get_current() should raise a DoesNotExist.
self.assertIsInstance(Site.objects.get_current(), Site)
Site.objects.all().delete()
self.assertRaises(Site.DoesNotExist, Site.objects.get_current)
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_current_site(self):
# Test that the correct Site object is returned
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
site = get_current_site(request)
self.assertIsInstance(site, Site)
self.assertEqual(site.id, settings.SITE_ID)
# Test that an exception is raised if the sites framework is installed
# but there is no matching Site
site.delete()
self.assertRaises(ObjectDoesNotExist, get_current_site, request)
# A RequestSite is returned if the sites framework is not installed
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
site = get_current_site(request)
self.assertIsInstance(site, RequestSite)
self.assertEqual(site.name, "example.com")
@override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
def test_get_current_site_no_site_id(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
del settings.SITE_ID
site = get_current_site(request)
self.assertEqual(site.name, "example.com")
def test_domain_name_with_whitespaces(self):
# Regression for #17320
        # Domain names are not allowed to contain whitespace characters
site = Site(name="test name", domain="test test")
self.assertRaises(ValidationError, site.full_clean)
site.domain = "test\ttest"
self.assertRaises(ValidationError, site.full_clean)
site.domain = "test\ntest"
self.assertRaises(ValidationError, site.full_clean)
def test_clear_site_cache(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
self.assertEqual(models.SITE_CACHE, {})
get_current_site(request)
expected_cache = {self.site.id: self.site}
self.assertEqual(models.SITE_CACHE, expected_cache)
with self.settings(SITE_ID=''):
get_current_site(request)
expected_cache.update({self.site.domain: self.site})
self.assertEqual(models.SITE_CACHE, expected_cache)
clear_site_cache(Site, instance=self.site, using='default')
self.assertEqual(models.SITE_CACHE, {})
@override_settings(SITE_ID='')
def test_clear_site_cache_domain(self):
site = Site.objects.create(name='example2.com', domain='example2.com')
request = HttpRequest()
request.META = {
"SERVER_NAME": "example2.com",
"SERVER_PORT": "80",
}
get_current_site(request) # prime the models.SITE_CACHE
expected_cache = {site.domain: site}
self.assertEqual(models.SITE_CACHE, expected_cache)
# Site exists in 'default' database so using='other' shouldn't clear.
clear_site_cache(Site, instance=site, using='other')
self.assertEqual(models.SITE_CACHE, expected_cache)
# using='default' should clear.
clear_site_cache(Site, instance=site, using='default')
self.assertEqual(models.SITE_CACHE, {})
class JustOtherRouter(object):
def allow_migrate(self, db, app_label, **hints):
return db == 'other'
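# With this router installed, allow_migrate() steers writes to the 'other'
# alias only; test_multi_db_with_router below asserts exactly that split.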
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class CreateDefaultSiteTests(TestCase):
multi_db = True
def setUp(self):
self.app_config = apps.get_app_config('sites')
# Delete the site created as part of the default migration process.
Site.objects.all().delete()
def test_basic(self):
"""
#15346, #15573 - create_default_site() creates an example site only if
none exist.
"""
with captured_stdout() as stdout:
create_default_site(self.app_config)
self.assertEqual(Site.objects.count(), 1)
self.assertIn("Creating example.com", stdout.getvalue())
with captured_stdout() as stdout:
create_default_site(self.app_config)
self.assertEqual(Site.objects.count(), 1)
self.assertEqual("", stdout.getvalue())
@override_settings(DATABASE_ROUTERS=[JustOtherRouter()])
def test_multi_db_with_router(self):
"""
#16353, #16828 - The default site creation should respect db routing.
"""
create_default_site(self.app_config, using='default', verbosity=0)
create_default_site(self.app_config, using='other', verbosity=0)
self.assertFalse(Site.objects.using('default').exists())
self.assertTrue(Site.objects.using('other').exists())
def test_multi_db(self):
create_default_site(self.app_config, using='default', verbosity=0)
create_default_site(self.app_config, using='other', verbosity=0)
self.assertTrue(Site.objects.using('default').exists())
self.assertTrue(Site.objects.using('other').exists())
def test_save_another(self):
"""
#17415 - Another site can be created right after the default one.
On some backends the sequence needs to be reset after saving with an
        explicit ID. Test that there isn't a sequence collision by saving
another site. This test is only meaningful with databases that use
sequences for automatic primary keys such as PostgreSQL and Oracle.
"""
create_default_site(self.app_config, verbosity=0)
Site(domain='example2.com', name='example2.com').save()
def test_signal(self):
"""
#23641 - Sending the ``post_migrate`` signal triggers creation of the
default site.
"""
post_migrate.send(sender=self.app_config, app_config=self.app_config, verbosity=0)
self.assertTrue(Site.objects.exists())
@override_settings(SITE_ID=35696)
def test_custom_site_id(self):
"""
#23945 - The configured ``SITE_ID`` should be respected.
"""
create_default_site(self.app_config, verbosity=0)
self.assertEqual(Site.objects.get().pk, 35696)
class MiddlewareTest(TestCase):
def test_request(self):
""" Makes sure that the request has correct `site` attribute. """
middleware = CurrentSiteMiddleware()
request = HttpRequest()
middleware.process_request(request)
self.assertEqual(request.site.id, settings.SITE_ID)
| |
# stdlib
from copy import deepcopy
from datetime import datetime, timedelta
from hashlib import md5
import re
import time
import traceback
from Queue import Queue, Empty
# project
from checks import AgentCheck
from util import Timer
from checks.libs.thread_pool import Pool
from checks.libs.vmware.basic_metrics import BASIC_METRICS
from checks.libs.vmware.all_metrics import ALL_METRICS
# 3rd party
from pyVim import connect
# This drives travis-ci pylint crazy!
from pyVmomi import vim # pylint: disable=E0611
SOURCE_TYPE = 'vsphere'
REAL_TIME_INTERVAL = 20 # Default vCenter sampling interval
# The size of the ThreadPool used to process the request queue
DEFAULT_SIZE_POOL = 4
# The interval in seconds between two refresh of the entities list
REFRESH_MORLIST_INTERVAL = 3 * 60
# The interval in seconds between two refresh of metrics metadata (id<->name)
REFRESH_METRICS_METADATA_INTERVAL = 10 * 60
# The amount of jobs batched at the same time in the queue to query available metrics
BATCH_MORLIST_SIZE = 50
# Time after which we reap the jobs that clog the queue
# TODO: use it
JOB_TIMEOUT = 10
EXCLUDE_FILTERS = {
'AlarmStatusChangedEvent': [r'Gray'],
'TaskEvent': [
r'Initialize powering On',
r'Power Off virtual machine',
r'Power On virtual machine',
r'Reconfigure virtual machine',
r'Relocate virtual machine',
r'Suspend virtual machine',
r'Migrate virtual machine',
],
'VmBeingHotMigratedEvent': [],
'VmMessageEvent': [],
'VmMigratedEvent': [],
'VmPoweredOnEvent': [],
'VmPoweredOffEvent': [],
'VmReconfiguredEvent': [],
'VmResumedEvent': [],
'VmSuspendedEvent': [],
}
MORLIST = 'morlist'
METRICS_METADATA = 'metrics_metadata'
LAST = 'last'
INTERVAL = 'interval'
class VSphereEvent(object):
UNKNOWN = 'unknown'
def __init__(self, raw_event, event_config=None):
self.raw_event = raw_event
if self.raw_event and self.raw_event.__class__.__name__.startswith('vim.event'):
self.event_type = self.raw_event.__class__.__name__[10:]
else:
self.event_type = VSphereEvent.UNKNOWN
self.timestamp = int((self.raw_event.createdTime.replace(tzinfo=None) - datetime(1970, 1, 1)).total_seconds())
self.payload = {
"timestamp": self.timestamp,
"event_type": SOURCE_TYPE,
"source_type_name": SOURCE_TYPE,
}
if event_config is None:
self.event_config = {}
else:
self.event_config = event_config
def _is_filtered(self):
# Filter the unwanted types
if self.event_type not in EXCLUDE_FILTERS:
return True
filters = EXCLUDE_FILTERS[self.event_type]
for f in filters:
if re.search(f, self.raw_event.fullFormattedMessage):
return True
return False
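    # Example of the logic above: an AlarmStatusChangedEvent whose message
    # matches r'Gray' is dropped; a VmPoweredOnEvent (empty filter list) is
    # always kept; any type absent from EXCLUDE_FILTERS is dropped outright.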
def get_datadog_payload(self):
if self._is_filtered():
return None
transform_method = getattr(self, 'transform_%s' % self.event_type.lower(), None)
if callable(transform_method):
return transform_method()
# Default event transformation
self.payload["msg_title"] = u"{0}".format(self.event_type)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
return self.payload
def transform_vmbeinghotmigratedevent(self):
self.payload["msg_title"] = u"VM {0} is being migrated".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"{user} has launched a hot migration of this virtual machine:\n".format(user=self.raw_event.userName)
changes = []
pre_host = self.raw_event.host.name
new_host = self.raw_event.destHost.name
pre_dc = self.raw_event.datacenter.name
new_dc = self.raw_event.destDatacenter.name
pre_ds = self.raw_event.ds.name
new_ds = self.raw_event.destDatastore.name
if pre_host == new_host:
changes.append(u"- No host migration: still {0}".format(new_host))
else:
# Insert in front if it's a change
changes = [u"- Host MIGRATION: from {0} to {1}".format(pre_host, new_host)] + changes
if pre_dc == new_dc:
changes.append(u"- No datacenter migration: still {0}".format(new_dc))
else:
# Insert in front if it's a change
changes = [u"- Datacenter MIGRATION: from {0} to {1}".format(pre_dc, new_dc)] + changes
if pre_ds == new_ds:
changes.append(u"- No datastore migration: still {0}".format(new_ds))
else:
# Insert in front if it's a change
changes = [u"- Datastore MIGRATION: from {0} to {1}".format(pre_ds, new_ds)] + changes
self.payload["msg_text"] += "\n".join(changes)
self.payload['host'] = self.raw_event.vm.name
self.payload['tags'] = [
'vsphere_host:%s' % pre_host,
'vsphere_host:%s' % new_host,
'vsphere_datacenter:%s' % pre_dc,
'vsphere_datacenter:%s' % new_dc,
]
return self.payload
def transform_alarmstatuschangedevent(self):
if self.event_config.get('collect_vcenter_alarms') is None:
return None
def get_transition(before, after):
vals = {
'gray': -1,
'green': 0,
'yellow': 1,
'red': 2
}
before = before.lower()
after = after.lower()
if before not in vals or after not in vals:
return None
if vals[before] < vals[after]:
return 'Triggered'
else:
return 'Recovered'
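        # e.g. get_transition('green', 'red') -> 'Triggered';
        #      get_transition('red', 'green') -> 'Recovered'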
TO_ALERT_TYPE = {
'green': 'success',
'yellow': 'warning',
'red': 'error'
}
def get_agg_key(alarm_event):
return 'h:{0}|dc:{1}|a:{2}'.format(
md5(alarm_event.entity.name).hexdigest()[:10],
md5(alarm_event.datacenter.name).hexdigest()[:10],
md5(alarm_event.alarm.name).hexdigest()[:10]
)
# Get the entity type/name
if self.raw_event.entity.entity.__class__ == vim.VirtualMachine:
host_type = 'VM'
elif self.raw_event.entity.entity.__class__ == vim.HostSystem:
host_type = 'host'
else:
return None
host_name = self.raw_event.entity.name
# Need a getattr because from is a reserved keyword...
trans_before = getattr(self.raw_event, 'from')
trans_after = self.raw_event.to
transition = get_transition(trans_before, trans_after)
# Bad transition, we shouldn't have got this transition
if transition is None:
return None
self.payload['msg_title'] = u"[{transition}] {monitor} on {host_type} {host_name} is now {status}".format(
transition=transition,
monitor=self.raw_event.alarm.name,
host_type=host_type,
host_name=host_name,
status=trans_after
)
self.payload['alert_type'] = TO_ALERT_TYPE[trans_after]
self.payload['event_object'] = get_agg_key(self.raw_event)
self.payload['msg_text'] = u"""vCenter monitor status changed on this alarm, it was {before} and it's now {after}.""".format(
before=trans_before,
after=trans_after
)
self.payload['host'] = host_name
return self.payload
def transform_vmmessageevent(self):
self.payload["msg_title"] = u"VM {0} is reporting".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmmigratedevent(self):
self.payload["msg_title"] = u"VM {0} has been migrated".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"@@@\n{0}\n@@@".format(self.raw_event.fullFormattedMessage)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmpoweredoffevent(self):
self.payload["msg_title"] = u"VM {0} has been powered OFF".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has powered off this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
user=self.raw_event.userName,
dc=self.raw_event.datacenter.name,
host=self.raw_event.host.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmpoweredonevent(self):
self.payload["msg_title"] = u"VM {0} has been powered ON".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has powered on this virtual machine. It is running on:
- datacenter: {dc}
- host: {host}
""".format(
user=self.raw_event.userName,
dc=self.raw_event.datacenter.name,
host=self.raw_event.host.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmresumingevent(self):
self.payload["msg_title"] = u"VM {0} is RESUMING".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has resumed {vm}. It will soon be powered on.""".format(
user=self.raw_event.userName,
vm=self.raw_event.vm.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmsuspendedevent(self):
self.payload["msg_title"] = u"VM {0} has been SUSPENDED".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"""{user} has suspended this virtual machine. It was running on:
- datacenter: {dc}
- host: {host}
""".format(
user=self.raw_event.userName,
dc=self.raw_event.datacenter.name,
host=self.raw_event.host.name
)
self.payload['host'] = self.raw_event.vm.name
return self.payload
def transform_vmreconfiguredevent(self):
self.payload["msg_title"] = u"VM {0} configuration has been changed".format(self.raw_event.vm.name)
self.payload["msg_text"] = u"{user} saved the new configuration:\n@@@\n".format(user=self.raw_event.userName)
        # Add one line per configuration change; don't show 'unset' values (hacky)
config_change_lines = [ line for line in self.raw_event.configSpec.__repr__().splitlines() if 'unset' not in line ]
self.payload["msg_text"] += u"\n".join(config_change_lines)
self.payload["msg_text"] += u"\n@@@"
self.payload['host'] = self.raw_event.vm.name
return self.payload
def atomic_method(method):
""" Decorator to catch the exceptions that happen in detached thread atomic tasks
and display them in the logs.
"""
def wrapper(*args, **kwargs):
try:
method(*args, **kwargs)
except Exception as e:
args[0].exceptionq.put("A worker thread crashed:\n" + traceback.format_exc())
return wrapper
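# A minimal usage sketch (class and queue names illustrative): any object
# exposing an `exceptionq` Queue can route worker-thread crashes through
# the decorator instead of losing them in a detached thread.
class _ExampleWorker(object):
    def __init__(self):
        self.exceptionq = Queue()

    @atomic_method
    def do_work(self):
        raise RuntimeError("boom")  # traceback ends up on self.exceptionq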
class VSphereCheck(AgentCheck):
""" Get performance metrics from a vCenter server and upload them to Datadog
References:
http://pubs.vmware.com/vsphere-51/index.jsp#com.vmware.wssdk.apiref.doc/vim.PerformanceManager.html
*_atomic jobs perform one single task asynchronously in the ThreadPool; we
don't know exactly when they will finish, but we reap them if they're stuck.
The other calls are performed synchronously.
"""
SERVICE_CHECK_NAME = 'vcenter.can_connect'
def __init__(self, name, init_config, agentConfig, instances):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.time_started = time.time()
self.pool_started = False
self.exceptionq = Queue()
# Connections open to vCenter instances
self.server_instances = {}
# Event configuration
self.event_config = {}
# Caching resources, timeouts
self.cache_times = {}
for instance in self.instances:
i_key = self._instance_key(instance)
self.cache_times[i_key] = {
MORLIST: {
LAST: 0,
INTERVAL: init_config.get('refresh_morlist_interval',
REFRESH_MORLIST_INTERVAL)
},
METRICS_METADATA: {
LAST: 0,
INTERVAL: init_config.get('refresh_metrics_metadata_interval',
REFRESH_METRICS_METADATA_INTERVAL)
}
}
self.event_config[i_key] = instance.get('event_config')
# First layer of cache (get entities from the tree)
self.morlist_raw = {}
# Second layer, processed from the first one
self.morlist = {}
# Metrics metadata, basically perfCounterId -> {name, group, description}
self.metrics_metadata = {}
self.latest_event_query = {}
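# Rough shape of the per-instance caches built above (illustrative):
#
#     self.cache_times['my-vcenter'] = {
#         MORLIST: {LAST: <timestamp>, INTERVAL: <seconds>},
#         METRICS_METADATA: {LAST: <timestamp>, INTERVAL: <seconds>},
#     }
#     self.morlist['my-vcenter'][str(mor)] = {
#         'mor_type': 'vm'/'host', 'mor': <pyVmomi reference>,
#         'hostname': ..., 'tags': [...], 'metrics': [...],
#         'last_seen': <timestamp>,
#     }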
def stop(self):
self.stop_pool()
def start_pool(self):
self.log.info("Starting Thread Pool")
self.pool_size = int(self.init_config.get('threads_count', DEFAULT_SIZE_POOL))
self.pool = Pool(self.pool_size)
self.pool_started = True
self.jobs_status = {}
def stop_pool(self):
self.log.info("Stopping Thread Pool")
if self.pool_started:
self.pool.terminate()
self.pool.join()
self.jobs_status.clear()
assert self.pool.get_nworkers() == 0
self.pool_started = False
def restart_pool(self):
self.stop_pool()
self.start_pool()
def _clean(self):
now = time.time()
# Reap jobs that have been running for longer than JOB_TIMEOUT.
for name in self.jobs_status.keys():
start_time = self.jobs_status[name]
if now - start_time > JOB_TIMEOUT:
self.log.critical("Restarting Pool. One check is stuck.")
self.restart_pool()
break
def _query_event(self, instance):
i_key = self._instance_key(instance)
last_time = self.latest_event_query.get(i_key)
server_instance = self._get_server_instance(instance)
event_manager = server_instance.content.eventManager
# Be sure we don't duplicate any event; never query the "past"
if not last_time:
last_time = self.latest_event_query[i_key] = \
event_manager.latestEvent.createdTime + timedelta(seconds=1)
query_filter = vim.event.EventFilterSpec()
time_filter = vim.event.EventFilterSpec.ByTime(beginTime=self.latest_event_query[i_key])
query_filter.time = time_filter
try:
new_events = event_manager.QueryEvents(query_filter)
self.log.debug("Got {0} events from vCenter event manager".format(len(new_events)))
for event in new_events:
normalized_event = VSphereEvent(event, self.event_config[i_key])
# Can return None if the event is filtered out
event_payload = normalized_event.get_datadog_payload()
if event_payload is not None:
self.event(event_payload)
last_time = event.createdTime + timedelta(seconds=1)
except Exception as e:
# Don't get stuck on a failure to fetch an event
# Ignore them for the next pass
self.log.warning("Unable to fetch Events %s", e)
last_time = event_manager.latestEvent.createdTime + timedelta(seconds=1)
self.latest_event_query[i_key] = last_time
def _instance_key(self, instance):
i_key = instance.get('name')
if i_key is None:
raise Exception("Must define a unique 'name' per vCenter instance")
return i_key
def _should_cache(self, instance, entity):
i_key = self._instance_key(instance)
now = time.time()
return now - self.cache_times[i_key][entity][LAST] > self.cache_times[i_key][entity][INTERVAL]
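# Worked example (values illustrative): LAST is initialized to 0 in
# __init__, so on the very first check() `now - 0` exceeds any interval
# and every cache is refreshed; afterwards a cache is only refreshed
# once its configured INTERVAL has elapsed since the last refresh.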
def _get_server_instance(self, instance):
i_key = self._instance_key(instance)
service_check_tags = [
'vcenter_server:{0}'.format(instance.get('name')),
'vcenter_host:{0}'.format(instance.get('host')),
]
if i_key not in self.server_instances:
try:
server_instance = connect.SmartConnect(
host=instance.get('host'),
user=instance.get('username'),
pwd=instance.get('password')
)
except Exception as e:
err_msg = "Connection to %s failed: %s" % (instance.get('host'), e)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=err_msg)
raise Exception(err_msg)
self.server_instances[i_key] = server_instance
# Test if the connection is working
try:
server_instance.RetrieveContent()
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=service_check_tags)
except Exception as e:
err_msg = "Connection to %s died unexpectedly: %s" % (instance.get('host'), e)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
tags=service_check_tags, message=err_msg)
raise Exception(err_msg)
return self.server_instances[i_key]
def _compute_needed_metrics(self, instance, available_metrics):
""" Compare the available metrics for one MOR we have computed and intersect them
with the set of metrics we want to report
"""
if instance.get('all_metrics', False):
return available_metrics
i_key = self._instance_key(instance)
wanted_metrics = []
# Get only the basic metrics
for metric in available_metrics:
# No cache yet, skip it for now
if i_key not in self.metrics_metadata\
or metric.counterId not in self.metrics_metadata[i_key]:
continue
if self.metrics_metadata[i_key][metric.counterId]['name'] in BASIC_METRICS:
wanted_metrics.append(metric)
return wanted_metrics
def get_external_host_tags(self):
""" Returns a list of tags for every host that is detected by the vSphere
integration.
List of pairs (hostname, list_of_tags)
"""
self.log.info("Sending external_host_tags now")
external_host_tags = []
for instance in self.instances:
i_key = self._instance_key(instance)
mor_list = self.morlist[i_key].items()
for mor_name, mor in mor_list:
external_host_tags.append((mor['hostname'], {SOURCE_TYPE: mor['tags']}))
return external_host_tags
@atomic_method
def _cache_morlist_raw_atomic(self, i_key, obj_type, obj, tags, regexes=None):
""" Compute tags for a single node in the vCenter rootFolder
and queue other such jobs for its child nodes.
Usual hierarchy:
rootFolder
- datacenter1
- compute_resource1 == cluster
- host1
- host2
- host3
- compute_resource2
- host5
- vm1
- vm2
If it's a node we want to query metrics for, queue it in self.morlist_raw
that will be processed by another job.
"""
### <TEST-INSTRUMENTATION>
t = Timer()
self.log.debug("job_atomic: Exploring MOR {0} (type={1})".format(obj, obj_type))
### </TEST-INSTRUMENTATION>
tags_copy = deepcopy(tags)
if obj_type == 'rootFolder':
for datacenter in obj.childEntity:
# Skip non-datacenter
if not hasattr(datacenter, 'hostFolder'):
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'datacenter', datacenter, tags_copy, regexes)
)
elif obj_type == 'datacenter':
dc_tag = "vsphere_datacenter:%s" % obj.name
tags_copy.append(dc_tag)
for compute_resource in obj.hostFolder.childEntity:
# Skip non-compute resource
if not hasattr(compute_resource, 'host'):
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'compute_resource', compute_resource, tags_copy, regexes)
)
elif obj_type == 'compute_resource':
if obj.__class__ == vim.ClusterComputeResource:
cluster_tag = "vsphere_cluster:%s" % obj.name
tags_copy.append(cluster_tag)
for host in obj.host:
# Skip non-host
if not hasattr(host, 'vm'):
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'host', host, tags_copy, regexes)
)
elif obj_type == 'host':
if regexes and regexes.get('host_include') is not None:
match = re.search(regexes['host_include'], obj.name)
if not match:
self.log.debug(u"Filtered out VM {0} because of host_include_only_regex".format(obj.name))
return
watched_mor = dict(mor_type='host', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:host'])
self.morlist_raw[i_key].append(watched_mor)
host_tag = "vsphere_host:%s" % obj.name
tags_copy.append(host_tag)
for vm in obj.vm:
if vm.runtime.powerState != 'poweredOn':
continue
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'vm', vm, tags_copy, regexes)
)
elif obj_type == 'vm':
if regexes and regexes.get('vm_include') is not None:
match = re.search(regexes['vm_include'], obj.name)
if not match:
self.log.debug(u"Filtered out VM {0} because of vm_include_only_regex".format(obj.name))
return
watched_mor = dict(mor_type='vm', mor=obj, hostname=obj.name, tags=tags_copy+['vsphere_type:vm'])
self.morlist_raw[i_key].append(watched_mor)
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.morlist_raw_atomic.time', t.total())
### </TEST-INSTRUMENTATION>
def _cache_morlist_raw(self, instance):
""" Initiate the first layer to refresh self.morlist by queueing
_cache_morlist_raw_atomic on the rootFolder in a recursive/asncy approach
"""
i_key = self._instance_key(instance)
self.log.debug("Caching the morlist for vcenter instance %s" % i_key)
if i_key in self.morlist_raw and len(self.morlist_raw[i_key]) > 0:
self.log.debug("Skipping morlist collection now, RAW results processing not over (latest refresh was {0}s ago)"\
.format(time.time() - self.cache_times[i_key][MORLIST][LAST]))
return
self.morlist_raw[i_key] = []
server_instance = self._get_server_instance(instance)
root_folder = server_instance.content.rootFolder
instance_tag = "vcenter_server:%s" % instance.get('name')
regexes = {
'host_include': instance.get('host_include_only_regex'),
'vm_include': instance.get('vm_include_only_regex')
}
self.pool.apply_async(
self._cache_morlist_raw_atomic,
args=(i_key, 'rootFolder', root_folder, [instance_tag], regexes)
)
self.cache_times[i_key][MORLIST][LAST] = time.time()
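# Hypothetical instance config feeding the regexes above (keys match the
# instance.get() calls in this method):
#
#     instances:
#       - name: main-vcenter
#         host: vcenter.example.com
#         host_include_only_regex: 'esxi-prod-'
#         vm_include_only_regex: 'web|db'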
@atomic_method
def _cache_morlist_process_atomic(self, instance, mor):
""" Process one item of the self.morlist_raw list by querying the available
metrics for this MOR and then putting it in self.morlist
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
self.log.debug("job_atomic: Querying available metrics for MOR {0} (type={1})"\
.format(mor['mor'], mor['mor_type']))
available_metrics = perfManager.QueryAvailablePerfMetric(
mor['mor'], intervalId=REAL_TIME_INTERVAL)
mor['metrics'] = self._compute_needed_metrics(instance, available_metrics)
mor_name = str(mor['mor'])
if mor_name in self.morlist[i_key]:
# Was already here last iteration
self.morlist[i_key][mor_name]['metrics'] = mor['metrics']
else:
self.morlist[i_key][mor_name] = mor
self.morlist[i_key][mor_name]['last_seen'] = time.time()
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.morlist_process_atomic.time', t.total())
### </TEST-INSTRUMENTATION>
def _cache_morlist_process(self, instance):
""" Empties the self.morlist_raw by popping items and running asynchronously
the _cache_morlist_process_atomic operation that will get the available
metrics for this MOR and put it in self.morlist
"""
i_key = self._instance_key(instance)
if i_key not in self.morlist:
self.morlist[i_key] = {}
batch_size = self.init_config.get('batch_morlist_size', BATCH_MORLIST_SIZE)
for i in xrange(batch_size):
try:
mor = self.morlist_raw[i_key].pop()
self.pool.apply_async(self._cache_morlist_process_atomic, args=(instance, mor))
except (IndexError, KeyError):
self.log.debug("No more work to process in morlist_raw")
return
def _vacuum_morlist(self, instance):
""" Check if self.morlist doesn't have some old MORs that are gone, ie
we cannot get any metrics from them anyway (or =0)
"""
i_key = self._instance_key(instance)
morlist = self.morlist[i_key].items()
for mor_name, mor in morlist:
last_seen = mor['last_seen']
if (time.time() - last_seen) > 2 * REFRESH_MORLIST_INTERVAL:
del self.morlist[i_key][mor_name]
def _cache_metrics_metadata(self, instance):
""" Get from the server instance, all the performance counters metadata
meaning name/group/description... attached with the corresponding ID
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
self.log.info("Warming metrics metadata cache for instance {0}".format(i_key))
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
new_metadata = {}
for counter in perfManager.perfCounter:
d = dict(
name="%s.%s" % (counter.groupInfo.key, counter.nameInfo.key),
unit=counter.unitInfo.key,
instance_tag='instance'  # FIXME: replace with what we actually want to tag!
)
new_metadata[counter.key] = d
self.cache_times[i_key][METRICS_METADATA][LAST] = time.time()
self.log.info("Finished metadata collection for instance {0}".format(i_key))
# Reset metadata
self.metrics_metadata[i_key] = new_metadata
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.metric_metadata_collection.time', t.total())
### </TEST-INSTRUMENTATION>
def _transform_value(self, instance, counter_id, value):
""" Given the counter_id, look up for the metrics metadata to check the vsphere
type of the counter and apply pre-reporting transformation if needed.
"""
i_key = self._instance_key(instance)
if counter_id in self.metrics_metadata[i_key]:
unit = self.metrics_metadata[i_key][counter_id]['unit']
if unit == 'percent':
return float(value) / 100
# Defaults to return the value without transformation
return value
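# Example: vSphere exposes 'percent' counters in hundredths of a percent,
# so a raw cpu.usage sample of 4250 (i.e. 42.50%) is reported to Datadog
# as 42.5 after the division above; values with any other unit pass
# through unchanged.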
@atomic_method
def _collect_metrics_atomic(self, instance, mor):
""" Task that collects the metrics listed in the morlist for one MOR
"""
### <TEST-INSTRUMENTATION>
t = Timer()
### </TEST-INSTRUMENTATION>
i_key = self._instance_key(instance)
server_instance = self._get_server_instance(instance)
perfManager = server_instance.content.perfManager
query = vim.PerformanceManager.QuerySpec(maxSample=1,
entity=mor['mor'],
metricId=mor['metrics'],
intervalId=20,
format='normal')
results = perfManager.QueryPerf(querySpec=[query])
if results:
for result in results[0].value:
if result.id.counterId not in self.metrics_metadata[i_key]:
self.log.debug("Skipping this metric value, because there is no metadata about it")
continue
instance_name = result.id.instance or "none"
value = self._transform_value(instance, result.id.counterId, result.value[0])
self.gauge("vsphere.%s" % self.metrics_metadata[i_key][result.id.counterId]['name'],
value,
hostname=mor['hostname'],
tags=['instance:%s' % instance_name]
)
### <TEST-INSTRUMENTATION>
self.histogram('datadog.agent.vsphere.metric_collection.time', t.total())
### </TEST-INSTRUMENTATION>
def collect_metrics(self, instance):
""" Calls asynchronously _collect_metrics_atomic on all MORs, as the
job queue is processed the Aggregator will receive the metrics.
"""
i_key = self._instance_key(instance)
if i_key not in self.morlist:
self.log.debug("Not collecting metrics for this instance, nothing to do yet: {0}".format(i_key))
return
mors = self.morlist[i_key].items()
self.log.debug("Collecting metrics of %d mors" % len(mors))
vm_count = 0
for mor_name, mor in mors:
if mor['mor_type'] == 'vm':
vm_count += 1
if 'metrics' not in mor:
# self.log.debug("Skipping entity %s collection because we didn't cache its metrics yet" % mor['hostname'])
continue
self.pool.apply_async(self._collect_metrics_atomic, args=(instance, mor))
self.gauge('vsphere.vm.count', vm_count, tags=["vcenter_server:%s" % instance.get('name')])
def check(self, instance):
if not self.pool_started:
self.start_pool()
### <TEST-INSTRUMENTATION>
self.gauge('datadog.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:initial'])
### </TEST-INSTRUMENTATION>
# First part: make sure our object repository is neat & clean
if self._should_cache(instance, METRICS_METADATA):
self._cache_metrics_metadata(instance)
if self._should_cache(instance, MORLIST):
self._cache_morlist_raw(instance)
self._cache_morlist_process(instance)
self._vacuum_morlist(instance)
# Second part: do the job
self.collect_metrics(instance)
self._query_event(instance)
# For our own sanity
self._clean()
thread_crashed = False
try:
while True:
self.log.critical(self.exceptionq.get_nowait())
thread_crashed = True
except Empty:
pass
if thread_crashed:
self.stop_pool()
raise Exception("One thread in the pool crashed, check the logs")
### <TEST-INSTRUMENTATION>
self.gauge('datadog.agent.vsphere.queue_size', self.pool._workq.qsize(), tags=['instant:final'])
### </TEST-INSTRUMENTATION>
if __name__ == '__main__':
check, _instances = VSphereCheck.from_yaml('conf.d/vsphere.yaml')
try:
for i in xrange(200):
print "Loop %d" % i
for instance in check.instances:
check.check(instance)
if check.has_events():
print 'Events: %s' % (check.get_events())
print 'Metrics: %d' % (len(check.get_metrics()))
time.sleep(10)
except Exception as e:
print "Whoops something happened {0}".format(traceback.format_exc())
finally:
check.stop()
import json
import mimetypes
import os
import sys
import time
import traceback
from collections import defaultdict
from datetime import datetime
from django import forms as django_forms
from django import http
from django.contrib import messages
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
import commonware.log
import waffle
from rest_framework import status as http_status
from rest_framework.generics import CreateAPIView, ListAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from session_csrf import anonymous_csrf, anonymous_csrf_exempt
from tower import ugettext as _
from waffle.decorators import waffle_switch
import mkt
import lib.iarc
from lib.iarc.utils import get_iarc_app_title
from mkt.access import acl
from mkt.api.base import CORSMixin, SlugOrIdMixin
from mkt.api.models import Access
from mkt.comm.utils import create_comm_note
from mkt.constants import comm
from mkt.developers.decorators import dev_required
from mkt.developers.forms import (
APIConsumerForm, AppFormBasic, AppFormDetails, AppFormMedia,
AppFormSupport, AppFormTechnical, AppVersionForm, CategoryForm,
ContentRatingForm, IARCGetAppInfoForm, MOTDForm, NewPackagedAppForm,
PreloadTestPlanForm, PreviewFormSet, TransactionFilterForm, trap_duplicate)
from mkt.developers.models import AppLog, PreloadTestPlan
from mkt.developers.serializers import ContentRatingSerializer
from mkt.developers.tasks import (
fetch_manifest, file_validator, run_validator,
save_test_plan, validator)
from mkt.developers.utils import (
check_upload, escalate_reserved_permissions, handle_vip)
from mkt.files.models import File, FileUpload
from mkt.files.utils import parse_addon
from mkt.purchase.models import Contribution
from mkt.reviewers.models import QUEUE_TARAKO
from mkt.site.decorators import (
json_view, login_required, permission_required, use_master)
from mkt.site.utils import escape_all, paginate
from mkt.submit.forms import AppFeaturesForm, NewWebappVersionForm
from mkt.translations.query import order_by_translation
from mkt.users.models import UserProfile
from mkt.users.views import _clean_next_url
from mkt.versions.models import Version
from mkt.webapps.decorators import app_view
from mkt.webapps.models import AddonUser, ContentRating, IARCInfo, Webapp
from mkt.webapps.tasks import _update_manifest, update_manifests
from mkt.zadmin.models import set_config, unmemoized_get_config
from . import forms
log = commonware.log.getLogger('z.devhub')
# We use a session cookie to make sure people see the dev agreement.
DEV_AGREEMENT_COOKIE = 'yes-I-read-the-dev-agreement'
def addon_listing(request):
"""Set up the queryset and filtering for addon listing for Dashboard."""
qs = request.user.addons.all()
sorting = 'name'
if request.GET.get('sort') == 'created':
sorting = 'created'
qs = qs.order_by('-created')
else:
qs = order_by_translation(qs, 'name')
return qs, sorting
@anonymous_csrf
def login(request, template=None):
if 'to' in request.GET:
request = _clean_next_url(request)
data = {
'to': request.GET.get('to')
}
if request.user.is_authenticated():
return http.HttpResponseRedirect(
request.GET.get('to', settings.LOGIN_REDIRECT_URL))
return render(request, 'developers/login.html', data)
def home(request):
return index(request)
@login_required
def index(request):
# This is a temporary redirect.
return redirect('mkt.developers.apps')
@login_required
def dashboard(request):
addons, sorting = addon_listing(request)
addons = paginate(request, addons, per_page=10)
data = {
'addons': addons,
'sorting': sorting,
'motd': unmemoized_get_config('mkt_developers_motd')
}
return render(request, 'developers/apps/dashboard.html', data)
@dev_required(staff=True)
def edit(request, addon_id, addon):
data = {
'page': 'edit',
'addon': addon,
'valid_slug': addon.app_slug,
'tags': addon.tags.not_blocked().values_list('tag_text', flat=True),
'previews': addon.get_previews(),
'version': addon.current_version or addon.latest_version
}
if not addon.is_packaged and data['version']:
data['feature_list'] = [unicode(f) for f in
data['version'].features.to_list()]
if acl.action_allowed(request, 'Apps', 'Configure'):
data['admin_settings_form'] = forms.AdminSettingsForm(instance=addon,
request=request)
return render(request, 'developers/apps/edit.html', data)
@dev_required(owner_for_post=True)
@require_POST
def delete(request, addon_id, addon):
# Database deletes only allowed for free or incomplete addons.
if not addon.can_be_deleted():
msg = _('Paid apps cannot be deleted. Disable this app instead.')
messages.error(request, msg)
return redirect(addon.get_dev_url('versions'))
# TODO: Force the user to re-auth with BrowserID (this DeleteForm doesn't
# ask the user for their password).
form = forms.DeleteForm(request)
if form.is_valid():
reason = form.cleaned_data.get('reason', '')
addon.delete(msg='Removed via devhub', reason=reason)
messages.success(request, _('App deleted.'))
# Preserve query-string parameters if we were directed from Dashboard.
return redirect(request.GET.get('to') or
reverse('mkt.developers.apps'))
else:
msg = _('Password was incorrect. App was not deleted.')
messages.error(request, msg)
return redirect(addon.get_dev_url('versions'))
@dev_required
@require_POST
def enable(request, addon_id, addon):
addon.update(disabled_by_user=False)
mkt.log(mkt.LOG.USER_ENABLE, addon)
return redirect(addon.get_dev_url('versions'))
@dev_required
@require_POST
def disable(request, addon_id, addon):
addon.update(disabled_by_user=True)
mkt.log(mkt.LOG.USER_DISABLE, addon)
return redirect(addon.get_dev_url('versions'))
@dev_required
def status(request, addon_id, addon):
appeal_form = forms.AppAppealForm(request.POST, product=addon)
upload_form = NewWebappVersionForm(request.POST or None, is_packaged=True,
addon=addon, request=request)
publish_form = forms.PublishForm(
request.POST if 'publish-app' in request.POST else None, addon=addon)
if request.method == 'POST':
if 'resubmit-app' in request.POST and appeal_form.is_valid():
if not addon.is_rated():
# Cannot resubmit without content ratings.
return http.HttpResponseForbidden(
'This app must obtain content ratings before being '
'resubmitted.')
appeal_form.save()
create_comm_note(addon, addon.latest_version,
request.user, appeal_form.data['notes'],
note_type=comm.RESUBMISSION)
if addon.vip_app:
handle_vip(addon, addon.latest_version, request.user)
messages.success(request, _('App successfully resubmitted.'))
return redirect(addon.get_dev_url('versions'))
elif 'upload-version' in request.POST and upload_form.is_valid():
upload = upload_form.cleaned_data['upload']
ver = Version.from_upload(upload, addon)
# Update addon status now that the new version was saved.
addon.update_status()
res = run_validator(ver.all_files[0].file_path)
validation_result = json.loads(res)
# Escalate the version if it uses reserved permissions.
escalate_reserved_permissions(addon, validation_result, ver)
# Set all detected features as True and save them.
keys = ['has_%s' % feature.lower()
for feature in validation_result['feature_profile']]
data = defaultdict.fromkeys(keys, True)
# Set "Smartphone-Sized Displays" if it's a mobile-only app.
qhd_devices = (set((mkt.DEVICE_GAIA,)),
set((mkt.DEVICE_MOBILE,)),
set((mkt.DEVICE_GAIA, mkt.DEVICE_MOBILE,)))
mobile_only = (addon.latest_version and
addon.latest_version.features.has_qhd)
if set(addon.device_types) in qhd_devices or mobile_only:
data['has_qhd'] = True
# Update feature profile for this version.
ver.features.update(**data)
messages.success(request, _('New version successfully added.'))
log.info('[Webapp:%s] New version created id=%s from upload: %s'
% (addon, ver.pk, upload))
if addon.vip_app:
handle_vip(addon, ver, request.user)
return redirect(addon.get_dev_url('versions.edit', args=[ver.pk]))
elif 'publish-app' in request.POST and publish_form.is_valid():
publish_form.save()
return redirect(addon.get_dev_url('versions'))
ctx = {
'addon': addon,
'appeal_form': appeal_form,
'is_tarako': addon.tags.filter(tag_text=QUEUE_TARAKO).exists(),
'tarako_review': addon.additionalreview_set
.latest_for_queue(QUEUE_TARAKO),
'publish_form': publish_form,
'QUEUE_TARAKO': QUEUE_TARAKO,
'upload_form': upload_form,
}
# Used in the delete version modal.
if addon.is_packaged:
versions = addon.versions.values('id', 'version')
version_strings = dict((v['id'], v) for v in versions)
version_strings['num'] = len(versions)
ctx['version_strings'] = json.dumps(version_strings)
if addon.status == mkt.STATUS_REJECTED:
try:
entry = (AppLog.objects
.filter(addon=addon,
activity_log__action=mkt.LOG.REJECT_VERSION.id)
.order_by('-created'))[0]
except IndexError:
entry = None
# This contains the rejection reason and timestamp.
ctx['rejection'] = entry and entry.activity_log
if waffle.switch_is_active('preload-apps'):
test_plan = PreloadTestPlan.objects.filter(
addon=addon, status=mkt.STATUS_PUBLIC)
if test_plan.exists():
test_plan = test_plan[0]
if (test_plan.last_submission <
settings.PREINSTALL_TEST_PLAN_LATEST):
ctx['outdated_test_plan'] = True
ctx['next_step_suffix'] = 'submit'
else:
ctx['next_step_suffix'] = 'home'
ctx['test_plan'] = test_plan
return render(request, 'developers/apps/status.html', ctx)
@permission_required([('DeveloperMOTD', 'Edit')])
def motd(request):
message = unmemoized_get_config('mkt_developers_motd')
form = MOTDForm(request.POST or None, initial={'motd': message})
if request.method == 'POST' and form and form.is_valid():
set_config('mkt_developers_motd', form.cleaned_data['motd'])
messages.success(request, _('Changes successfully saved.'))
return redirect(reverse('mkt.developers.motd'))
return render(request, 'developers/motd.html', {'form': form})
def _submission_msgs():
return {
'complete': _('Congratulations, your app submission is now complete '
'and will be reviewed shortly!'),
'content_ratings_saved': _('Content ratings successfully saved.'),
}
def _ratings_success_msg(app, old_status, old_modified):
"""
Ratings can be created via IARC pinging our API.
Thus we can't display a success message via the standard POST/request/response
cycle. As a workaround, we store the app's rating's `modified` time from the
edit page; when the user lands back on the ratings summary page, we calculate
which message to show.
old_status -- app status during ratings edit page.
old_modified -- rating modified datetime during ratings edit page.
"""
if old_modified:
old_modified = datetime.strptime(
old_modified, '%Y-%m-%dT%H:%M:%S')
if old_status != app.status:
# App just created a rating to go pending, show 'app now pending'.
return _submission_msgs()['complete']
elif old_modified != app.last_rated_time():
# App created/updated a rating but was already pending/public, show 'ok'.
return _submission_msgs()['content_ratings_saved']
@dev_required
def content_ratings(request, addon_id, addon):
if not addon.is_rated():
return redirect(addon.get_dev_url('ratings_edit'))
# Use _ratings_success_msg to display success message.
session = request.session
app_id = str(addon.id)
if 'ratings_edit' in session and app_id in session['ratings_edit']:
prev_state = session['ratings_edit'][app_id]
msg = _ratings_success_msg(
addon, prev_state['app_status'], prev_state['rating_modified'])
if msg:
messages.success(request, msg)
del session['ratings_edit'][app_id] # Clear msg so not shown again.
request.session.modified = True
return render(request, 'developers/apps/ratings/ratings_summary.html',
{'addon': addon})
@dev_required
def content_ratings_edit(request, addon_id, addon):
initial = {}
try:
app_info = addon.iarc_info
initial['submission_id'] = app_info.submission_id
initial['security_code'] = app_info.security_code
except IARCInfo.DoesNotExist:
pass
messages.debug(request,
"DEBUG mode on; you may use IARC id 0 with any code")
form = IARCGetAppInfoForm(data=request.POST or None, initial=initial,
app=addon)
if request.method == 'POST' and form.is_valid():
try:
form.save()
return redirect(addon.get_dev_url('ratings'))
except django_forms.ValidationError:
pass # Fall through to show the form error.
# Save some information for _ratings_success_msg.
if 'ratings_edit' not in request.session:
request.session['ratings_edit'] = {}
last_rated = addon.last_rated_time()
request.session['ratings_edit'][str(addon.id)] = {
'app_status': addon.status,
'rating_modified': last_rated.isoformat() if last_rated else None
}
request.session.modified = True
return render(request, 'developers/apps/ratings/ratings_edit.html',
{'addon': addon,
'app_name': get_iarc_app_title(addon),
'form': form,
'company': addon.latest_version.developer_name,
'now': datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
@waffle_switch('preload-apps')
@dev_required
def preload_home(request, addon_id, addon):
"""
Gives information on the preload process and links to the test plan template.
"""
return render(request, 'developers/apps/preload/home.html',
{'addon': addon})
@waffle_switch('preload-apps')
@dev_required(owner_for_post=True)
def preload_submit(request, addon_id, addon):
if request.method == 'POST':
form = PreloadTestPlanForm(request.POST, request.FILES)
if form.is_valid():
# Save test plan file.
test_plan = request.FILES['test_plan']
# Figure out the type to save it as (cleaned to pdf/xls by the form).
filetype = mimetypes.guess_type(test_plan.name)[0]
if filetype and 'pdf' in filetype:
filename = 'test_plan_%s.pdf'
else:
filename = 'test_plan_%s.xls'
# Timestamp.
filename = filename % str(time.time()).split('.')[0]
save_test_plan(request.FILES['test_plan'], filename, addon)
# Log test plan.
PreloadTestPlan.objects.filter(addon=addon).update(
status=mkt.STATUS_DISABLED
)
PreloadTestPlan.objects.create(addon=addon, filename=filename)
messages.success(
request,
_('Application for preload successfully submitted.'))
return redirect(addon.get_dev_url('versions'))
else:
messages.error(request, _('There was an error with the form.'))
else:
form = PreloadTestPlanForm()
return render(request, 'developers/apps/preload/submit.html',
{'addon': addon, 'form': form})
@dev_required
def version_edit(request, addon_id, addon, version_id):
show_features = addon.is_packaged
formdata = request.POST if request.method == 'POST' else None
version = get_object_or_404(Version, pk=version_id, addon=addon)
version.addon = addon # Avoid extra useless query.
form = AppVersionForm(formdata, instance=version)
all_forms = [form]
if show_features:
appfeatures = version.features
appfeatures_form = AppFeaturesForm(formdata, instance=appfeatures)
all_forms.append(appfeatures_form)
if request.method == 'POST' and all(f.is_valid() for f in all_forms):
for f in all_forms:
f.save()
# Use `form` (the AppVersionForm) explicitly instead of relying on the
# loop variable leaking out of the loop above.
if form.data.get('approvalnotes'):
create_comm_note(
addon, version, request.user, form.data['approvalnotes'],
note_type=comm.DEVELOPER_VERSION_NOTE_FOR_REVIEWER)
messages.success(request, _('Version successfully edited.'))
return redirect(addon.get_dev_url('versions'))
context = {
'addon': addon,
'version': version,
'form': form
}
if show_features:
context.update({
'appfeatures_form': appfeatures_form,
'appfeatures': appfeatures,
'feature_list': [unicode(f) for f in appfeatures.to_list()]
})
return render(request, 'developers/apps/version_edit.html', context)
@dev_required
@require_POST
def version_publicise(request, addon_id, addon):
version_id = request.POST.get('version_id')
version = get_object_or_404(Version, pk=version_id, addon=addon)
if version.all_files[0].status == mkt.STATUS_APPROVED:
File.objects.filter(version=version).update(status=mkt.STATUS_PUBLIC)
mkt.log(mkt.LOG.CHANGE_VERSION_STATUS, unicode(version.status[0]),
version)
# Call update_version, so various other bits of data update.
addon.update_version()
# Update names and locales if they changed.
addon.update_name_from_package_manifest()
addon.update_supported_locales()
messages.success(request, _('Version successfully made active.'))
return redirect(addon.get_dev_url('versions'))
@dev_required
@require_POST
def version_delete(request, addon_id, addon):
version_id = request.POST.get('version_id')
version = get_object_or_404(Version, pk=version_id, addon=addon)
if version.all_files[0].status == mkt.STATUS_BLOCKED:
raise PermissionDenied
version.delete()
messages.success(request,
_('Version "{0}" deleted.').format(version.version))
return redirect(addon.get_dev_url('versions'))
@dev_required(owner_for_post=True)
def ownership(request, addon_id, addon):
# Authors.
qs = AddonUser.objects.filter(addon=addon).order_by('position')
user_form = forms.AuthorFormSet(request.POST or None, queryset=qs)
if request.method == 'POST' and user_form.is_valid():
# Authors.
authors = user_form.save(commit=False)
redirect_url = addon.get_dev_url('owner')
for author in authors:
action = None
if not author.id or author.user_id != author._original_user_id:
action = mkt.LOG.ADD_USER_WITH_ROLE
author.addon = addon
elif author.role != author._original_role:
action = mkt.LOG.CHANGE_USER_WITH_ROLE
author.save()
if action:
mkt.log(action, author.user, author.get_role_display(), addon)
if (author._original_user_id and
author.user_id != author._original_user_id):
mkt.log(mkt.LOG.REMOVE_USER_WITH_ROLE,
(UserProfile, author._original_user_id),
author.get_role_display(), addon)
# Unsubscribe user from emails (Commbadge).
author.user.comm_thread_cc.filter(
thread___addon=addon).delete()
for author in user_form.deleted_objects:
author.delete()
if author.user_id == request.user.id:
# The current user removed their own access to the app.
redirect_url = reverse('mkt.developers.apps')
mkt.log(mkt.LOG.REMOVE_USER_WITH_ROLE, author.user,
author.get_role_display(), addon)
# Unsubscribe user from emails (Commbadge).
author.user.comm_thread_cc.filter(thread___addon=addon).delete()
messages.success(request, _('Changes successfully saved.'))
return redirect(redirect_url)
ctx = dict(addon=addon, user_form=user_form)
return render(request, 'developers/apps/owner.html', ctx)
@anonymous_csrf
def validate_app(request):
return render(request, 'developers/validate_app.html', {
'upload_hosted_url':
reverse('mkt.developers.standalone_hosted_upload'),
'upload_packaged_url':
reverse('mkt.developers.standalone_packaged_upload'),
})
@require_POST
def _upload(request, addon=None, is_standalone=False):
user = request.user
# If there is no user, default to None (saves the file upload as anon).
form = NewPackagedAppForm(request.POST, request.FILES,
user=user if user.is_authenticated() else None,
addon=addon)
if form.is_valid():
validator.delay(form.file_upload.pk)
if addon:
return redirect('mkt.developers.upload_detail_for_addon',
addon.app_slug, form.file_upload.pk)
elif is_standalone:
return redirect('mkt.developers.standalone_upload_detail',
'packaged', form.file_upload.pk)
else:
return redirect('mkt.developers.upload_detail',
form.file_upload.pk, 'json')
@login_required
def upload_new(*args, **kwargs):
return _upload(*args, **kwargs)
@anonymous_csrf
def standalone_packaged_upload(request):
return _upload(request, is_standalone=True)
@dev_required
def upload_for_addon(request, addon_id, addon):
return _upload(request, addon=addon)
@dev_required
@require_POST
def refresh_manifest(request, addon_id, addon):
log.info('Manifest %s refreshed for %s' % (addon.manifest_url, addon))
_update_manifest(addon_id, True, {})
return http.HttpResponse(status=204)
@require_POST
@json_view
@use_master
def _upload_manifest(request, is_standalone=False):
form = forms.NewManifestForm(request.POST, is_standalone=is_standalone)
if (not is_standalone and
waffle.switch_is_active('webapps-unique-by-domain')):
# Helpful error if user already submitted the same manifest.
dup_msg = trap_duplicate(request, request.POST.get('manifest'))
if dup_msg:
return {
'validation': {
'errors': 1, 'success': False,
'messages': [{
'type': 'error', 'message': dup_msg, 'tier': 1}]
}
}
if form.is_valid():
user = request.user if request.user.is_authenticated() else None
upload = FileUpload.objects.create(user=user)
fetch_manifest.delay(form.cleaned_data['manifest'], upload.pk)
if is_standalone:
return redirect('mkt.developers.standalone_upload_detail',
'hosted', upload.pk)
else:
return redirect('mkt.developers.upload_detail', upload.pk, 'json')
else:
error_text = _('There was an error with the submission.')
if 'manifest' in form.errors:
error_text = ' '.join(form.errors['manifest'])
error_message = {'type': 'error', 'message': error_text, 'tier': 1}
v = {'errors': 1, 'success': False, 'messages': [error_message]}
return make_validation_result(dict(validation=v, error=error_text))
@login_required
def upload_manifest(*args, **kwargs):
"""Wrapper function for `_upload_manifest` so we can keep the
standalone validator separate from the manifest upload stuff.
"""
return _upload_manifest(*args, **kwargs)
def standalone_hosted_upload(request):
return _upload_manifest(request, is_standalone=True)
@json_view
@anonymous_csrf_exempt
def standalone_upload_detail(request, type_, uuid):
upload = get_object_or_404(FileUpload, uuid=uuid)
url = reverse('mkt.developers.standalone_upload_detail',
args=[type_, uuid])
return upload_validation_context(request, upload, url=url)
@dev_required
@json_view
def upload_detail_for_addon(request, addon_id, addon, uuid):
upload = get_object_or_404(FileUpload, uuid=uuid)
return json_upload_detail(request, upload, addon=addon)
def make_validation_result(data):
"""Safe wrapper around JSON dict containing a validation result."""
if not settings.EXPOSE_VALIDATOR_TRACEBACKS:
if data['error']:
data['error'] = _('An error occurred validating the manifest.')
if data['validation']:
for msg in data['validation']['messages']:
for k, v in msg.items():
msg[k] = escape_all(v, linkify=k in ('message', 'description'))
return data
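# Illustrative behaviour (hypothetical payload): with
# settings.EXPOSE_VALIDATOR_TRACEBACKS disabled,
#
#     make_validation_result({
#         'error': 'Traceback (most recent call last): ...',
#         'validation': {'messages': [{'message': '<b>boom</b>'}]},
#     })
#
# swaps the traceback for a generic error string and passes every message
# value through escape_all(), linkifying only the 'message' and
# 'description' keys.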
@dev_required(allow_editors=True)
def file_validation(request, addon_id, addon, file_id):
file = get_object_or_404(File, id=file_id)
v = addon.get_dev_url('json_file_validation', args=[file.id])
return render(request, 'developers/validation.html',
dict(validate_url=v, filename=file.filename,
timestamp=file.created, addon=addon))
@json_view
@csrf_exempt
@dev_required(allow_editors=True)
def json_file_validation(request, addon_id, addon, file_id):
file = get_object_or_404(File, id=file_id)
if not file.has_been_validated:
if request.method != 'POST':
return http.HttpResponseNotAllowed(['POST'])
try:
v_result = file_validator(file.id)
except Exception, exc:
log.error('file_validator(%s): %s' % (file.id, exc))
error = "\n".join(traceback.format_exception(*sys.exc_info()))
return make_validation_result({'validation': '',
'error': error})
else:
v_result = file.validation
validation = json.loads(v_result.validation)
return make_validation_result(dict(validation=validation, error=None))
@json_view
def json_upload_detail(request, upload, addon=None):
result = upload_validation_context(request, upload, addon=addon)
if result['validation']:
if result['validation']['errors'] == 0:
try:
parse_addon(upload, addon=addon)
except django_forms.ValidationError, exc:
m = []
for msg in exc.messages:
# Simulate a validation error so the UI displays it.
m.append({'type': 'error', 'message': msg, 'tier': 1})
v = make_validation_result(dict(error='',
validation=dict(messages=m)))
return json_view.error(v)
return result
def upload_validation_context(request, upload, addon=None, url=None):
if not settings.VALIDATE_ADDONS:
upload.task_error = ''
upload.validation = json.dumps({'errors': 0, 'messages': [],
'metadata': {}, 'notices': 0,
'warnings': 0})
upload.save()
validation = json.loads(upload.validation) if upload.validation else ''
if not url:
if addon:
url = reverse('mkt.developers.upload_detail_for_addon',
args=[addon.app_slug, upload.uuid])
else:
url = reverse('mkt.developers.upload_detail',
args=[upload.uuid, 'json'])
report_url = reverse('mkt.developers.upload_detail', args=[upload.uuid])
return make_validation_result(dict(upload=upload.uuid,
validation=validation,
error=upload.task_error, url=url,
full_report_url=report_url))
def upload_detail(request, uuid, format='html'):
upload = get_object_or_404(FileUpload, uuid=uuid)
if format == 'json' or request.is_ajax():
return json_upload_detail(request, upload)
validate_url = reverse('mkt.developers.standalone_upload_detail',
args=['hosted', upload.uuid])
return render(request, 'developers/validation.html',
dict(validate_url=validate_url, filename=upload.name,
timestamp=upload.created))
@dev_required(staff=True)
def addons_section(request, addon_id, addon, section, editable=False):
models = {'basic': AppFormBasic,
'media': AppFormMedia,
'details': AppFormDetails,
'support': AppFormSupport,
'technical': AppFormTechnical,
'admin': forms.AdminSettingsForm}
is_dev = acl.check_addon_ownership(request, addon, dev=True)
if section not in models:
raise http.Http404()
version = addon.current_version or addon.latest_version
tags, previews = [], []
cat_form = appfeatures = appfeatures_form = version_form = None
formdata = request.POST if request.method == 'POST' else None
# Permissions checks.
# Only app owners can edit any of the details of their apps.
# Users with 'Apps:Configure' can edit the admin settings.
if ((section != 'admin' and not is_dev) or
(section == 'admin' and
not acl.action_allowed(request, 'Apps', 'Configure') and
not acl.action_allowed(request, 'Apps', 'ViewConfiguration'))):
raise PermissionDenied
if section == 'basic':
cat_form = CategoryForm(formdata, product=addon, request=request)
# Only show/use the release notes form for hosted apps, packaged apps
# can do that from the version edit page.
if not addon.is_packaged:
version_form = AppVersionForm(formdata, instance=version)
tags = addon.tags.not_blocked().values_list('tag_text', flat=True)
elif section == 'media':
previews = PreviewFormSet(
request.POST or None, prefix='files',
queryset=addon.get_previews())
elif section == 'technical':
# Only show/use the features form for hosted apps, packaged apps
# can do that from the version edit page.
if not addon.is_packaged:
appfeatures = version.features
appfeatures_form = AppFeaturesForm(formdata, instance=appfeatures)
# Get the slug before the form overwrites it with the form data.
valid_slug = addon.app_slug
if editable:
if request.method == 'POST':
if (section == 'admin' and
not acl.action_allowed(request, 'Apps', 'Configure')):
raise PermissionDenied
form = models[section](formdata, request.FILES, instance=addon,
version=version, request=request)
all_forms = [form, previews]
for additional_form in (appfeatures_form, cat_form, version_form):
if additional_form:
all_forms.append(additional_form)
if all(not f or f.is_valid() for f in all_forms):
if cat_form:
cat_form.save()
addon = form.save(addon)
if appfeatures_form:
appfeatures_form.save()
if version_form:
# We are re-using version_form without displaying all its
# fields, so we need to override the boolean fields,
# otherwise they'd be considered empty and therefore False.
version_form.cleaned_data['publish_immediately'] = (
version_form.fields['publish_immediately'].initial)
version_form.save()
if 'manifest_url' in form.changed_data:
addon.update(
app_domain=addon.domain_from_url(addon.manifest_url))
update_manifests([addon.pk])
if previews:
for preview in previews.forms:
preview.save(addon)
editable = False
if section == 'media':
mkt.log(mkt.LOG.CHANGE_ICON, addon)
else:
mkt.log(mkt.LOG.EDIT_PROPERTIES, addon)
valid_slug = addon.app_slug
else:
form = models[section](instance=addon, version=version,
request=request)
else:
form = False
data = {
'addon': addon,
'version': version,
'form': form,
'editable': editable,
'tags': tags,
'cat_form': cat_form,
'version_form': version_form,
'preview_form': previews,
'valid_slug': valid_slug,
}
if appfeatures_form and appfeatures:
data.update({
'appfeatures': appfeatures,
'feature_list': [unicode(f) for f in appfeatures.to_list()],
'appfeatures_form': appfeatures_form
})
return render(request, 'developers/apps/edit/%s.html' % section, data)
@never_cache
@dev_required(skip_submit_check=True)
@json_view
def image_status(request, addon_id, addon, icon_size=64):
# Default icon needs no checking.
if not addon.icon_type or addon.icon_type.split('/')[0] == 'icon':
icons = True
else:
icons = storage.exists(
os.path.join(addon.get_icon_dir(), '%s-%s.png' % (
addon.id, icon_size)))
previews = all(storage.exists(p.thumbnail_path)
for p in addon.get_previews())
return {'overall': icons and previews,
'icons': icons,
'previews': previews}
@json_view
def ajax_upload_media(request, upload_type):
errors = []
upload_hash = ''
if 'upload_image' in request.FILES:
upload_preview = request.FILES['upload_image']
upload_preview.seek(0)
content_type = upload_preview.content_type
errors, upload_hash = check_upload(upload_preview, upload_type,
content_type)
else:
errors.append(_('There was an error uploading your preview.'))
if errors:
upload_hash = ''
return {'upload_hash': upload_hash, 'errors': errors}
@dev_required
def upload_media(request, addon_id, addon, upload_type):
return ajax_upload_media(request, upload_type)
@dev_required
@require_POST
def remove_locale(request, addon_id, addon):
locale = request.POST.get('locale')
if locale and locale != addon.default_locale:
addon.remove_locale(locale)
return http.HttpResponse()
return http.HttpResponseBadRequest()
def docs(request, doc_name=None, doc_page=None):
filename = ''
all_docs = {'policies': ['agreement']}
if doc_name and doc_name in all_docs:
filename = '%s.html' % doc_name
if doc_page and doc_page in all_docs[doc_name]:
filename = '%s-%s.html' % (doc_name, doc_page)
else:
# TODO: Temporary until we have a `policies` docs index.
filename = None
if not filename:
return redirect('ecosystem.landing')
return render(request, 'developers/docs/%s' % filename)
@login_required
def terms(request):
form = forms.DevAgreementForm({'read_dev_agreement': True},
instance=request.user)
if request.POST and form.is_valid():
form.save()
log.info('Dev agreement agreed for user: %s' % request.user.pk)
if request.GET.get('to') and request.GET['to'].startswith('/'):
return redirect(request.GET['to'])
messages.success(request, _('Terms of service accepted.'))
return render(request, 'developers/terms.html',
{'accepted': request.user.read_dev_agreement,
'agreement_form': form})
@login_required
def api(request):
roles = request.user.groups.filter(name='Admins').exists()
form = APIConsumerForm()
if roles:
messages.error(request,
_('Users with the admin role cannot use the API.'))
elif request.method == 'POST':
if 'delete' in request.POST:
try:
consumer = Access.objects.get(pk=request.POST.get('consumer'),
user=request.user)
consumer.delete()
except Access.DoesNotExist:
messages.error(request, _('No such API key.'))
else:
access = Access.create_for_user(request.user)
form = APIConsumerForm(request.POST, instance=access)
if form.is_valid():
form.save()
messages.success(request, _('New API key generated.'))
else:
access.delete()
consumers = list(Access.objects.filter(user=request.user))
return render(request, 'developers/api.html',
{'consumers': consumers, 'roles': roles, 'form': form,
'domain': settings.DOMAIN, 'site_url': settings.SITE_URL})
@app_view
@require_POST
@permission_required([('Admin', '%'), ('Apps', 'Configure')])
def blocklist(request, addon):
"""
Blocklists the app by creating a new version/file.
"""
if addon.status != mkt.STATUS_BLOCKED:
addon.create_blocklisted_version()
messages.success(request, _('Created blocklisted version.'))
else:
messages.info(request, _('App already blocklisted.'))
return redirect(addon.get_dev_url('versions'))
@waffle_switch('view-transactions')
@login_required
def transactions(request):
form, transactions = _get_transactions(request)
return render(
request, 'developers/transactions.html',
{'form': form, 'CONTRIB_TYPES': mkt.CONTRIB_TYPES,
'count': transactions.count(),
'transactions': paginate(request, transactions, per_page=50)})
def _get_transactions(request):
apps = addon_listing(request)[0]
transactions = Contribution.objects.filter(addon__in=list(apps),
type__in=mkt.CONTRIB_TYPES)
form = TransactionFilterForm(request.GET, apps=apps)
if form.is_valid():
transactions = _filter_transactions(transactions, form.cleaned_data)
return form, transactions
def _filter_transactions(qs, data):
"""Handle search filters and queries for transactions."""
filter_mapping = {'app': 'addon_id',
'transaction_type': 'type',
'transaction_id': 'uuid',
'date_from': 'created__gte',
'date_to': 'created__lte'}
for form_field, db_field in filter_mapping.iteritems():
if data.get(form_field):
try:
qs = qs.filter(**{db_field: data[form_field]})
except ValueError:
continue
return qs
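# Example (hypothetical cleaned form data):
#
#     _filter_transactions(qs, {'app': 12, 'date_from': date(2014, 1, 1)})
#
# is equivalent to qs.filter(addon_id=12, created__gte=date(2014, 1, 1));
# fields the user left blank are skipped, and values that raise ValueError
# are silently ignored.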
def testing(request):
return render(request, 'developers/testing.html')
class ContentRatingList(CORSMixin, SlugOrIdMixin, ListAPIView):
model = ContentRating
serializer_class = ContentRatingSerializer
permission_classes = (AllowAny,)
cors_allowed_methods = ['get']
queryset = Webapp.objects.all()
slug_field = 'app_slug'
def get(self, request, *args, **kwargs):
app = self.get_object()
self.queryset = app.content_ratings.all()
if 'since' in request.GET:
form = ContentRatingForm(request.GET)
if form.is_valid():
self.queryset = self.queryset.filter(
modified__gt=form.cleaned_data['since'])
if not self.queryset.exists():
raise http.Http404()
return super(ContentRatingList, self).get(self, request)
class ContentRatingsPingback(CORSMixin, SlugOrIdMixin, CreateAPIView):
cors_allowed_methods = ['post']
parser_classes = (lib.iarc.utils.IARC_JSON_Parser,)
permission_classes = (AllowAny,)
queryset = Webapp.objects.all()
slug_field = 'app_slug'
def post(self, request, pk, *args, **kwargs):
log.info(u'Received IARC pingback for app:%s' % pk)
if request.content_type != 'application/json':
log.info(u'IARC pingback not of content-type "application/json"')
return Response({
'detail': "Endpoint only accepts 'application/json'."
}, status=http_status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
app = self.get_object()
data = request.DATA[0]
if settings.DEBUG:
log.debug(u'%s' % data)
if app.iarc_token() != data.get('token'):
# Verify token.
log.info(u'Token mismatch in IARC pingback for app:%s' % app.id)
return Response({'detail': 'Token mismatch'},
status=http_status.HTTP_400_BAD_REQUEST)
if data.get('ratings'):
# Double-check with IARC that it's the correct rating.
if not self.verify_data(data):
return Response('The ratings do not match the submission ID.',
status=http_status.HTTP_400_BAD_REQUEST)
log.info(u'Setting content ratings from IARC pingback for app:%s' %
app.id)
# We found a rating, so store the id and code for future use.
if 'submission_id' in data and 'security_code' in data:
app.set_iarc_info(data['submission_id'], data['security_code'])
# Update status if incomplete status.
# Do this before set_content_ratings to not prematurely trigger
# a refresh.
log.info('Checking app:%s completeness after IARC pingback.'
% app.id)
if (app.has_incomplete_status() and
app.is_fully_complete(ignore_ratings=True)):
log.info('Updating app status from IARC pingback for app:%s' %
app.id)
# Don't call update to prevent recursion in update_status.
app.update(status=mkt.STATUS_PENDING)
log.info('Updated app status from IARC pingback for app:%s' %
app.id)
elif app.has_incomplete_status():
log.info('Reasons for app:%s incompleteness after IARC '
'pingback: %s' % (app.id, app.completion_errors()))
app.set_descriptors(data.get('descriptors', []))
app.set_interactives(data.get('interactives', []))
# Set content ratings last since it triggers a refresh on Content
# Ratings page. We want descriptors and interactives visible by
# the time it's refreshed.
app.set_content_ratings(data.get('ratings', {}))
return Response('ok')
def verify_data(self, data):
client = lib.iarc.client.get_iarc_client('services')
xml = lib.iarc.utils.render_xml('get_app_info.xml', data)
resp = client.Get_App_Info(XMLString=xml)
check_data = lib.iarc.utils.IARC_XML_Parser().parse_string(resp)
try:
check_data = check_data.get('rows', [])[0]
except IndexError:
return False
rates_bad = data.get('ratings') != check_data.get('ratings')
inter_bad = (set(data.get('interactives', [])) !=
set(check_data.get('interactives', [])))
descs_bad = (set(data.get('descriptors', [])) !=
set(check_data.get('descriptors', [])))
if rates_bad:
log.error('IARC pingback did not match rating %s vs %s' %
(data.get('ratings'), check_data.get('ratings')))
if inter_bad:
log.error('IARC pingback did not match interactives %s vs %s' %
(data.get('interactives'),
check_data.get('interactives')))
if descs_bad:
log.error('IARC pingback did not match descriptors %s vs %s' %
(data.get('descriptors'), check_data.get('descriptors')))
if rates_bad or inter_bad or descs_bad:
return False
return True
from __future__ import unicode_literals
from future.builtins import int, range, str
from datetime import date, datetime
from os.path import join, split
from uuid import uuid4
from django import forms
from django.forms.extras import SelectDateWidget
from django.core.files.storage import FileSystemStorage
from django.core.urlresolvers import reverse
from django.template import Template
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.timezone import now
from mezzanine.conf import settings
from mezzanine.forms import fields
from mezzanine.forms.models import FormEntry, FieldEntry
from mezzanine.utils.email import split_addresses as split_choices
fs = FileSystemStorage(location=settings.FORMS_UPLOAD_ROOT)
##############################
# Each type of export filter #
##############################
# Text matches
FILTER_CHOICE_CONTAINS = "1"
FILTER_CHOICE_DOESNT_CONTAIN = "2"
# Exact matches
FILTER_CHOICE_EQUALS = "3"
FILTER_CHOICE_DOESNT_EQUAL = "4"
# Greater/less than
FILTER_CHOICE_BETWEEN = "5"
# Multiple values
FILTER_CHOICE_CONTAINS_ANY = "6"
FILTER_CHOICE_CONTAINS_ALL = "7"
FILTER_CHOICE_DOESNT_CONTAIN_ANY = "8"
FILTER_CHOICE_DOESNT_CONTAIN_ALL = "9"
##########################
# Export filters grouped #
##########################
# Text fields
TEXT_FILTER_CHOICES = (
("", _("Nothing")),
(FILTER_CHOICE_CONTAINS, _("Contains")),
(FILTER_CHOICE_DOESNT_CONTAIN, _("Doesn't contain")),
(FILTER_CHOICE_EQUALS, _("Equals")),
(FILTER_CHOICE_DOESNT_EQUAL, _("Doesn't equal")),
)
# Choices with single value entries
CHOICE_FILTER_CHOICES = (
("", _("Nothing")),
(FILTER_CHOICE_CONTAINS_ANY, _("Equals any")),
(FILTER_CHOICE_DOESNT_CONTAIN_ANY, _("Doesn't equal any")),
)
# Choices with multiple value entries
MULTIPLE_FILTER_CHOICES = (
("", _("Nothing")),
(FILTER_CHOICE_CONTAINS_ANY, _("Contains any")),
(FILTER_CHOICE_CONTAINS_ALL, _("Contains all")),
(FILTER_CHOICE_DOESNT_CONTAIN_ANY, _("Doesn't contain any")),
(FILTER_CHOICE_DOESNT_CONTAIN_ALL, _("Doesn't contain all")),
)
# Dates
DATE_FILTER_CHOICES = (
("", _("Nothing")),
(FILTER_CHOICE_BETWEEN, _("Is between")),
)
# The filter function for each filter type
FILTER_FUNCS = {
FILTER_CHOICE_CONTAINS:
lambda val, field: val.lower() in field.lower(),
FILTER_CHOICE_DOESNT_CONTAIN:
lambda val, field: val.lower() not in field.lower(),
FILTER_CHOICE_EQUALS:
lambda val, field: val.lower() == field.lower(),
FILTER_CHOICE_DOESNT_EQUAL:
lambda val, field: val.lower() != field.lower(),
FILTER_CHOICE_BETWEEN:
lambda val_from, val_to, field: (
(not val_from or val_from <= field) and
(not val_to or val_to >= field)
),
FILTER_CHOICE_CONTAINS_ANY:
lambda val, field: set(val) & set(split_choices(field)),
FILTER_CHOICE_CONTAINS_ALL:
lambda val, field: set(val) == set(split_choices(field)),
FILTER_CHOICE_DOESNT_CONTAIN_ANY:
lambda val, field: not set(val) & set(split_choices(field)),
FILTER_CHOICE_DOESNT_CONTAIN_ALL:
lambda val, field: set(val) != set(split_choices(field)),
}
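# Quick sanity checks for the filter functions (illustrative):
#
#     FILTER_FUNCS[FILTER_CHOICE_CONTAINS]("foo", "Foobar")          # True
#     FILTER_FUNCS[FILTER_CHOICE_BETWEEN](None, val_to, field)       # True
#                                         # whenever field <= val_to
#     FILTER_FUNCS[FILTER_CHOICE_CONTAINS_ANY](["a", "z"], "a, b")   # truthy
#
# The multi-value functions rely on split_choices() turning the stored
# comma-separated field value into a list before the set comparison.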
# Export form fields for each filter type grouping
text_filter_field = forms.ChoiceField(label=" ", required=False,
choices=TEXT_FILTER_CHOICES)
choice_filter_field = forms.ChoiceField(label=" ", required=False,
choices=CHOICE_FILTER_CHOICES)
multiple_filter_field = forms.ChoiceField(label=" ", required=False,
choices=MULTIPLE_FILTER_CHOICES)
date_filter_field = forms.ChoiceField(label=" ", required=False,
choices=DATE_FILTER_CHOICES)
class FormForForm(forms.ModelForm):
"""
Form with a set of fields dynamically assigned, directly based on the
given ``forms.models.Form`` instance.
"""
class Meta:
model = FormEntry
exclude = ("form", "entry_time")
def __init__(self, form, context, *args, **kwargs):
"""
Dynamically add each of the form fields for the given form model
instance and its related field model instances.
"""
self.form = form
self.form_fields = form.fields.visible()
initial = kwargs.pop("initial", {})
# If a FormEntry instance is given to edit, populate initial
# with its field values.
field_entries = {}
if kwargs.get("instance"):
for field_entry in kwargs["instance"].fields.all():
field_entries[field_entry.field_id] = field_entry.value
super(FormForForm, self).__init__(*args, **kwargs)
# Create the form fields.
for field in self.form_fields:
field_key = "field_%s" % field.id
field_class = fields.CLASSES[field.field_type]
field_widget = fields.WIDGETS.get(field.field_type)
field_args = {"label": field.label, "required": field.required,
"help_text": field.help_text}
if field.required and not field.help_text:
field_args["help_text"] = _("required")
arg_names = field_class.__init__.__code__.co_varnames
if "max_length" in arg_names:
field_args["max_length"] = settings.FORMS_FIELD_MAX_LENGTH
if "choices" in arg_names:
field_args["choices"] = field.get_choices()
if field_widget is not None:
field_args["widget"] = field_widget
#
# Initial value for field, in order of preference:
#
# - If a form model instance is given (eg we're editing a
# form response), then use the instance's value for the
# field.
# - If the developer has provided an explicit "initial"
# dict, use it.
# - The default value for the field instance as given in
# the admin.
#
initial_val = None
try:
initial_val = field_entries[field.id]
except KeyError:
try:
initial_val = initial[field_key]
except KeyError:
initial_val = Template(field.default).render(context)
if initial_val:
if field.is_a(*fields.MULTIPLE):
initial_val = split_choices(initial_val)
elif field.field_type == fields.CHECKBOX:
initial_val = initial_val != "False"
self.initial[field_key] = initial_val
self.fields[field_key] = field_class(**field_args)
if field.field_type == fields.DOB:
_now = datetime.now()
years = list(range(_now.year, _now.year - 120, -1))
self.fields[field_key].widget.years = years
# Add identifying type attr to the field for styling.
setattr(self.fields[field_key], "type",
field_class.__name__.lower())
if (field.required and settings.FORMS_USE_HTML5 and
field.field_type != fields.CHECKBOX_MULTIPLE):
self.fields[field_key].widget.attrs["required"] = ""
if field.placeholder_text and not field.default:
text = field.placeholder_text
self.fields[field_key].widget.attrs["placeholder"] = text
def save(self, **kwargs):
"""
Create a ``FormEntry`` instance and related ``FieldEntry``
instances for each form field.
"""
entry = super(FormForForm, self).save(commit=False)
entry.form = self.form
entry.entry_time = now()
entry.save()
entry_fields = entry.fields.values_list("field_id", flat=True)
new_entry_fields = []
for field in self.form_fields:
field_key = "field_%s" % field.id
value = self.cleaned_data[field_key]
if value and self.fields[field_key].widget.needs_multipart_form:
value = fs.save(join("forms", str(uuid4()), value.name), value)
if isinstance(value, list):
value = ", ".join([v.strip() for v in value])
if field.id in entry_fields:
field_entry = entry.fields.get(field_id=field.id)
field_entry.value = value
field_entry.save()
else:
new = {"entry": entry, "field_id": field.id, "value": value}
new_entry_fields.append(FieldEntry(**new))
if new_entry_fields:
FieldEntry.objects.bulk_create(new_entry_fields)
return entry
def email_to(self):
"""
Return the value entered for the first field of type
``forms.fields.EMAIL``.
"""
for field in self.form_fields:
if field.is_a(fields.EMAIL):
return self.cleaned_data["field_%s" % field.id]
return None
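# Illustrative sketch (not part of the original module): roughly how a view
# would drive ``FormForForm``. ``form_page`` stands in for a
# ``forms.models.Form`` instance and the helper name is hypothetical; the exact
# context handling depends on the Django/Mezzanine version in use.
def _example_handle_form(request, form_page):
    from django.template import RequestContext
    context = RequestContext(request)
    form = FormForForm(form_page, context, request.POST or None,
                       request.FILES or None)
    if request.method == "POST" and form.is_valid():
        # ``save()`` creates the FormEntry plus one FieldEntry per field.
        return form.save()
    return form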
class EntriesForm(forms.Form):
"""
Form with a set of fields dynamically assigned that can be used to
filter entries for the given ``forms.models.Form`` instance.
"""
def __init__(self, form, request, *args, **kwargs):
"""
Iterate through the fields of the ``forms.models.Form`` instance and
create the form fields required to control including the field in
the export (with a checkbox) or filtering the field, which differs
across field types. Use a list of checkboxes when a fixed set of
choices can be chosen from, a pair of date fields for date ranges,
and for all other types provide a textbox for text search.
"""
self.form = form
self.request = request
self.form_fields = form.fields.all()
self.entry_time_name = str(FormEntry._meta.get_field(
"entry_time").verbose_name)
super(EntriesForm, self).__init__(*args, **kwargs)
for field in self.form_fields:
field_key = "field_%s" % field.id
# Checkbox for including in export.
self.fields["%s_export" % field_key] = forms.BooleanField(
label=field.label, initial=True, required=False)
if field.is_a(*fields.CHOICES):
# A fixed set of choices to filter by.
if field.is_a(fields.CHECKBOX):
choices = ((True, _("Checked")), (False, _("Not checked")))
else:
choices = field.get_choices()
contains_field = forms.MultipleChoiceField(label=" ",
choices=choices, widget=forms.CheckboxSelectMultiple(),
required=False)
self.fields["%s_filter" % field_key] = choice_filter_field
self.fields["%s_contains" % field_key] = contains_field
elif field.is_a(*fields.MULTIPLE):
# A fixed set of choices to filter by, with multiple
# possible values in the entry field.
contains_field = forms.MultipleChoiceField(label=" ",
choices=field.get_choices(),
widget=forms.CheckboxSelectMultiple(),
required=False)
self.fields["%s_filter" % field_key] = multiple_filter_field
self.fields["%s_contains" % field_key] = contains_field
elif field.is_a(*fields.DATES):
# A date range to filter by.
self.fields["%s_filter" % field_key] = date_filter_field
self.fields["%s_from" % field_key] = forms.DateField(
label=" ", widget=SelectDateWidget(), required=False)
self.fields["%s_to" % field_key] = forms.DateField(
label=_("and"), widget=SelectDateWidget(), required=False)
else:
# Text box for search term to filter by.
contains_field = forms.CharField(label=" ", required=False)
self.fields["%s_filter" % field_key] = text_filter_field
self.fields["%s_contains" % field_key] = contains_field
# Add ``FormEntry.entry_time`` as a field.
field_key = "field_0"
self.fields["%s_export" % field_key] = forms.BooleanField(initial=True,
label=FormEntry._meta.get_field("entry_time").verbose_name,
required=False)
self.fields["%s_filter" % field_key] = date_filter_field
self.fields["%s_from" % field_key] = forms.DateField(
label=" ", widget=SelectDateWidget(), required=False)
self.fields["%s_to" % field_key] = forms.DateField(
label=_("and"), widget=SelectDateWidget(), required=False)
def __iter__(self):
"""
Yield pairs of include checkbox / filters for each field.
"""
for field_id in [f.id for f in self.form_fields] + [0]:
prefix = "field_%s_" % field_id
fields = [f for f in super(EntriesForm, self).__iter__()
if f.name.startswith(prefix)]
yield fields[0], fields[1], fields[2:]
def columns(self):
"""
Returns the list of selected column names.
"""
fields = [f.label for f in self.form_fields
if self.cleaned_data["field_%s_export" % f.id]]
if self.cleaned_data["field_0_export"]:
fields.append(self.entry_time_name)
return fields
def rows(self, csv=False):
"""
Returns each row based on the selected criteria.
"""
# Store the index of each field against its ID for building each
# entry row with columns in the correct order. Also store the IDs of
# fields with a type of FileField or Date-like for special handling of
# their values.
field_indexes = {}
file_field_ids = []
date_field_ids = []
for field in self.form_fields:
if self.cleaned_data["field_%s_export" % field.id]:
field_indexes[field.id] = len(field_indexes)
if field.is_a(fields.FILE):
file_field_ids.append(field.id)
elif field.is_a(*fields.DATES):
date_field_ids.append(field.id)
num_columns = len(field_indexes)
include_entry_time = self.cleaned_data["field_0_export"]
if include_entry_time:
num_columns += 1
# Get the field entries for the given form and filter by entry_time
# if specified.
field_entries = FieldEntry.objects.filter(entry__form=self.form
).order_by("-entry__id").select_related("entry")
if self.cleaned_data["field_0_filter"] == FILTER_CHOICE_BETWEEN:
time_from = self.cleaned_data["field_0_from"]
time_to = self.cleaned_data["field_0_to"]
if time_from and time_to:
field_entries = field_entries.filter(
entry__entry_time__range=(time_from, time_to))
# Loop through each field value ordered by entry, building up each
# entry as a row. Use the ``valid_row`` flag for marking a row as
# invalid if it fails one of the filtering criteria specified.
current_entry = None
current_row = None
valid_row = True
for field_entry in field_entries:
if field_entry.entry_id != current_entry:
# New entry, write out the current row and start a new one.
if valid_row and current_row is not None:
if not csv:
current_row.insert(0, current_entry)
yield current_row
current_entry = field_entry.entry_id
current_row = [""] * num_columns
valid_row = True
if include_entry_time:
current_row[-1] = field_entry.entry.entry_time
field_value = field_entry.value or ""
# Check for filter.
field_id = field_entry.field_id
filter_type = self.cleaned_data.get("field_%s_filter" % field_id)
filter_args = None
if filter_type:
if filter_type == FILTER_CHOICE_BETWEEN:
f, t = "field_%s_from" % field_id, "field_%s_to" % field_id
filter_args = [self.cleaned_data[f], self.cleaned_data[t]]
else:
field_name = "field_%s_contains" % field_id
filter_args = self.cleaned_data[field_name]
if filter_args:
filter_args = [filter_args]
if filter_args:
# Convert dates before checking filter.
if field_id in date_field_ids:
y, m, d = field_value.split(" ")[0].split("-")
dte = date(int(y), int(m), int(d))
filter_args.append(dte)
else:
filter_args.append(field_value)
filter_func = FILTER_FUNCS[filter_type]
if not filter_func(*filter_args):
valid_row = False
# Create download URL for file fields.
if field_entry.value and field_id in file_field_ids:
url = reverse("admin:form_file", args=(field_entry.id,))
field_value = self.request.build_absolute_uri(url)
if not csv:
parts = (field_value, split(field_entry.value)[1])
field_value = mark_safe("<a href=\"%s\">%s</a>" % parts)
# Only use values for fields that were selected.
try:
current_row[field_indexes[field_id]] = field_value
except KeyError:
pass
# Output the final row.
if valid_row and current_row is not None:
if not csv:
current_row.insert(0, current_entry)
yield current_row
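# Illustrative sketch (not part of the original module): exporting the selected
# columns and filtered rows of an ``EntriesForm`` to CSV. The helper name and
# the ``fileobj`` argument are hypothetical; the form must already have been
# validated (``is_valid()``) before ``columns()``/``rows()`` are used.
def _example_export_csv(entries_form, fileobj):
    import csv
    writer = csv.writer(fileobj)
    writer.writerow(entries_form.columns())
    for row in entries_form.rows(csv=True):
        writer.writerow(row)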
| |
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
self.all_namespaces = all_namespaces
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = '/tmp/%s' % rname
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the named resource using the given file'''
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a resource from the given content'''
fname = '/tmp/%s' % rname
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''create a resource from a file'''
return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname, selector=None):
'''delete the named resource'''
cmd = ['delete', resource, rname, '-n', self.namespace]
if selector:
cmd.append('--selector=%s' % selector)
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None):
'''process a template and optionally create the resulting objects'''
cmd = ['process', '-n', self.namespace]
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["%s=%s" % (key, value) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = '/tmp/%s' % template_name
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
def _get(self, resource, rname=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector:
cmd.append('--selector=%s' % selector)
if self.all_namespaces:
cmd.extend(['--all-namespaces'])
elif self.namespace:
cmd.extend(['-n', self.namespace])
cmd.extend(['-o', 'json'])
if rname:
cmd.append(rname)
rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if rval.has_key('items'):
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
''' perform oadm manage-node --schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
cmd.append('--schedulable=%s' % schedulable)
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm manage-node --list-pods '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
#pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector=%s' % selector)
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector=%s' % pod_selector)
if grace_period:
cmd.append('--grace-period=%s' % int(grace_period))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
#pylint: disable=too-many-arguments
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = []
if oadm:
cmds = ['/usr/bin/oc', 'adm']
else:
cmds = ['/usr/bin/oc']
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={'KUBECONFIG': self.kubeconfig})
stdout, stderr = proc.communicate(input_data)
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
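# Illustrative sketch (not part of this generated module): a typical call
# through the wrapper above. The helper name is hypothetical; concrete classes
# such as Edit below reuse the same _get/_replace helpers.
def _example_get_service(name, namespace='default'):
    '''return the named service as parsed JSON, or None if it is not found'''
    occli = OpenShiftCLI(namespace)
    rval = occli._get('svc', rname=name)  # pylint: disable=protected-access
    if rval['returncode'] == 0 and rval['results']:
        return rval['results'][0]
    return None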
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def create_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_file(item['path'], item['data'], ftype=content_type)
files.append({'name': os.path.basename(path), 'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if result.has_key('metadata') and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
contents = yaml.load(contents, yaml.RoundTripLoader)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if not user_def.has_key(key):
if debug:
print 'User data does not have key [%s]' % key
print 'User data: %s' % user_def
return False
if not isinstance(user_def[key], list):
if debug:
print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
return False
if len(user_def[key]) != len(value):
if debug:
print "List lengths are not equal."
print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
print "user_def: %s" % user_def[key]
print "value: %s" % value
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print 'sending list - list'
print type(values[0])
print type(values[1])
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print 'list compare returned false'
return False
elif value != user_def[key]:
if debug:
print 'value should be identical'
print value
print user_def[key]
return False
# recurse on a dictionary
elif isinstance(value, dict):
if not user_def.has_key(key):
if debug:
print "user_def does not have key [%s]" % key
return False
if not isinstance(user_def[key], dict):
if debug:
print "dict returned false: not instance of dict"
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print "keys are not equal in dict"
print api_values
print user_values
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
print result
return False
# Verify each key, value pair is the same
else:
if not user_def.has_key(key) or value != user_def[key]:
if debug:
print "value not equal; user_def does not have key"
print key
print value
if user_def.has_key(key):
print user_def[key]
return False
if debug:
print 'returning true'
return True
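# Illustrative sketch (not part of this generated module): what check_def_equal
# treats as equal. The definitions used here are hypothetical; autogenerated
# sections such as 'status' and 'metadata' are skipped, so the user definition
# only has to match the keys it actually supplies.
def _example_check_def_equal():
    '''returns True: the extra status block on the live object is ignored'''
    user_def = {'spec': {'replicas': 2}}
    live_def = {'spec': {'replicas': 2}, 'status': {'observedGeneration': 3}}
    return Utils.check_def_equal(user_def, live_def)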
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self):
'''return all options as a string'''
return self.stringify()
def stringify(self):
''' return the options hash as cli params in a string '''
rval = []
for key, data in self.config_options.items():
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
return rval
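# Illustrative sketch (not part of this generated module): how OpenShiftCLIConfig
# turns its options hash into oc-style flags. The option names are hypothetical;
# entries whose value is empty or None are simply omitted.
def _example_config_options():
    '''returns e.g. ['--replicas=3', '--service-account=builder'] (order may vary)'''
    config = OpenShiftCLIConfig('myapp', 'default',
                                '/etc/origin/master/admin.kubeconfig',
                                {'replicas': {'value': 3, 'include': True},
                                 'service_account': {'value': 'builder', 'include': True},
                                 'node_selector': {'value': None, 'include': True}})
    return config.stringify()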
class YeditException(Exception):
''' Exception class for Yedit '''
pass
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict == None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, value):
''' setter method for separator '''
self._separator = value
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key % ''.join(common_separators), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add or update an item in a dictionary using key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
item = 'd'
result: d == {'a': {'b': 'd'}}
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]:
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding data to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key, None)
elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
data = data[int(arr_ind)]
else:
return None
return data
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
tmp_filename = self.filename + '.yedit'
try:
with open(tmp_filename, 'w') as yfd:
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except Exception as err:
raise YeditException(err.message)
os.rename(tmp_filename, self.filename)
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename == None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
# pylint: disable=no-member,maybe-no-member
if hasattr(self.yaml_dict, 'fa'):
self.yaml_dict.fa.set_block_style()
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. %s' % err)
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError as _:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if entry.has_key(key_or_item):
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# pylint: disable=no-member,maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if isinstance(entry, dict):
# pylint: disable=no-member,maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
' value=[%s] [%s]' % (value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# pylint: disable=no-member,maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index != None:
ind = index
if ind != None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
#already exists, return
if ind != None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError as _:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if not result:
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
# pylint: disable=no-member
if hasattr(self.yaml_dict, 'fa'):
tmp_copy.fa.set_block_style()
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
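# Illustrative sketch (not part of this generated module): driving Yedit
# directly with dotted-key notation. The content is hypothetical; put() creates
# intermediate dictionaries as needed and get() walks both dicts and lists.
def _example_yedit_usage():
    '''returns ('router', 'g')'''
    yed = Yedit(content={'metadata': {'name': 'router'}, 'items': ['f', 'g']})
    yed.put('metadata.labels.app', 'router')
    return yed.get('metadata.labels.app'), yed.get('items[1]')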
class Edit(OpenShiftCLI):
''' Class to wrap the oc command line tools
'''
# pylint: disable=too-many-arguments
def __init__(self,
kind,
namespace,
resource_name=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
separator='.',
verbose=False):
''' Constructor for OpenshiftOC '''
super(Edit, self).__init__(namespace, kubeconfig)
self.namespace = namespace
self.kind = kind
self.name = resource_name
self.kubeconfig = kubeconfig
self.separator = separator
self.verbose = verbose
def get(self):
'''return the named resource '''
return self._get(self.kind, self.name)
def update(self, file_name, content, force=False, content_type='yaml'):
'''run update '''
if file_name:
if content_type == 'yaml':
data = yaml.load(open(file_name))
elif content_type == 'json':
data = json.loads(open(file_name).read())
changes = []
yed = Yedit(filename=file_name, content=data, separator=self.separator)
for key, value in content.items():
changes.append(yed.put(key, value))
if any([not change[0] for change in changes]):
return {'returncode': 0, 'updated': False}
yed.write()
atexit.register(Utils.cleanup, [file_name])
return self._replace(file_name, force=force)
return self._replace_content(self.kind, self.name, content, force=force, sep=self.separator)
def main():
'''
ansible oc module for editing objects
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kind=dict(required=True,
type='str',
choices=['dc', 'deploymentconfig',
'rc', 'replicationcontroller',
'svc', 'service',
'scc', 'securitycontextconstraints',
'ns', 'namespace', 'project', 'projects',
'is', 'imagestream',
'istag', 'imagestreamtag',
'bc', 'buildconfig',
'routes',
'node',
'secret',
'pv', 'persistentvolume',
]),
file_name=dict(default=None, type='str'),
file_format=dict(default='yaml', type='str'),
content=dict(default=None, required=True, type='dict'),
force=dict(default=False, type='bool'),
separator=dict(default='.', type='str'),
),
supports_check_mode=True,
)
ocedit = Edit(module.params['kind'],
module.params['namespace'],
module.params['name'],
kubeconfig=module.params['kubeconfig'],
separator=module.params['separator'],
verbose=module.params['debug'])
state = module.params['state']
api_rval = ocedit.get()
########
# Create
########
if not Utils.exists(api_rval['results'], module.params['name']):
module.fail_json(msg=api_rval)
########
# Update
########
api_rval = ocedit.update(module.params['file_name'],
module.params['content'],
module.params['force'],
module.params['file_format'])
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
if api_rval.has_key('updated') and not api_rval['updated']:
module.exit_json(changed=False, results=api_rval, state="present")
# return the created object
api_rval = ocedit.get()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutronclient.common import exceptions
from neutronclient.v2_0 import client as neutronclient
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.neutron import metering
from heat.engine import scheduler
from heat.tests.common import HeatTestCase
from heat.tests import utils
metering_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to test metering resources",
"Parameters" : {},
"Resources" : {
"label": {
"Type": "OS::Neutron::MeteringLabel",
"Properties": {
"name": "TestLabel",
"description": "Description of TestLabel"
}
},
"rule": {
"Type": "OS::Neutron::MeteringRule",
"Properties": {
"metering_label_id": { "Ref" : "label" },
"remote_ip_prefix": "10.0.3.0/24",
"direction": "ingress",
"excluded": false
}
}
}
}
'''
class MeteringLabelTest(HeatTestCase):
def setUp(self):
super(MeteringLabelTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_metering_label')
self.m.StubOutWithMock(neutronclient.Client, 'delete_metering_label')
self.m.StubOutWithMock(neutronclient.Client, 'show_metering_label')
self.m.StubOutWithMock(neutronclient.Client,
'create_metering_label_rule')
self.m.StubOutWithMock(neutronclient.Client,
'delete_metering_label_rule')
self.m.StubOutWithMock(neutronclient.Client,
'show_metering_label_rule')
self.stub_keystoneclient()
def create_metering_label(self):
neutronclient.Client.create_metering_label({
'metering_label': {
'name': 'TestLabel',
'description': 'Description of TestLabel'}
}).AndReturn({'metering_label': {'id': '1234'}})
snippet = template_format.parse(metering_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
return metering.MeteringLabel(
'label', resource_defns['label'], stack)
def test_create(self):
rsrc = self.create_metering_label()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_failed(self):
neutronclient.Client.create_metering_label({
'metering_label': {
'name': 'TestLabel',
'description': 'Description of TestLabel'}
}).AndRaise(exceptions.NeutronClientException())
self.m.ReplayAll()
snippet = template_format.parse(metering_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
rsrc = metering.MeteringLabel(
'label', resource_defns['label'], stack)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
neutronclient.Client.delete_metering_label('1234')
neutronclient.Client.show_metering_label('1234').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_metering_label()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_already_gone(self):
neutronclient.Client.delete_metering_label('1234').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_metering_label()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_failed(self):
neutronclient.Client.delete_metering_label('1234').AndRaise(
exceptions.NeutronClientException(status_code=400))
rsrc = self.create_metering_label()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_metering_label()
neutronclient.Client.show_metering_label('1234').MultipleTimes(
).AndReturn(
{'metering_label':
{'name': 'TestLabel',
'description': 'Description of TestLabel'}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual('TestLabel', rsrc.FnGetAtt('name'))
self.assertEqual('Description of TestLabel',
rsrc.FnGetAtt('description'))
self.m.VerifyAll()
class MeteringRuleTest(HeatTestCase):
def setUp(self):
super(MeteringRuleTest, self).setUp()
self.m.StubOutWithMock(neutronclient.Client, 'create_metering_label')
self.m.StubOutWithMock(neutronclient.Client, 'delete_metering_label')
self.m.StubOutWithMock(neutronclient.Client, 'show_metering_label')
self.m.StubOutWithMock(neutronclient.Client,
'create_metering_label_rule')
self.m.StubOutWithMock(neutronclient.Client,
'delete_metering_label_rule')
self.m.StubOutWithMock(neutronclient.Client,
'show_metering_label_rule')
self.stub_keystoneclient()
def create_metering_label_rule(self):
neutronclient.Client.create_metering_label_rule({
'metering_label_rule': {
'metering_label_id': 'None',
'remote_ip_prefix': '10.0.3.0/24',
'direction': 'ingress',
'excluded': False}
}).AndReturn({'metering_label_rule': {'id': '5678'}})
snippet = template_format.parse(metering_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
return metering.MeteringRule(
'rule', resource_defns['rule'], stack)
def test_create(self):
rsrc = self.create_metering_label_rule()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_create_failed(self):
neutronclient.Client.create_metering_label_rule({
'metering_label_rule': {
'metering_label_id': 'None',
'remote_ip_prefix': '10.0.3.0/24',
'direction': 'ingress',
'excluded': False}
}).AndRaise(exceptions.NeutronClientException())
self.m.ReplayAll()
snippet = template_format.parse(metering_template)
stack = utils.parse_stack(snippet)
resource_defns = stack.t.resource_definitions(stack)
rsrc = metering.MeteringRule(
'rule', resource_defns['rule'], stack)
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.create))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.CREATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_delete(self):
neutronclient.Client.delete_metering_label_rule('5678')
neutronclient.Client.show_metering_label_rule('5678').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_metering_label_rule()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_already_gone(self):
neutronclient.Client.delete_metering_label_rule('5678').AndRaise(
exceptions.NeutronClientException(status_code=404))
rsrc = self.create_metering_label_rule()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.delete)()
self.assertEqual((rsrc.DELETE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_delete_failed(self):
neutronclient.Client.delete_metering_label_rule('5678').AndRaise(
exceptions.NeutronClientException(status_code=400))
rsrc = self.create_metering_label_rule()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
error = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.delete))
self.assertEqual(
'NeutronClientException: An unknown exception occurred.',
six.text_type(error))
self.assertEqual((rsrc.DELETE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_attribute(self):
rsrc = self.create_metering_label_rule()
neutronclient.Client.show_metering_label_rule('5678').MultipleTimes(
).AndReturn(
{'metering_label_rule':
{'metering_label_id': 'None',
'remote_ip_prefix': '10.0.3.0/24',
'direction': 'ingress',
'excluded': False}})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual('10.0.3.0/24', rsrc.FnGetAtt('remote_ip_prefix'))
self.assertEqual('ingress', rsrc.FnGetAtt('direction'))
self.assertIs(False, rsrc.FnGetAtt('excluded'))
self.m.VerifyAll()
| |
r"""
The following constraints are implemented:
- ``constraints.boolean``
- ``constraints.dependent``
- ``constraints.dependent_property``
- ``constraints.greater_than(lower_bound)``
- ``constraints.greater_than_eq(lower_bound)``
- ``constraints.half_open_interval(lower_bound, upper_bound)``
- ``constraints.integer_interval(lower_bound, upper_bound)``
- ``constraints.interval(lower_bound, upper_bound)``
- ``constraints.less_than(upper_bound)``
- ``constraints.lower_cholesky``
- ``constraints.lower_triangular``
- ``constraints.nonnegative_integer``
- ``constraints.positive``
- ``constraints.positive_definite``
- ``constraints.positive_integer``
- ``constraints.real``
- ``constraints.real_vector``
- ``constraints.simplex``
- ``constraints.unit_interval``
"""
import torch
__all__ = [
'Constraint',
'boolean',
'dependent',
'dependent_property',
'greater_than',
'greater_than_eq',
'integer_interval',
'interval',
'half_open_interval',
'is_dependent',
'less_than',
'lower_cholesky',
'lower_triangular',
'nonnegative_integer',
'positive',
'positive_definite',
'positive_integer',
'real',
'real_vector',
'simplex',
'unit_interval',
]
class Constraint(object):
"""
Abstract base class for constraints.
A constraint object represents a region over which a variable is valid,
e.g. within which a variable can be optimized.
"""
def check(self, value):
"""
Returns a byte tensor of `sample_shape + batch_shape` indicating
whether each event in value satisfies this constraint.
"""
raise NotImplementedError
def __repr__(self):
return self.__class__.__name__[1:] + '()'
class _Dependent(Constraint):
"""
Placeholder for variables whose support depends on other variables.
These variables obey no simple coordinate-wise constraints.
"""
def check(self, x):
raise ValueError('Cannot determine validity of dependent constraint')
def is_dependent(constraint):
return isinstance(constraint, _Dependent)
class _DependentProperty(property, _Dependent):
"""
Decorator that extends @property to act like a `Dependent` constraint when
called on a class and act like a property when called on an object.
Example::
class Uniform(Distribution):
def __init__(self, low, high):
self.low = low
self.high = high
@constraints.dependent_property
def support(self):
return constraints.interval(self.low, self.high)
"""
pass
class _Boolean(Constraint):
"""
Constrain to the two values `{0, 1}`.
"""
def check(self, value):
return (value == 0) | (value == 1)
class _IntegerInterval(Constraint):
"""
Constrain to an integer interval `[lower_bound, upper_bound]`.
"""
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def check(self, value):
return (value % 1 == 0) & (self.lower_bound <= value) & (value <= self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
return fmt_string
class _IntegerLessThan(Constraint):
"""
Constrain to an integer interval `(-inf, upper_bound]`.
"""
def __init__(self, upper_bound):
self.upper_bound = upper_bound
def check(self, value):
return (value % 1 == 0) & (value <= self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(upper_bound={})'.format(self.upper_bound)
return fmt_string
class _IntegerGreaterThan(Constraint):
"""
Constrain to an integer interval `[lower_bound, inf)`.
"""
def __init__(self, lower_bound):
self.lower_bound = lower_bound
def check(self, value):
return (value % 1 == 0) & (value >= self.lower_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={})'.format(self.lower_bound)
return fmt_string
class _Real(Constraint):
"""
Trivially constrain to the extended real line `[-inf, inf]`.
"""
def check(self, value):
return value == value # False for NANs.
class _GreaterThan(Constraint):
"""
Constrain to a real half line `(lower_bound, inf]`.
"""
def __init__(self, lower_bound):
self.lower_bound = lower_bound
def check(self, value):
return self.lower_bound < value
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={})'.format(self.lower_bound)
return fmt_string
class _GreaterThanEq(Constraint):
"""
Constrain to a real half line `[lower_bound, inf)`.
"""
def __init__(self, lower_bound):
self.lower_bound = lower_bound
def check(self, value):
return self.lower_bound <= value
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={})'.format(self.lower_bound)
return fmt_string
class _LessThan(Constraint):
"""
Constrain to a real half line `[-inf, upper_bound)`.
"""
def __init__(self, upper_bound):
self.upper_bound = upper_bound
def check(self, value):
return value < self.upper_bound
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(upper_bound={})'.format(self.upper_bound)
return fmt_string
class _Interval(Constraint):
"""
Constrain to a real interval `[lower_bound, upper_bound]`.
"""
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def check(self, value):
return (self.lower_bound <= value) & (value <= self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
return fmt_string
class _HalfOpenInterval(Constraint):
"""
Constrain to a real interval `[lower_bound, upper_bound)`.
"""
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def check(self, value):
return (self.lower_bound <= value) & (value < self.upper_bound)
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += '(lower_bound={}, upper_bound={})'.format(self.lower_bound, self.upper_bound)
return fmt_string
class _Simplex(Constraint):
"""
Constrain to the unit simplex in the innermost (rightmost) dimension.
Specifically: `x >= 0` and `x.sum(-1) == 1`.
"""
def check(self, value):
return (value >= 0).all() & ((value.sum(-1, True) - 1).abs() < 1e-6).all()
class _LowerTriangular(Constraint):
"""
Constrain to lower-triangular square matrices.
"""
def check(self, value):
value_tril = value.tril()
return (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]
class _LowerCholesky(Constraint):
"""
Constrain to lower-triangular square matrices with positive diagonals.
"""
def check(self, value):
value_tril = value.tril()
lower_triangular = (value_tril == value).view(value.shape[:-2] + (-1,)).min(-1)[0]
positive_diagonal = (value.diagonal(dim1=-2, dim2=-1) > 0).min(-1)[0]
return lower_triangular & positive_diagonal
class _PositiveDefinite(Constraint):
"""
Constrain to positive-definite matrices.
"""
def check(self, value):
matrix_shape = value.shape[-2:]
batch_shape = value.unsqueeze(0).shape[:-2]
# TODO: replace with batched linear algebra routine when one becomes available
# note that `symeig()` returns eigenvalues in ascending order
flattened_value = value.reshape((-1,) + matrix_shape)
return torch.stack([v.symeig(eigenvectors=False)[0][:1] > 0.0
for v in flattened_value]).view(batch_shape)
class _RealVector(Constraint):
"""
Constrain to real-valued vectors. This is the same as `constraints.real`,
but additionally reduces across the `event_shape` dimension.
"""
def check(self, value):
return (value == value).all() # False for NANs.
# Public interface.
dependent = _Dependent()
dependent_property = _DependentProperty
boolean = _Boolean()
nonnegative_integer = _IntegerGreaterThan(0)
positive_integer = _IntegerGreaterThan(1)
integer_interval = _IntegerInterval
real = _Real()
real_vector = _RealVector()
positive = _GreaterThan(0.)
greater_than = _GreaterThan
greater_than_eq = _GreaterThanEq
less_than = _LessThan
unit_interval = _Interval(0., 1.)
interval = _Interval
half_open_interval = _HalfOpenInterval
simplex = _Simplex()
lower_triangular = _LowerTriangular()
lower_cholesky = _LowerCholesky()
positive_definite = _PositiveDefinite()
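# Illustrative sketch (not part of the original module): constraints are used by
# calling ``check`` on a tensor, which returns an elementwise mask (a byte tensor
# on older torch versions, a bool tensor on newer ones).
def _example_constraint_checks():
    value = torch.tensor([0.2, 0.7, 1.5])
    in_unit = unit_interval.check(value)   # [True, True, False]
    is_positive = positive.check(value)    # [True, True, True]
    return in_unit, is_positive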
| |
#!/usr/bin/env python
"""This file contains various utility classes used by GRR."""
import __builtin__
import base64
import copy
import functools
import os
import pipes
import Queue
import random
import re
import shlex
import shutil
import struct
import tarfile
import tempfile
import threading
import time
import zipfile
import zlib
def Proxy(f):
"""A helper to create a proxy method in a class."""
def Wrapped(self, *args):
return getattr(self, f)(*args)
return Wrapped
class TempDirectory(object):
"""A self cleaning temporary directory."""
def __enter__(self):
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.name, True)
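# Illustrative sketch (not part of the original module): the directory created
# by TempDirectory is removed as soon as the with-block exits. The helper name
# and file contents are hypothetical.
def _ExampleTempDirectory():
  """Writes a scratch file into a self-cleaning temporary directory."""
  with TempDirectory() as temp_dir:
    with open(os.path.join(temp_dir, "scratch.bin"), "wb") as fd:
      fd.write("data")
  # temp_dir and everything inside it has been deleted at this point.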
# This is a synchronize decorator.
def Synchronized(f):
"""Synchronization decorator."""
def NewFunction(self, *args, **kw):
with self.lock:
return f(self, *args, **kw)
return NewFunction
class InterruptableThread(threading.Thread):
"""A class which exits once the main thread exits."""
def __init__(self, target=None, args=None, kwargs=None, sleep_time=10, **kw):
self.exit = False
self.last_run = 0
self.target = target
self.args = args or ()
self.kwargs = kwargs or {}
self.sleep_time = sleep_time
super(InterruptableThread, self).__init__(**kw)
# Do not hold up program exit
self.daemon = True
def Iterate(self):
"""This will be repeatedly called between sleeps."""
def Stop(self):
self.exit = True
def run(self):
# When the main thread exits, the time module may already have been torn down
# and set to None, so we take a local reference to the functions we need.
sleep = time.sleep
now = time.time
while not self.exit:
if self.target:
self.target(*self.args, **self.kwargs)
else:
self.Iterate()
# Implement interruptible sleep here.
self.last_run = now()
# Exit if the main thread disappears.
while (time and not self.exit and
now() < self.last_run + self.sleep_time):
sleep(1)
class Node(object):
"""An entry to a linked list."""
next = None
prev = None
data = None
def __init__(self, key, data):
self.data = data
self.key = key
def __str__(self):
return "Node %s: %s" % (self.key, SmartStr(self.data))
def __repr__(self):
return SmartStr(self)
class LinkedList(object):
"""A simple doubly linked list used for fast caches."""
def __init__(self):
# We are the head node.
self.next = self.prev = self
self.size = 0
def AppendNode(self, node):
self.size += 1
last_node = self.prev
last_node.next = node
node.prev = last_node
node.next = self
self.prev = node
def PopLeft(self):
"""Returns the head node and removes it from the list."""
if self.next is self:
raise IndexError("Pop from empty list.")
first_node = self.next
self.Unlink(first_node)
return first_node
def Pop(self):
"""Returns the tail node and removes it from the list."""
if self.prev is self:
raise IndexError("Pop from empty list.")
last_node = self.prev
self.Unlink(last_node)
return last_node
def Unlink(self, node):
"""Removes a given node from the list."""
self.size -= 1
node.prev.next = node.next
node.next.prev = node.prev
node.next = node.prev = None
def __iter__(self):
p = self.next
while p is not self:
yield p
p = p.next
def __len__(self):
return self.size
def __str__(self):
p = self.next
s = []
while p is not self:
s.append(str(p.data))
p = p.next
return "[" + ", ".join(s) + "]"
def Print(self):
p = self.next
while p is not self:
print "%s: prev %r next %r\n" % (p.data, p.prev, p.next)
p = p.next
class FastStore(object):
"""This is a cache which expires objects in oldest first manner.
This implementation first appeared in PyFlag.
"""
def __init__(self, max_size=10):
"""Constructor.
Args:
max_size: The maximum number of objects held in cache.
"""
# This class implements a LRU cache which needs fast updates of the LRU
# order for random elements. This is usually implemented by using a
# dict for fast lookups and a linked list for quick deletions / insertions.
self._age = LinkedList()
self._hash = {}
self._limit = max_size
self.lock = threading.RLock()
def KillObject(self, obj):
"""Perform cleanup on objects when they expire.
Should be overridden by classes which need to perform special cleanup.
Args:
obj: The object which was stored in the cache and is now expired.
"""
@Synchronized
def __iter__(self):
return iter([(key, n.data) for key, n in self._hash.iteritems()])
@Synchronized
def Expire(self):
"""Expires old cache entries."""
while len(self._age) > self._limit:
node = self._age.PopLeft()
self._hash.pop(node.key, None)
self.KillObject(node.data)
@Synchronized
def Put(self, key, obj):
"""Add the object to the cache."""
# Remove the old entry if it is there.
node = self._hash.pop(key, None)
if node:
self._age.Unlink(node)
# Make a new node and insert it.
node = Node(key=key, data=obj)
self._hash[key] = node
self._age.AppendNode(node)
self.Expire()
return key
@Synchronized
def ExpireObject(self, key):
"""Expire a specific object from cache."""
node = self._hash.pop(key, None)
if node:
self._age.Unlink(node)
self.KillObject(node.data)
return node.data
@Synchronized
def ExpireRegEx(self, regex):
"""Expire all the objects with the key matching the regex."""
reg = re.compile(regex)
for key in list(self._hash):
if reg.match(key):
self.ExpireObject(key)
@Synchronized
def ExpirePrefix(self, prefix):
"""Expire all the objects with the key having a given prefix."""
for key in list(self._hash):
if key.startswith(prefix):
self.ExpireObject(key)
@Synchronized
def Pop(self, key):
"""Remove the object from the cache completely."""
node = self._hash.get(key)
if node:
self._age.Unlink(node)
return node.data
@Synchronized
def Get(self, key):
"""Fetch the object from cache.
Objects may be flushed from cache at any time. Callers must always
handle the possibility of KeyError raised here.
Args:
key: The key used to access the object.
Returns:
Cached object.
Raises:
KeyError: If the object is not present in the cache.
"""
if key not in self._hash:
raise KeyError(key)
node = self._hash[key]
self._age.Unlink(node)
self._age.AppendNode(node)
return node.data
@Synchronized
def __contains__(self, obj):
return obj in self._hash
@Synchronized
def __getitem__(self, key):
return self.Get(key)
@Synchronized
def Flush(self):
"""Flush all items from cache."""
while self._age:
node = self._age.PopLeft()
self.KillObject(node.data)
self._hash = dict()
@Synchronized
def __getstate__(self):
"""When pickled the cache is flushed."""
self.Flush()
return dict(max_size=self._limit)
def __setstate__(self, state):
self.__init__(max_size=state.get("max_size", 10))
def __len__(self):
return len(self._hash)
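# Illustrative sketch (not part of the original module): basic FastStore usage.
# Get() refreshes an entry's position in the LRU order, and Put() evicts the
# oldest entries once max_size is exceeded.
def _ExampleFastStoreUsage():
  cache = FastStore(max_size=2)
  cache.Put("a", 1)
  cache.Put("b", 2)
  cache.Put("c", 3)  # "a" is now expired and KillObject(1) has been called.
  return "a" in cache, cache.Get("b")  # (False, 2)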
class TimeBasedCache(FastStore):
"""A Cache which expires based on time."""
def __init__(self, max_size=10, max_age=600):
"""Constructor.
    This cache will refresh the age of the cached objects as long as they are
    accessed within the allowed age. The age refers to the time since the
    object was last touched.
Args:
max_size: The maximum number of objects held in cache.
max_age: The maximum length of time an object is considered alive.
"""
super(TimeBasedCache, self).__init__(max_size)
self.max_age = max_age
def HouseKeeper():
"""A housekeeper thread which expunges old objects."""
if not time:
# This might happen when the main thread exits, we don't want to raise.
return
now = time.time()
# Only expunge while holding the lock on the data store.
with self.lock:
# We need to take a copy of the value list because we are changing this
# dict during the iteration.
for node in self._hash.values():
timestamp, obj = node.data
# Expire the object if it is too old.
if timestamp + self.max_age < now:
self.KillObject(obj)
self._age.Unlink(node)
self._hash.pop(node.key, None)
# This thread is designed to never finish.
self.house_keeper_thread = InterruptableThread(target=HouseKeeper)
self.house_keeper_thread.start()
@Synchronized
def Get(self, key):
now = time.time()
stored = super(TimeBasedCache, self).Get(key)
if stored[0] + self.max_age < now:
raise KeyError("Expired")
# This updates the timestamp in place to keep the object alive
stored[0] = now
return stored[1]
def Put(self, key, obj):
super(TimeBasedCache, self).Put(key, [time.time(), obj])
@Synchronized
def __getstate__(self):
"""When pickled the cache is flushed."""
self.Flush()
return dict(max_size=self._limit, max_age=self.max_age)
def __setstate__(self, state):
self.__init__(max_size=state["max_size"], max_age=state["max_age"])
class Memoize(object):
"""A decorator to produce a memoizing version of a method f.
"""
def __init__(self, deep_copy=False):
"""Constructor.
Args:
deep_copy: Whether to perform a deep copy of the returned object.
Otherwise, a direct reference is returned.
"""
self.deep_copy = deep_copy
def __call__(self, f):
"""Produce a memoizing version of f.
    Requires that all parameters are hashable. Unless deep_copy was requested,
    the return value is not copied, so changes to a returned object may be
    visible in future invocations.
Args:
f: The function which will be wrapped.
Returns:
A wrapped function which memoizes all values returned by f, keyed by
the arguments to f.
"""
f.memo_pad = {}
f.memo_deep_copy = self.deep_copy
@functools.wraps(f)
def Wrapped(self, *args, **kwargs):
# We keep args and kwargs separate to avoid confusing an arg which is a
# pair with a kwarg. Also, we don't try to match calls when an argument
# moves between args and kwargs.
key = tuple(args), tuple(sorted(kwargs.items(), key=lambda x: x[0]))
if key not in f.memo_pad:
f.memo_pad[key] = f(self, *args, **kwargs)
if f.memo_deep_copy:
return copy.deepcopy(f.memo_pad[key])
else:
return f.memo_pad[key]
return Wrapped
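# Illustrative sketch (not part of the original module): Memoize is applied to
# instance methods whose arguments are all hashable. Note that the memo pad
# lives on the function object, so cached results are shared across instances.
# The class below is a hypothetical example.
class _ExampleMemoizeUsage(object):

  @Memoize(deep_copy=True)
  def Tokens(self, text):
    # Computed once per distinct `text`; deep_copy=True means each caller gets
    # an independent copy of the cached list.
    return text.split()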
class PickleableLock(object):
"""A lock which is safe to pickle."""
lock = None
def __init__(self):
self.lock = threading.RLock()
def __getstate__(self):
return True
def __setstate__(self, _):
self.lock = threading.RLock()
def __enter__(self):
return self.lock.__enter__()
def __exit__(self, exc_type, exc_value, traceback):
return self.lock.__exit__(exc_type, exc_value, traceback)
class AgeBasedCache(TimeBasedCache):
"""A cache which holds objects for a maximum length of time.
This differs from the TimeBasedCache which keeps the objects alive as long as
they are accessed.
"""
@Synchronized
def Get(self, key):
now = time.time()
stored = FastStore.Get(self, key)
if stored[0] + self.max_age < now:
raise KeyError("Expired")
return stored[1]
class Struct(object):
"""A baseclass for parsing binary Structs."""
# Derived classes must initialize this into an array of (format,
# name) tuples.
_fields = None
def __init__(self, data):
"""Parses ourselves from data."""
format_str = "".join([x[0] for x in self._fields])
self.size = struct.calcsize(format_str)
try:
parsed_data = struct.unpack(format_str, data[:self.size])
except struct.error:
raise RuntimeError("Unable to parse")
for i in range(len(self._fields)):
setattr(self, self._fields[i][1], parsed_data[i])
def __repr__(self):
"""Produce useful text representation of the Struct."""
dat = []
for _, name in self._fields:
dat.append("%s=%s" % (name, getattr(self, name)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(dat))
@classmethod
def GetSize(cls):
"""Calculate the size of the struct."""
format_str = "".join([x[0] for x in cls._fields])
return struct.calcsize(format_str)
def GroupBy(items, key):
"""A generator that groups all items by a key.
Args:
items: A list of items or a single item.
key: A function which given each item will return the key.
Returns:
A dict with keys being each unique key and values being a list of items of
that key.
"""
key_map = {}
# Make sure we are given a sequence of items here.
try:
item_iter = iter(items)
except TypeError:
item_iter = [items]
for item in item_iter:
key_id = key(item)
key_map.setdefault(key_id, []).append(item)
return key_map
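# Illustrative sketch (not part of the original module): GroupBy returns a dict
# keyed by key(item); a single non-iterable item is treated as a one-element
# sequence.
def _ExampleGroupByUsage():
  by_parity = GroupBy([1, 2, 3, 4, 5], key=lambda x: x % 2)
  return by_parity  # {1: [1, 3, 5], 0: [2, 4]}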
def SmartStr(string):
"""Returns a string or encodes a unicode object.
  This function will always return an encoded (byte) string. It should be used
  on any interface to the system which must accept a string and not unicode.
Args:
string: The string to convert.
Returns:
an encoded string.
"""
if type(string) == unicode:
return string.encode("utf8", "ignore")
return str(string)
def SmartUnicode(string):
"""Returns a unicode object.
This function will always return a unicode object. It should be used to
guarantee that something is always a unicode object.
Args:
string: The string to convert.
Returns:
a unicode object.
"""
if type(string) != unicode:
try:
return string.__unicode__()
except (AttributeError, UnicodeError):
return str(string).decode("utf8", "ignore")
return string
def Xor(string, key):
"""Returns a string where each character has been xored with key."""
return "".join([chr(c ^ key) for c in bytearray(string)])
def XorByteArray(array, key):
"""Xors every item in the array with key and returns it."""
for i in xrange(len(array)):
array[i] ^= key
return array
def FormatAsHexString(num, width=None, prefix="0x"):
"""Takes an int and returns the number formatted as a hex string."""
# Strip "0x".
hex_str = hex(num)[2:]
# Strip "L" for long values.
hex_str = hex_str.replace("L", "")
if width:
hex_str = hex_str.rjust(width, "0")
return "%s%s" % (prefix, hex_str)
def FormatAsTimestamp(timestamp):
if not timestamp:
return "-"
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timestamp))
def NormalizePath(path, sep="/"):
"""A sane implementation of os.path.normpath.
  The standard implementation treats leading / and // as different, leading to
  incorrect normal forms.
  NOTE: It's ok to use a relative path here (without a leading /) but any /../
  will still be removed, anchoring the path at the top level (e.g.
  foo/../../../../bar => bar).
Args:
path: The path to normalize.
sep: Separator used.
Returns:
A normalized path. In this context normalized means that all input paths
that would result in the system opening the same physical file will produce
the same normalized path.
"""
if not path:
return sep
path = SmartUnicode(path)
path_list = path.split(sep)
# This is a relative path and the first element is . or ..
if path_list[0] in [".", "..", ""]:
path_list.pop(0)
# Deliberately begin at index 1 to preserve a single leading /
i = 0
while True:
list_len = len(path_list)
# We begin at the last known good position so we never iterate over path
# elements which are already examined
for i in range(i, len(path_list)):
# Remove /./ form
if path_list[i] == "." or not path_list[i]:
path_list.pop(i)
break
# Remove /../ form
elif path_list[i] == "..":
path_list.pop(i)
# Anchor at the top level
if (i == 1 and path_list[0]) or i > 1:
i -= 1
path_list.pop(i)
break
    # If we didn't alter the path so far we can quit
if len(path_list) == list_len:
return sep + sep.join(path_list)
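# Illustrative sketch (not part of the original module): expected behaviour of
# NormalizePath on a few representative inputs.
def _ExampleNormalizePathUsage():
  assert NormalizePath("//foo///bar/./baz") == "/foo/bar/baz"
  assert NormalizePath("foo/../../../../bar") == "/bar"
  assert NormalizePath("") == "/"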
def JoinPath(stem="", *parts):
"""A sane version of os.path.join.
The intention here is to append the stem to the path. The standard module
removes the path if the stem begins with a /.
Args:
stem: The stem to join to.
*parts: parts of the path to join. The first arg is always the root and
directory traversal is not allowed.
Returns:
a normalized path.
"""
# Ensure all path components are unicode
parts = [SmartUnicode(path) for path in parts]
result = (stem + NormalizePath(u"/".join(parts))).replace("//", "/")
result = result.rstrip("/")
return result or "/"
def GuessWindowsFileNameFromString(str_in):
"""Take a commandline string and guess the file path.
Commandline strings can be space separated and contain options.
e.g. C:\\Program Files\\ACME Corporation\\wiz.exe /quiet /blah
See here for microsoft doco on commandline parsing:
http://msdn.microsoft.com/en-us/library/windows/desktop/ms682425(v=vs.85).aspx
Args:
str_in: commandline string
Returns:
list of candidate filename strings.
"""
guesses = []
current_str = ""
# If paths are quoted as recommended, just use that path.
if str_in.startswith(("\"", "'")):
components = shlex.split(str_in)
guesses = [components[0]]
# If first component has something like "rundll" in it, we expect the
# next one to point at a DLL and a function. For example:
# rundll32.exe "C:\Windows\system32\advpack.dll",DelNodeRunDLL32
if "rundll" in guesses[0] and len(components) > 1:
guesses.append(components[1].rsplit(",", 1)[0])
else:
components = str_in.split(" ")
while components:
component = components.pop(0)
if current_str:
current_str = " ".join((current_str, component))
else:
current_str = component
guesses.append(current_str)
# If current str contains something like "rundll" in it, we expect the
# rest of the string to point to a DLL and a function. We don't know
# if the rest of the string is quoted or not, so we continue
# recursively.
if "rundll" in current_str:
for guess in GuessWindowsFileNameFromString(" ".join(components)):
guesses.append(guess.rsplit(",", 1)[0])
break
return guesses
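# Illustrative sketch (not part of the original module): for an unquoted
# commandline every space-separated prefix is a candidate path, since the
# executable name itself may contain spaces. The path below is a hypothetical
# example.
def _ExampleGuessWindowsFileNameUsage():
  guesses = GuessWindowsFileNameFromString(
      r"C:\Program Files\ACME\wiz.exe /quiet")
  # guesses == [r"C:\Program",
  #             r"C:\Program Files\ACME\wiz.exe",
  #             r"C:\Program Files\ACME\wiz.exe /quiet"]
  return guesses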
def ShellQuote(value):
"""Escapes the string for the safe use inside shell command line."""
# TODO(user): replace pipes.quote with shlex.quote when time comes.
return pipes.quote(SmartUnicode(value))
def Join(*parts):
"""Join (AFF4) paths without normalizing.
A quick join method that can be used to express the precondition that
the parts are already normalized.
Args:
*parts: The parts to join
Returns:
The joined path.
"""
return "/".join(parts)
def Grouper(iterable, n):
"""Group iterable into lists of size n. Last list will be short."""
items = []
for count, item in enumerate(iterable):
items.append(item)
if (count + 1) % n == 0:
yield items
items = []
if items:
yield items
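# Illustrative sketch (not part of the original module): Grouper yields
# successive lists of at most n items, the last one possibly shorter.
def _ExampleGrouperUsage():
  return list(Grouper(range(5), 2))  # [[0, 1], [2, 3], [4]]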
def EncodeReasonString(reason):
return base64.urlsafe_b64encode(SmartStr(reason))
def DecodeReasonString(reason):
return SmartUnicode(base64.urlsafe_b64decode(SmartStr(reason)))
# Regex chars that should not be in a regex
disallowed_chars = re.compile(r"[[\](){}+*?.$^\\]")
def EscapeRegex(string):
return re.sub(disallowed_chars,
lambda x: "\\" + x.group(0),
SmartUnicode(string))
def GeneratePassphrase(length=20):
"""Create a 20 char passphrase with easily typeable chars."""
valid_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
valid_chars += "0123456789 ,-_&$#"
return "".join(random.choice(valid_chars) for i in range(length))
class PRNG(object):
"""An optimized PRNG."""
random_list = []
@classmethod
def GetUShort(cls):
return cls.GetULong() & 0xFFFF
@classmethod
def GetULong(cls):
while True:
try:
return cls.random_list.pop()
except IndexError:
PRNG.random_list = list(
struct.unpack("=" + "L" * 1000,
os.urandom(struct.calcsize("=L") * 1000)))
def FormatNumberAsString(num):
"""Return a large number in human readable form."""
for suffix in ["b", "KB", "MB", "GB"]:
if num < 1024.0:
return "%3.2f%s" % (num, suffix)
num /= 1024.0
return "%3.1f%s" % (num, "TB")
class NotAValue(object):
pass
def issubclass(obj, cls): # pylint: disable=redefined-builtin,g-bad-name
"""A sane implementation of issubclass.
See http://bugs.python.org/issue10569
Python bare issubclass must be protected by an isinstance test first since it
can only work on types and raises when provided something which is not a type.
Args:
obj: Any object or class.
cls: The class to check against.
Returns:
True if obj is a subclass of cls and False otherwise.
"""
return isinstance(obj, type) and __builtin__.issubclass(obj, cls)
class HeartbeatQueue(Queue.Queue):
"""A queue that periodically calls a provided callback while waiting."""
def __init__(self, callback=None, fast_poll_time=60, *args, **kw):
Queue.Queue.__init__(self, *args, **kw)
self.callback = callback or (lambda: None)
self.last_item_time = time.time()
self.fast_poll_time = fast_poll_time
def get(self, poll_interval=5):
while True:
try:
# Using Queue.get() with a timeout is really expensive - Python uses
# busy waiting that wakes up the process every 50ms - so we switch
# to a more efficient polling method if there is no activity for
# <fast_poll_time> seconds.
if time.time() - self.last_item_time < self.fast_poll_time:
message = Queue.Queue.get(self, block=True, timeout=poll_interval)
else:
time.sleep(poll_interval)
message = Queue.Queue.get(self, block=False)
break
except Queue.Empty:
self.callback()
self.last_item_time = time.time()
return message
class StreamingZipWriter(object):
"""A streaming zip file writer which can copy from file like objects.
The streaming writer should be capable of compressing files of arbitrary
size without eating all the memory. It's built on top of Python's zipfile
  module, but has to use some hacks, as the standard library doesn't provide
  all the APIs necessary to do streaming writes.
"""
def __init__(self, fd_or_path, mode="w", compression=zipfile.ZIP_STORED):
"""Open streaming ZIP file with mode read "r", write "w" or append "a".
Args:
fd_or_path: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by
ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
"""
self.zip_fd = zipfile.ZipFile(fd_or_path, mode,
compression=zipfile.ZIP_STORED,
allowZip64=True)
self.out_fd = self.zip_fd.fp
self.compression = compression
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Close()
def Close(self):
self.zip_fd.close()
def GenerateZipInfo(self, arcname=None, compress_type=None, st=None):
"""Generate ZipInfo instance for the given name, compression and stat.
Args:
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Returns:
ZipInfo instance.
Raises:
ValueError: If arcname is not provided.
"""
# Fake stat response.
if st is None:
st = os.stat_result((0100644, 0, 0, 0, 0, 0, 0, 0, 0, 0))
mtime = time.localtime(st.st_mtime or time.time())
date_time = mtime[0:6]
# Create ZipInfo instance to store file information
if arcname is None:
raise ValueError("An arcname must be provided.")
zinfo = zipfile.ZipInfo(arcname, date_time)
zinfo.external_attr = (st[0] & 0xFFFF) << 16L # Unix attributes
if compress_type is None:
zinfo.compress_type = self.compression
else:
zinfo.compress_type = compress_type
zinfo.file_size = 0
zinfo.compress_size = 0
zinfo.flag_bits = 0x08 # Setting data descriptor flag.
zinfo.CRC = 0x08074b50 # Predefined CRC for archives using data
# descriptors.
# This fills an empty Info-ZIP Unix extra field.
zinfo.extra = struct.pack("<HHIIHH", 0x5855, 12,
0, # time of last access (UTC/GMT)
0, # time of last modification (UTC/GMT)
0, # user ID
0) # group ID
return zinfo
def WriteSymlink(self, src_arcname, dst_arcname):
"""Writes a symlink into the archive."""
# Inspired by:
# http://www.mail-archive.com/python-list@python.org/msg34223.html
src_arcname = SmartStr(src_arcname)
dst_arcname = SmartStr(dst_arcname)
zinfo = zipfile.ZipInfo(dst_arcname)
# This marks a symlink.
zinfo.external_attr = (0644 | 0120000) << 16
# This marks create_system as UNIX.
zinfo.create_system = 3
# This fills the ASi UNIX extra field, see:
# http://www.opensource.apple.com/source/zip/zip-6/unzip/unzip/proginfo/extra.fld
    zinfo.extra = struct.pack("<HHIHIHH%ds" % len(src_arcname),
                              0x756e, len(src_arcname) + 14,
                              0,  # CRC-32 of the remaining data
                              0120000,  # file permissions
                              0,  # target file size
                              0,  # user ID
                              0,  # group ID
                              src_arcname)
self.zip_fd.writestr(zinfo, src_arcname)
def WriteFromFD(self, src_fd, arcname=None, compress_type=None, st=None):
"""Write a zip member from a file like object.
Args:
src_fd: A file like object, must support seek(), tell(), read().
arcname: The name in the archive this should take.
compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED)
st: An optional stat object to be used for setting headers.
Raises:
      RuntimeError: If the zip is already closed.
"""
zinfo = self.GenerateZipInfo(arcname=arcname, compress_type=compress_type,
st=st)
crc = 0
compress_size = 0
if not self.out_fd:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
zinfo.header_offset = self.out_fd.tell()
    # Call _writecheck(zinfo) to do sanity checking on the zinfo structure that
    # we've constructed.
self.zip_fd._writecheck(zinfo) # pylint: disable=protected-access
# Mark ZipFile as dirty. We have to keep self.zip_fd's internal state
# coherent so that it behaves correctly when close() is called.
self.zip_fd._didModify = True # pylint: disable=protected-access
    # Write FileHeader now. It's incomplete, but CRC and uncompressed/compressed
    # sizes will be written later in the data descriptor.
self.out_fd.write(zinfo.FileHeader())
if zinfo.compress_type == zipfile.ZIP_DEFLATED:
cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
else:
cmpr = None
file_size = 0
while 1:
buf = src_fd.read(1024 * 8)
if not buf:
break
file_size += len(buf)
crc = zipfile.crc32(buf, crc) & 0xffffffff
if cmpr:
buf = cmpr.compress(buf)
compress_size += len(buf)
self.out_fd.write(buf)
if cmpr:
buf = cmpr.flush()
compress_size += len(buf)
zinfo.compress_size = compress_size
self.out_fd.write(buf)
else:
zinfo.compress_size = file_size
zinfo.CRC = crc
zinfo.file_size = file_size
if file_size > zipfile.ZIP64_LIMIT or compress_size > zipfile.ZIP64_LIMIT:
      # Writing data descriptor ZIP64-way:
      # crc-32                          4 bytes (little endian)
      # compressed size                 8 bytes (little endian)
      # uncompressed size               8 bytes (little endian)
      self.out_fd.write(struct.pack("<LQQ", crc, compress_size, file_size))
else:
# Writing data descriptor non-ZIP64-way:
# crc-32 4 bytes (little endian)
# compressed size 4 bytes (little endian)
# uncompressed size 4 bytes (little endian)
self.out_fd.write(struct.pack("<III", crc, compress_size, file_size))
# Register the file in the zip file, so that central directory gets
# written correctly.
self.zip_fd.filelist.append(zinfo)
self.zip_fd.NameToInfo[zinfo.filename] = zinfo
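# Illustrative sketch (not part of the original module): streaming an
# in-memory payload into a zip archive. The archive member name and payload
# below are hypothetical examples.
def _ExampleStreamingZipWriterUsage():
  import io
  out_fd = io.BytesIO()
  with StreamingZipWriter(out_fd, mode="w",
                          compression=zipfile.ZIP_DEFLATED) as writer:
    writer.WriteFromFD(io.BytesIO("some payload"), arcname="payload.txt")
  return out_fd.getvalue()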
class StreamingTarWriter(object):
"""A streaming tar file writer which can copy from file like objects.
The streaming writer should be capable of compressing files of arbitrary
size without eating all the memory. It's built on top of Python's tarfile
module.
"""
def __init__(self, fd_or_path, mode="w"):
if hasattr(fd_or_path, "write"):
self.tar_fd = tarfile.open(mode=mode, fileobj=fd_or_path,
encoding="utf-8")
else:
self.tar_fd = tarfile.open(name=fd_or_path, mode=mode, encoding="utf-8")
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Close()
def Close(self):
self.tar_fd.close()
def WriteSymlink(self, src_arcname, dst_arcname):
"""Writes a symlink into the archive."""
info = self.tar_fd.tarinfo()
info.tarfile = self.tar_fd
info.name = SmartStr(dst_arcname)
info.size = 0
info.mtime = time.time()
info.type = tarfile.SYMTYPE
info.linkname = SmartStr(src_arcname)
self.tar_fd.addfile(info)
def WriteFromFD(self, src_fd, arcname=None, st=None):
"""Write an archive member from a file like object.
Args:
src_fd: A file like object, must support seek(), tell(), read().
arcname: The name in the archive this should take.
st: A stat object to be used for setting headers.
Raises:
ValueError: If st is omitted.
"""
if st is None:
raise ValueError("Stat object can't be None.")
info = self.tar_fd.tarinfo()
info.tarfile = self.tar_fd
info.type = tarfile.REGTYPE
info.name = SmartStr(arcname)
info.size = st.st_size
info.mode = st.st_mode
info.mtime = st.st_mtime or time.time()
self.tar_fd.addfile(info, src_fd)
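# Illustrative sketch (not part of the original module): streaming an
# in-memory payload into a tar archive. WriteFromFD needs a stat result whose
# st_size matches the payload length; the names below are hypothetical.
def _ExampleStreamingTarWriterUsage():
  import io
  data = "some payload"
  fake_stat = os.stat_result((0100644, 0, 0, 0, 0, 0, len(data), 0, 0, 0))
  out_fd = io.BytesIO()
  with StreamingTarWriter(out_fd, mode="w") as writer:
    writer.WriteFromFD(io.BytesIO(data), arcname="payload.txt", st=fake_stat)
  return out_fd.getvalue()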
class Stubber(object):
"""A context manager for doing simple stubs."""
def __init__(self, module, target_name, stub):
self.target_name = target_name
self.module = module
self.stub = stub
def __enter__(self):
self.Start()
def Stop(self):
setattr(self.module, self.target_name, self.old_target)
def Start(self):
self.old_target = getattr(self.module, self.target_name, None)
try:
self.stub.old_target = self.old_target
except AttributeError:
pass
setattr(self.module, self.target_name, self.stub)
def __exit__(self, unused_type, unused_value, unused_traceback):
self.Stop()
class MultiStubber(object):
"""A context manager for doing simple stubs."""
def __init__(self, *args):
self.stubbers = [Stubber(*x) for x in args]
def Start(self):
for x in self.stubbers:
x.Start()
def Stop(self):
for x in self.stubbers:
x.Stop()
def __enter__(self):
self.Start()
def __exit__(self, t, value, traceback):
self.Stop()
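# Illustrative sketch (not part of the original module): Stubber temporarily
# replaces a module attribute and restores it on exit, e.g. faking time.time
# in a test. The stub used below is a hypothetical example.
def _ExampleStubberUsage():
  with Stubber(time, "time", lambda: 0.0):
    assert time.time() == 0.0
  # The original time.time is restored here.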
class DataObject(dict):
"""This class wraps a dict and provides easier access functions."""
def Register(self, item, value=None):
if item in self:
raise AttributeError("Item %s already registered." % item)
self[item] = value
def __setattr__(self, item, value):
self[item] = value
def __getattr__(self, item):
try:
return self[item]
except KeyError as e:
raise AttributeError(e)
def __dir__(self):
return sorted(self.keys()) + dir(self.__class__)
def __str__(self):
result = []
for k, v in self.items():
tmp = " %s = " % k
try:
for line in SmartUnicode(v).splitlines():
tmp += " %s\n" % line
except Exception as e: # pylint: disable=broad-except
tmp += "Error: %s\n" % e
result.append(tmp)
return "{\n%s}\n" % "".join(result)
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Utilities for working with the HMMER software package.
The HMMER suite of programs provides utilities to build HMM profiles from
alignments of protein sequences, and evaluate the predicted class membership for
sets of unaligned protein sequences. The HMMER suite can be installed by running
apt-get install hmmer
The HMMER manual can be found at
http://eddylab.org/software/hmmer/Userguide.pdf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
from Bio.SeqIO import FastaIO
import pandas as pd
import tensorflow.compat.v1 as tf
from tqdm import tqdm
import util
NO_SEQUENCE_MATCH_SCORE_SENTINEL = 0.
NO_SEQUENCE_MATCH_DOMAIN_EVALUE_SENTINEL = -1.
NO_SEQUENCE_MATCH_FAMILY_NAME_SENTINEL = 'PF00000.0'
NO_SEQUENCE_MATCH_SEQUENCE_NAME_SENTINEL = 'no_sequence/0-0'
DATAFRAME_SCORE_NAME_KEY = 'score'
DATAFRAME_DOMAIN_EVALUE_NAME_KEY = 'domain_evalue'
HMMER_OUTPUT_CSV_COLUMN_HEADERS = util.PREDICTION_FILE_COLUMN_NAMES + [
DATAFRAME_DOMAIN_EVALUE_NAME_KEY
]
def get_sequence_name_from(seq_name_and_family):
"""Get sequence name from concatenated sequence name and family string.
Args:
seq_name_and_family: string. Of the form `sequence_name`_`family_accession`,
like OLF1_CHICK/41-290_PF00001.20. Output would be OLF1_CHICK/41-290.
Returns:
string. Sequence name.
"""
return '_'.join(seq_name_and_family.split('_')[0:2])
def get_family_name_from(seq_name_and_family):
"""Get family accession from concatenated sequence name and family string.
Args:
seq_name_and_family: string. Of the form `sequence_name`_`family_accession`,
like OLF1_CHICK/41-290_PF00001.20. Assumes the family does not have an
underscore.
Returns:
string. PFam family accession.
"""
return seq_name_and_family.split('_')[-1]
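# Illustrative sketch (not part of the original utilities): how the
# concatenated sequence-name/family strings from the docstrings above are
# split by the two helpers.
def _example_name_parsing():
  seq_and_family = 'OLF1_CHICK/41-290_PF00001.20'
  assert get_sequence_name_from(seq_and_family) == 'OLF1_CHICK/41-290'
  assert get_family_name_from(seq_and_family) == 'PF00001.20'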
def get_name_from_seq_filename(seq_filename):
"""Get sequence name from the name of an individual sequence fasta filename.
Args:
seq_filename: string. Of the form 'sequence_name'.fasta, like
OLF1_CHICK/41-290.fasta.
Returns:
string. Sequence name.
"""
return seq_filename.split('.')[0]
def get_family_from(hmm_filepath):
"""Get family name from the name of an individual family hmm filename.
Args:
hmm_filepath: string. Of the form '~/family_name'.hmm, like
hmm_files/PF00001.21.hmm.
Returns:
string. Family name.
"""
hmm_filename = hmm_filepath.split('/')[-1]
return '.'.join(hmm_filename.split('.')[0:2])
class HMMEROutput(
collections.namedtuple('HMMEROutput', HMMER_OUTPUT_CSV_COLUMN_HEADERS)):
"""Parsed tblout output from HMMER.
Args:
sequence_name: str. the query sequence.
true_label: str. the family that the sequence belongs to.
predicted_label: str. the predicted family.
score: float. the score of this match.
"""
__slots__ = ()
def format_as_csv(self):
"""Convert HMMEROutput to csv."""
return ','.join([
self.sequence_name,
self.true_label,
self.predicted_label,
str(self.score),
str(self.domain_evalue),
])
def parse_hmmer_output(hmmer_output, query_identifier):
"""Return HMMEROutput from the text output of a hmmer binary.
Args:
hmmer_output: string. The output of running a hmmer binary.
query_identifier: string. Identity of the query sequence or profile family.
Returns:
list of HMMEROutputs. If none, returns a 'no result' HMMEROutput; this will
be populated differently for phmmer and hmmer, because their use cases
differ. If hmmer_output is mal-formed, returns [].
"""
outputs = []
all_lines = hmmer_output.split('\n')
  # Remove blank lines.
  hmmer_output_lines = [line for line in all_lines if line]
for i, line in enumerate(hmmer_output_lines):
if ('--- full sequence ----' in line) and (
'# Program:' in hmmer_output_lines[i + 4]):
# In this case, there was no match above the inclusion threshold, so
# we say there isn't a prediction at all.
sequence = NO_SEQUENCE_MATCH_SEQUENCE_NAME_SENTINEL
true_family = NO_SEQUENCE_MATCH_FAMILY_NAME_SENTINEL
predicted_family = query_identifier
score = NO_SEQUENCE_MATCH_SCORE_SENTINEL
domain_evalue = NO_SEQUENCE_MATCH_DOMAIN_EVALUE_SENTINEL
outputs.append(
HMMEROutput(
sequence_name=sequence,
true_label=true_family,
predicted_label=predicted_family,
score=score,
domain_evalue=domain_evalue,
))
return outputs
else:
# There is some output, find it.
if '#' in line:
# This is a comment line, not an output line.
pass
else:
# This is an output line. The sequence name is found in the 1st field,
# formatted as: MT4_CANLF/1-62_PF00131.19
seq_name_and_family = line.split()[0]
sequence = get_sequence_name_from(seq_name_and_family)
true_family = get_family_name_from(seq_name_and_family)
domain_evalue = float(line.split()[4])
score = float(line.split()[5])
predicted_family = line.split()[2]
outputs.append(
HMMEROutput(
sequence_name=sequence,
true_label=true_family,
predicted_label=predicted_family,
score=score,
domain_evalue=domain_evalue))
return outputs
def _get_sentinel_phmmer_output(query_identifier):
return HMMEROutput(
sequence_name=get_sequence_name_from(query_identifier),
true_label=get_family_name_from(query_identifier),
predicted_label=NO_SEQUENCE_MATCH_FAMILY_NAME_SENTINEL,
score=NO_SEQUENCE_MATCH_SCORE_SENTINEL,
domain_evalue=NO_SEQUENCE_MATCH_DOMAIN_EVALUE_SENTINEL,
)
def _report_hmmer_outputs_for_set_difference(seen_identifiers, all_identifiers):
"""Return HMMEROutputs for identifiers that were not seen in phmmer output.
Args:
seen_identifiers: iterable of string. All the query identifiers seen in the
output of phmmer.
all_identifiers: iterable of string. All fasta entry identifiers passed into
phmmer. These should be formatted seqName_actualFamily.
Returns:
List of HMMEROutput. Sentinel values for query identifiers in
all_identifiers that are not in seen_identifiers.
"""
outputs = []
for identifier in all_identifiers:
if identifier not in seen_identifiers:
outputs.append(_get_sentinel_phmmer_output(identifier))
return outputs
def _phmmer_output_line_to_hmmer_output(line):
"""Convert line of output of phmmer to a HMMEROutput.
Args:
line: Line from running phmmer --tblout. See section "Tabular output
formats" of
http://eddylab.org/software/hmmer/Userguide.pdf.
Returns:
HMMEROutput.
"""
# This is an output line.
# The query sequence name is found in the 3rd field, formatted as:
# MT4_CANLF/1-62_PF00131.19
query_seq_name_and_family = line.split()[2]
query_sequence = get_sequence_name_from(query_seq_name_and_family)
true_family = get_family_name_from(query_seq_name_and_family)
domain_evalue = float(line.split()[4])
score = float(line.split()[5])
# The matching sequence name is found in the 1st field, formatted as:
# MT4_CANLF/1-62_PF00131.19
matching_seq_name_and_family = line.split()[0]
predicted_family = get_family_name_from(matching_seq_name_and_family)
return HMMEROutput(
sequence_name=query_sequence,
true_label=true_family,
predicted_label=predicted_family,
score=score,
domain_evalue=domain_evalue,
)
def parse_phmmer_output(hmmer_output, query_identifiers):
"""Return list of HMMEROutput from stdout of phmmer.
Args:
hmmer_output: stdout of phmmer --tblout.
query_identifiers: all fasta entry identifiers passed into phmmer. These
should be formatted seqName_actualFamily.
Returns:
list of HMMEROutput. Entries in query_identifiers that were not in the
hmmer_output (those for which there was not a match) are reported with
sentinel values (see _get_sentinel_phmmer_output).
"""
query_identifiers_seen = set()
outputs = []
all_lines = hmmer_output.split('\n')
for line in all_lines:
is_output_line = (
line and # Not a blank line.
# Lines beginning with '#' are comment lines, not output lines.
'#' not in line)
if is_output_line:
hmmer_output = _phmmer_output_line_to_hmmer_output(line)
query_identifiers_seen.add(hmmer_output.sequence_name + '_' +
hmmer_output.true_label)
outputs.append(hmmer_output)
return outputs + _report_hmmer_outputs_for_set_difference(
seen_identifiers=query_identifiers_seen,
all_identifiers=query_identifiers)
def yield_top_el_by_score_for_each_sequence_name(hmmer_predictions):
"""Return the predictions with the top full sequence scores."""
logging.info('Picking top prediction by score in hmmer predictions.')
grouped_duplicates = hmmer_predictions.groupby(
util.DATAFRAME_SEQUENCE_NAME_KEY)
for _, group in tqdm(grouped_duplicates, position=0):
max_el = group.loc[group[DATAFRAME_SCORE_NAME_KEY].idxmax()]
yield pd.DataFrame(
data={
util.PREDICTED_LABEL_KEY: [max_el.predicted_label],
util.TRUE_LABEL_KEY: [max_el.true_label],
util.DATAFRAME_SEQUENCE_NAME_KEY: [max_el.sequence_name],
DATAFRAME_SCORE_NAME_KEY: [max_el.score],
DATAFRAME_DOMAIN_EVALUE_NAME_KEY: [max_el.domain_evalue]
})
def sequences_with_no_prediction(hmmer_predictions, all_sequence_names):
"""Returns sequence names that are not in hmmer_predictions.
Args:
hmmer_predictions: pd.DataFrame with the column
util.DATAFRAME_SEQUENCE_NAME_KEY.
all_sequence_names: iterable of string.
Returns:
set of string.
"""
sequence_names_with_predictions = hmmer_predictions.sequence_name.values
all_sequence_names_without_family = set(
get_sequence_name_from(s) for s in all_sequence_names)
return set(all_sequence_names_without_family) - set(
sequence_names_with_predictions)
def all_sequence_names_from_fasta_file(input_fasta_file_name):
"""Returns all sequence names from a fasta file.
Args:
input_fasta_file_name: string.
Returns:
list of string.
"""
with tf.io.gfile.GFileText(input_fasta_file_name) as input_file:
return [
get_sequence_name_from(protein_name_incl_family)
for protein_name_incl_family, _ in FastaIO.SimpleFastaParser(input_file)
]
def filter_fasta_file_by_sequence_name(input_fasta_file_name,
acceptable_sequence_names):
"""Yield only entries from a fasta file that are in acceptable_sequence_names.
Args:
input_fasta_file_name: string. This file should contain fasta entries that
are formatted seqName_actualFamily, as above.
    acceptable_sequence_names: iterable of string. This set contains just
      seqName (without the actualFamily suffix used in `input_fasta_file_name`).
Yields:
strings, each of which is an entry for a fasta file.
"""
acceptable_sequence_names = set(acceptable_sequence_names)
with tf.io.gfile.GFileText(input_fasta_file_name) as input_file:
for protein_name, sequence in FastaIO.SimpleFastaParser(input_file):
if get_sequence_name_from(protein_name) in acceptable_sequence_names:
yield '>' + protein_name + '\n' + sequence + '\n'
def parse_domtblout(domtblout_text):
"""Parses hmmer output from flag --domtblout.
Args:
domtblout_text: output from hmmer. See test.
Returns:
pd.DataFrame with columns
full_sequence_name (str) - e.g. pfamseq accession without start and end
sequence_name (str) - full_sequence_name with start and end
sequence_start (int) - 1-index based (not 0-index)
sequence_end (int) - 1-index based (not 0-index)
predicted_label (str) - name of hmm profile, e.g. PF00001
domain_evalue_score (float) - independent evalue for the domain
(not the whole sequence!)
domain_bit_score (float) - bit score for the domain
(not the whole sequence!)
"""
domtblout_rows = collections.defaultdict(list)
for line in domtblout_text.split('\n'):
if line.startswith('#') or not line:
continue
split = line.split()
# Parse columns.
full_sequence_name = split[0]
sequence_start = int(split[17])
sequence_end = int(split[18])
sequence_name = f'{full_sequence_name}/{sequence_start}-{sequence_end}'
hmm_label = split[4]
domain_evalue_score = float(split[12])
domain_bit_score = float(split[13])
# Assign columns values.
domtblout_rows['full_sequence_name'].append(full_sequence_name)
domtblout_rows['sequence_name'].append(sequence_name)
domtblout_rows['sequence_start'].append(sequence_start)
domtblout_rows['sequence_end'].append(sequence_end)
domtblout_rows['predicted_label'].append(hmm_label)
domtblout_rows['domain_evalue_score'].append(domain_evalue_score)
domtblout_rows['domain_bit_score'].append(domain_bit_score)
domtblout_df = pd.DataFrame(domtblout_rows)
return domtblout_df
| |
"""gallium
Frontend-tool for Gallium3D architecture.
"""
#
# Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sub license, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice (including the
# next paragraph) shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
# IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import distutils.version
import os
import os.path
import re
import subprocess
import platform as _platform
import SCons.Action
import SCons.Builder
import SCons.Scanner
def symlink(target, source, env):
target = str(target[0])
source = str(source[0])
if os.path.islink(target) or os.path.exists(target):
os.remove(target)
os.symlink(os.path.basename(source), target)
def install(env, source, subdir):
target_dir = os.path.join(env.Dir('#.').srcnode().abspath, env['build_dir'], subdir)
return env.Install(target_dir, source)
def install_program(env, source):
return install(env, source, 'bin')
def install_shared_library(env, sources, version = ()):
targets = []
install_dir = os.path.join(env.Dir('#.').srcnode().abspath, env['build_dir'])
version = tuple(map(str, version))
if env['SHLIBSUFFIX'] == '.dll':
dlls = env.FindIxes(sources, 'SHLIBPREFIX', 'SHLIBSUFFIX')
targets += install(env, dlls, 'bin')
libs = env.FindIxes(sources, 'LIBPREFIX', 'LIBSUFFIX')
targets += install(env, libs, 'lib')
else:
for source in sources:
target_dir = os.path.join(install_dir, 'lib')
target_name = '.'.join((str(source),) + version)
last = env.InstallAs(os.path.join(target_dir, target_name), source)
targets += last
while len(version):
version = version[:-1]
target_name = '.'.join((str(source),) + version)
action = SCons.Action.Action(symlink, " Symlinking $TARGET ...")
last = env.Command(os.path.join(target_dir, target_name), last, action)
targets += last
return targets
def createInstallMethods(env):
env.AddMethod(install_program, 'InstallProgram')
env.AddMethod(install_shared_library, 'InstallSharedLibrary')
def num_jobs():
try:
return int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
pass
try:
return os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
pass
try:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
except ValueError:
pass
return 1
def pkg_config_modules(env, name, modules):
'''Simple wrapper for pkg-config.'''
env[name] = False
if env['platform'] == 'windows':
return
if not env.Detect('pkg-config'):
return
if subprocess.call(["pkg-config", "--exists", ' '.join(modules)]) != 0:
return
# Put -I and -L flags directly into the environment, as these don't affect
# the compilation of targets that do not use them
try:
env.ParseConfig('pkg-config --cflags-only-I --libs-only-L ' + ' '.join(modules))
except OSError:
return
# Other flags may affect the compilation of unrelated targets, so store
# them with a prefix, (e.g., XXX_CFLAGS, XXX_LIBS, etc)
try:
flags = env.ParseFlags('!pkg-config --cflags-only-other --libs-only-l --libs-only-other ' + ' '.join(modules))
except OSError:
return
prefix = name.upper() + '_'
for flag_name, flag_value in flags.iteritems():
env[prefix + flag_name] = flag_value
env[name] = True
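# Illustrative sketch (not part of the original tool): a call such as
#   pkg_config_modules(env, 'x11', ['x11', 'xext'])
# sets env['x11'] to True only when pkg-config reports both modules, merges the
# -I/-L search paths directly into the environment, and stores the remaining
# flags under the X11_ prefix (e.g. env['X11_LIBS']).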
def generate(env):
"""Common environment generation code"""
# Tell tools which machine to compile for
env['TARGET_ARCH'] = env['machine']
env['MSVS_ARCH'] = env['machine']
# Toolchain
platform = env['platform']
if env['toolchain'] == 'default':
if platform == 'winddk':
env['toolchain'] = 'winddk'
elif platform == 'wince':
env['toolchain'] = 'wcesdk'
env.Tool(env['toolchain'])
# Allow override compiler and specify additional flags from environment
if os.environ.has_key('CC'):
env['CC'] = os.environ['CC']
# Update CCVERSION to match
pipe = SCons.Action._subproc(env, [env['CC'], '--version'],
stdin = 'devnull',
stderr = 'devnull',
stdout = subprocess.PIPE)
if pipe.wait() == 0:
line = pipe.stdout.readline()
match = re.search(r'[0-9]+(\.[0-9]+)+', line)
if match:
env['CCVERSION'] = match.group(0)
if os.environ.has_key('CFLAGS'):
env['CCFLAGS'] += SCons.Util.CLVar(os.environ['CFLAGS'])
if os.environ.has_key('CXX'):
env['CXX'] = os.environ['CXX']
if os.environ.has_key('CXXFLAGS'):
env['CXXFLAGS'] += SCons.Util.CLVar(os.environ['CXXFLAGS'])
if os.environ.has_key('LDFLAGS'):
env['LINKFLAGS'] += SCons.Util.CLVar(os.environ['LDFLAGS'])
env['gcc'] = 'gcc' in os.path.basename(env['CC']).split('-')
env['msvc'] = env['CC'] == 'cl'
if env['msvc'] and env['toolchain'] == 'default' and env['machine'] == 'x86_64':
# MSVC x64 support is broken in earlier versions of scons
        env.EnsureSConsVersion(2, 0)
# shortcuts
machine = env['machine']
platform = env['platform']
x86 = env['machine'] == 'x86'
ppc = env['machine'] == 'ppc'
gcc = env['gcc']
msvc = env['msvc']
    # Determine whether we are cross compiling; in particular, whether we need
    # to compile code generators with a different compiler than the target code.
host_platform = _platform.system().lower()
if host_platform.startswith('cygwin'):
host_platform = 'cygwin'
host_machine = os.environ.get('PROCESSOR_ARCHITEW6432', os.environ.get('PROCESSOR_ARCHITECTURE', _platform.machine()))
host_machine = {
'x86': 'x86',
'i386': 'x86',
'i486': 'x86',
'i586': 'x86',
'i686': 'x86',
'ppc' : 'ppc',
'AMD64': 'x86_64',
'x86_64': 'x86_64',
}.get(host_machine, 'generic')
env['crosscompile'] = platform != host_platform
if machine == 'x86_64' and host_machine != 'x86_64':
env['crosscompile'] = True
env['hostonly'] = False
    # Backwards compatibility with the debug= profile= options
if env['build'] == 'debug':
if not env['debug']:
print 'scons: warning: debug option is deprecated and will be removed eventually; use instead'
print
print ' scons build=release'
print
env['build'] = 'release'
if env['profile']:
print 'scons: warning: profile option is deprecated and will be removed eventually; use instead'
print
print ' scons build=profile'
print
env['build'] = 'profile'
if False:
# Enforce SConscripts to use the new build variable
env.popitem('debug')
env.popitem('profile')
else:
        # Backwards compatibility with older sconscripts
if env['build'] in ('debug', 'checked'):
env['debug'] = True
env['profile'] = False
if env['build'] == 'profile':
env['debug'] = False
env['profile'] = True
if env['build'] == 'release':
env['debug'] = False
env['profile'] = False
# Put build output in a separate dir, which depends on the current
# configuration. See also http://www.scons.org/wiki/AdvancedBuildExample
build_topdir = 'build'
build_subdir = env['platform']
if env['embedded']:
build_subdir = 'embedded-' + build_subdir
if env['machine'] != 'generic':
build_subdir += '-' + env['machine']
if env['build'] != 'release':
build_subdir += '-' + env['build']
build_dir = os.path.join(build_topdir, build_subdir)
# Place the .sconsign file in the build dir too, to avoid issues with
# different scons versions building the same source file
env['build_dir'] = build_dir
env.SConsignFile(os.path.join(build_dir, '.sconsign'))
if 'SCONS_CACHE_DIR' in os.environ:
print 'scons: Using build cache in %s.' % (os.environ['SCONS_CACHE_DIR'],)
env.CacheDir(os.environ['SCONS_CACHE_DIR'])
env['CONFIGUREDIR'] = os.path.join(build_dir, 'conf')
env['CONFIGURELOG'] = os.path.join(os.path.abspath(build_dir), 'config.log')
# Parallel build
if env.GetOption('num_jobs') <= 1:
env.SetOption('num_jobs', num_jobs())
env.Decider('MD5-timestamp')
env.SetOption('max_drift', 60)
# C preprocessor options
cppdefines = []
if env['build'] in ('debug', 'checked'):
cppdefines += ['DEBUG']
else:
cppdefines += ['NDEBUG']
if env['build'] == 'profile':
cppdefines += ['PROFILE']
if env['platform'] in ('posix', 'linux', 'freebsd', 'darwin'):
cppdefines += [
'_POSIX_SOURCE',
('_POSIX_C_SOURCE', '199309L'),
'_SVID_SOURCE',
'_BSD_SOURCE',
'_GNU_SOURCE',
'PTHREADS',
'HAVE_POSIX_MEMALIGN',
]
if env['platform'] == 'darwin':
cppdefines += ['_DARWIN_C_SOURCE']
if platform == 'windows':
cppdefines += [
'WIN32',
'_WINDOWS',
#'_UNICODE',
#'UNICODE',
# http://msdn.microsoft.com/en-us/library/aa383745.aspx
('_WIN32_WINNT', '0x0601'),
('WINVER', '0x0601'),
]
if msvc and env['toolchain'] != 'winddk':
cppdefines += [
'VC_EXTRALEAN',
'_USE_MATH_DEFINES',
'_CRT_SECURE_NO_WARNINGS',
'_CRT_SECURE_NO_DEPRECATE',
'_SCL_SECURE_NO_WARNINGS',
'_SCL_SECURE_NO_DEPRECATE',
]
if env['build'] in ('debug', 'checked'):
cppdefines += ['_DEBUG']
if env['toolchain'] == 'winddk':
# Mimic WINDDK's builtin flags. See also:
# - WINDDK's bin/makefile.new i386mk.inc for more info.
# - buildchk_wxp_x86.log files, generated by the WINDDK's build
# - http://alter.org.ua/docs/nt_kernel/vc8_proj/
if machine == 'x86':
cppdefines += ['_X86_', 'i386']
if machine == 'x86_64':
cppdefines += ['_AMD64_', 'AMD64']
if platform == 'winddk':
cppdefines += [
'STD_CALL',
('CONDITION_HANDLING', '1'),
('NT_INST', '0'),
('WIN32', '100'),
('_NT1X_', '100'),
('WINNT', '1'),
('_WIN32_WINNT', '0x0501'), # minimum required OS version
('WINVER', '0x0501'),
('_WIN32_IE', '0x0603'),
('WIN32_LEAN_AND_MEAN', '1'),
('DEVL', '1'),
('__BUILDMACHINE__', 'WinDDK'),
('FPO', '0'),
]
if env['build'] in ('debug', 'checked'):
cppdefines += [('DBG', 1)]
if platform == 'wince':
cppdefines += [
'_CRT_SECURE_NO_DEPRECATE',
'_USE_32BIT_TIME_T',
'UNICODE',
'_UNICODE',
('UNDER_CE', '600'),
('_WIN32_WCE', '0x600'),
'WINCEOEM',
'WINCEINTERNAL',
'WIN32',
'STRICT',
'x86',
'_X86_',
'INTERNATIONAL',
('INTLMSG_CODEPAGE', '1252'),
]
if platform == 'windows':
cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_USER']
if platform == 'winddk':
cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_DISPLAY']
if platform == 'wince':
cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_CE']
cppdefines += ['PIPE_SUBSYSTEM_WINDOWS_CE_OGL']
if env['embedded']:
cppdefines += ['PIPE_SUBSYSTEM_EMBEDDED']
env.Append(CPPDEFINES = cppdefines)
# C compiler options
cflags = [] # C
cxxflags = [] # C++
ccflags = [] # C & C++
if gcc:
ccversion = env['CCVERSION']
if env['build'] == 'debug':
ccflags += ['-O0']
elif ccversion.startswith('4.2.'):
# gcc 4.2.x optimizer is broken
print "warning: gcc 4.2.x optimizer is broken -- disabling optimizations"
ccflags += ['-O0']
else:
ccflags += ['-O3']
ccflags += ['-g3']
if env['build'] in ('checked', 'profile'):
# See http://code.google.com/p/jrfonseca/wiki/Gprof2Dot#Which_options_should_I_pass_to_gcc_when_compiling_for_profiling?
ccflags += [
'-fno-omit-frame-pointer',
'-fno-optimize-sibling-calls',
]
if env['machine'] == 'x86':
ccflags += [
'-m32',
#'-march=pentium4',
]
if distutils.version.LooseVersion(ccversion) >= distutils.version.LooseVersion('4.2') \
and (platform != 'windows' or env['build'] == 'debug' or True):
# NOTE: We need to ensure stack is realigned given that we
# produce shared objects, and have no control over the stack
# alignment policy of the application. Therefore we need
                # -mstackrealign or -mincoming-stack-boundary=2.
#
# XXX: -O and -mstackrealign causes stack corruption on MinGW
#
# XXX: We could have SSE without -mstackrealign if we always used
# __attribute__((force_align_arg_pointer)), but that's not
# always the case.
ccflags += [
'-mstackrealign', # ensure stack is aligned
'-mmmx', '-msse', '-msse2', # enable SIMD intrinsics
#'-mfpmath=sse',
]
if platform in ['windows', 'darwin']:
# Workaround http://gcc.gnu.org/bugzilla/show_bug.cgi?id=37216
ccflags += ['-fno-common']
if env['machine'] == 'x86_64':
ccflags += ['-m64']
if platform == 'darwin':
ccflags += ['-fno-common']
if env['platform'] != 'windows':
ccflags += ['-fvisibility=hidden']
# See also:
# - http://gcc.gnu.org/onlinedocs/gcc/Warning-Options.html
ccflags += [
'-Wall',
'-Wno-long-long',
'-ffast-math',
'-fmessage-length=0', # be nice to Eclipse
]
cflags += [
'-Wmissing-prototypes',
'-std=gnu99',
]
if distutils.version.LooseVersion(ccversion) >= distutils.version.LooseVersion('4.0'):
ccflags += [
'-Wmissing-field-initializers',
]
if distutils.version.LooseVersion(ccversion) >= distutils.version.LooseVersion('4.2'):
ccflags += [
'-Werror=pointer-arith',
]
cflags += [
'-Werror=declaration-after-statement',
]
if msvc:
# See also:
# - http://msdn.microsoft.com/en-us/library/19z1t1wy.aspx
# - cl /?
if env['build'] == 'debug':
ccflags += [
'/Od', # disable optimizations
'/Oi', # enable intrinsic functions
'/Oy-', # disable frame pointer omission
]
else:
ccflags += [
'/O2', # optimize for speed
]
if env['build'] == 'release':
ccflags += [
'/GL', # enable whole program optimization
]
else:
ccflags += [
'/GL-', # disable whole program optimization
]
ccflags += [
'/fp:fast', # fast floating point
'/W3', # warning level
#'/Wp64', # enable 64 bit porting warnings
]
if env['machine'] == 'x86':
ccflags += [
#'/arch:SSE2', # use the SSE2 instructions
]
if platform == 'windows':
ccflags += [
# TODO
]
if platform == 'winddk':
ccflags += [
'/Zl', # omit default library name in .OBJ
'/Zp8', # 8bytes struct member alignment
'/Gy', # separate functions for linker
'/Gm-', # disable minimal rebuild
'/WX', # treat warnings as errors
'/Gz', # __stdcall Calling convention
'/GX-', # disable C++ EH
'/GR-', # disable C++ RTTI
'/GF', # enable read-only string pooling
'/G6', # optimize for PPro, P-II, P-III
'/Ze', # enable extensions
'/Gi-', # disable incremental compilation
'/QIfdiv-', # disable Pentium FDIV fix
'/hotpatch', # prepares an image for hotpatching.
#'/Z7', #enable old-style debug info
]
if platform == 'wince':
# See also C:\WINCE600\public\common\oak\misc\makefile.def
ccflags += [
'/Zl', # omit default library name in .OBJ
'/GF', # enable read-only string pooling
'/GR-', # disable C++ RTTI
'/GS', # enable security checks
# Allow disabling language conformance to maintain backward compat
#'/Zc:wchar_t-', # don't force wchar_t as native type, instead of typedef
#'/Zc:forScope-', # don't enforce Standard C++ for scoping rules
#'/wd4867',
#'/wd4430',
#'/MT',
#'/U_MT',
]
# Automatic pdb generation
# See http://scons.tigris.org/issues/show_bug.cgi?id=1656
env.EnsureSConsVersion(0, 98, 0)
env['PDB'] = '${TARGET.base}.pdb'
env.Append(CCFLAGS = ccflags)
env.Append(CFLAGS = cflags)
env.Append(CXXFLAGS = cxxflags)
if env['platform'] == 'windows' and msvc:
# Choose the appropriate MSVC CRT
# http://msdn.microsoft.com/en-us/library/2kzt1wy3.aspx
if env['build'] in ('debug', 'checked'):
env.Append(CCFLAGS = ['/MTd'])
env.Append(SHCCFLAGS = ['/LDd'])
else:
env.Append(CCFLAGS = ['/MT'])
env.Append(SHCCFLAGS = ['/LD'])
# Assembler options
if gcc:
if env['machine'] == 'x86':
env.Append(ASFLAGS = ['-m32'])
if env['machine'] == 'x86_64':
env.Append(ASFLAGS = ['-m64'])
# Linker options
linkflags = []
shlinkflags = []
if gcc:
if env['machine'] == 'x86':
linkflags += ['-m32']
if env['machine'] == 'x86_64':
linkflags += ['-m64']
        if env['platform'] not in ('darwin',):
shlinkflags += [
'-Wl,-Bsymbolic',
]
# Handle circular dependencies in the libraries
        if env['platform'] in ('darwin',):
pass
else:
env['_LIBFLAGS'] = '-Wl,--start-group ' + env['_LIBFLAGS'] + ' -Wl,--end-group'
if msvc:
if env['build'] == 'release':
# enable Link-time Code Generation
linkflags += ['/LTCG']
env.Append(ARFLAGS = ['/LTCG'])
if platform == 'windows' and msvc:
# See also:
# - http://msdn2.microsoft.com/en-us/library/y0zzbyt4.aspx
linkflags += [
'/fixed:no',
'/incremental:no',
]
if platform == 'winddk':
linkflags += [
'/merge:_PAGE=PAGE',
'/merge:_TEXT=.text',
'/section:INIT,d',
'/opt:ref',
'/opt:icf',
'/ignore:4198,4010,4037,4039,4065,4070,4078,4087,4089,4221',
'/incremental:no',
'/fullbuild',
'/release',
'/nodefaultlib',
'/wx',
'/debug',
'/debugtype:cv',
'/version:5.1',
'/osversion:5.1',
'/functionpadmin:5',
'/safeseh',
'/pdbcompress',
'/stack:0x40000,0x1000',
'/driver',
'/align:0x80',
'/subsystem:native,5.01',
'/base:0x10000',
'/entry:DrvEnableDriver',
]
if env['build'] != 'release':
linkflags += [
'/MAP', # http://msdn.microsoft.com/en-us/library/k7xkk3e2.aspx
]
if platform == 'wince':
linkflags += [
'/nodefaultlib',
#'/incremental:no',
#'/fullbuild',
'/entry:_DllMainCRTStartup',
]
env.Append(LINKFLAGS = linkflags)
env.Append(SHLINKFLAGS = shlinkflags)
# We have C++ in several libraries, so always link with the C++ compiler
if env['gcc']:
env['LINK'] = env['CXX']
# Default libs
libs = []
if env['platform'] in ('posix', 'linux', 'freebsd', 'darwin'):
libs += ['m', 'pthread', 'dl']
env.Append(LIBS = libs)
# Load tools
env.Tool('lex')
env.Tool('yacc')
if env['llvm']:
env.Tool('llvm')
pkg_config_modules(env, 'x11', ['x11', 'xext'])
pkg_config_modules(env, 'drm', ['libdrm'])
pkg_config_modules(env, 'drm_intel', ['libdrm_intel'])
pkg_config_modules(env, 'drm_radeon', ['libdrm_radeon'])
pkg_config_modules(env, 'xorg', ['xorg-server'])
pkg_config_modules(env, 'kms', ['libkms'])
env['dri'] = env['x11'] and env['drm']
# Custom builders and methods
env.Tool('custom')
createInstallMethods(env)
# for debugging
#print env.Dump()
def exists(env):
return 1
| |
from host import Host
from router import Router
from link import Link
from flow import Flow
from tahoe_flow import TahoeFlow
from reno_flow import RenoFlow
from fast_flow import FastFlow
from Queue import PriorityQueue
import networkx as nx
import matplotlib.pyplot as plt
# Constants
# Time to update router info, in ms.
ROUTER_UPDATE_PERIOD = 100
class Network():
"""Python representation of the network.
Each host, router, link, and flow object is denoted by a
unique character id, and placed in a distinct dictionary.
The check_id function checks the unique id constraint before
construction any new objects. This is a global id constraint
across all objects.
Parameters
----------
bw : `Blackwidow`
The simulation object containing settings and data recording.
Attributes
----------
time : float
        The current simulation time.
"""
def __init__(self, bw):
self.devices = {}
self.hosts = {}
self.routers = {}
self.links = {}
self.flows = {}
self.ids = []
self._time = 0
self.bw = bw
self._events = PriorityQueue()
self.num_flows_active = 0
self.g = nx.MultiDiGraph()
self.deleted = []
@property
def time(self):
return self._time
@time.setter
def time(self, value):
raise AttributeError("Cannot modify network time")
def check_id(self, obj_id):
"""Check if the id is not already used.
        This function raises a ValueError if the object id is not unique.
Parameters
----------
obj_id : string
The id to check.
"""
if obj_id in self.ids:
raise ValueError('id {0} already exists.'.format(obj_id))
def dump(self, output=False):
"""Prints out network and returns networkx graph
Prints the devices, links, and flows associated with the network, and
returns a pydot object with the network graph.
Parameters
----------
output : boolean, optional
Specifies whether to print the network information (the default is
False).
Returns
-------
pydot
pydot object containing the network graph
"""
# Print network information if output is True
if output:
print "Devices:\n"
for device_id in self.devices:
print self.devices[device_id]
print "Links:\n"
for link_id in self.links:
print self.links[link_id]
print "Flows:\n"
for flow_id in self.flows:
print self.flows[flow_id]
# Convert the graph to a pydot object and return
return nx.to_pydot(self.g)
def add_host(self, host_id):
"""Construct host and add to dictionary of hosts.
Parameters
----------
host_id : string
A unique id for the host.
"""
# Check if the id is not already used
self.check_id(host_id)
# Create a Host object
self.devices[host_id] = Host(host_id)
# Update dictionaries
self.hosts[host_id] = self.devices[host_id]
self.ids.append(host_id)
# Update the graph
self.g.add_node(host_id, shape="square")
def add_router(self, router_id):
"""Construct router and add to dictionary of routers.
Parameters
----------
router_id : string
A unique id for the router.
"""
# Check if the id is not already used
self.check_id(router_id)
# Create a Router
self.devices[router_id] = Router(router_id, self, self.bw)
# Update dictionaries
self.routers[router_id] = self.devices[router_id]
self.ids.append(router_id)
# Update the graph
self.g.add_node(router_id)
def delete_device(self, device_id):
"""Deletes a device in the network.
Parameters
----------
device_id : string
The id of the `Device` to delete.
"""
# Get device
device = self.devices[device_id]
# Delete all links connected to device
for link in device.links[:]:
self.delete_link(link.id)
# Delete all flows from the device; some devices may not track flows,
# so errors here are ignored.
try:
for flow in device.flows[:]:
self.delete_flow(flow.flow_id)
except:
pass
# Update graph
self.g.remove_node(device_id)
# Update dictionaries
self.ids.remove(device_id)
if device_id in self.hosts:
del self.hosts[device_id]
if device_id in self.routers:
del self.routers[device_id]
self.deleted.append(device_id)
del self.devices[device_id]
def add_link(self, link_id, device_id1, device_id2,
delay, rate, capacity):
"""Adds a link to the network.
Parameters
----------
link_id : string
A unique id for the link.
device_id1 : string
The id of one of the `Device` objects to connect to the link.
device_id2 : string
The id of one of the `Device` objects to connect to the link.
delay : float
The propagation delay of the link in ms.
rate : float
The rate at which the link can send a packet in Mbps.
capacity : int
The capacity of the link buffer in KB.
"""
# Check if the id is not already used
self.check_id(link_id)
# Make sure both device ids correspond to existing `Device` objects
if device_id1 not in self.ids:
raise KeyError('id {0} does not exist.'.format(device_id1))
if device_id2 not in self.ids:
raise KeyError('id {0} does not exist.'.format(device_id2))
# Get devices
device_1 = self.devices[device_id1]
device_2 = self.devices[device_id2]
# Create link
self.links[link_id] = Link(link_id, device_1, device_2, delay, rate,
capacity, self, self.bw)
# Update devices with link
device_1.add_link(self.links[link_id])
device_2.add_link(self.links[link_id])
# Update dictionaries
self.ids.append(link_id)
# Update graph
self.g.add_edge(device_id1, device_id2, label=link_id, dir="none",
len=str(delay))
def delete_link(self, link_id):
"""Deletes a link from the network.
Parameters
----------
link_id : string
The id of the link to delete.
"""
# Get the link
link = self.links[link_id]
# Delete the link from the connected devices
link.device_a.delete_link(link)
link.device_b.delete_link(link)
# Remove the edge from the graph. Since the graph is a digraph, we try
# to remove the edge in both directions.
try:
self.g.remove_edge(link.device_a.network_id,
link.device_b.network_id)
except:
self.g.remove_edge(link.device_b.network_id,
link.device_a.network_id)
# Update dictionaries
self.ids.remove(link_id)
self.deleted.append(link_id)
del self.links[link_id]
def add_flow(self, flow_id, flow_src, flow_dest, data_amt, flow_start):
"""Adds a flow to the network.
Parameters
----------
flow_id : string
A unique id for the flow.
flow_src : string
The id for the source `Device` for the flow.
flow_dest : string
The id for the destination `Device` for the flow.
data_amt : float
The amount of data for the flow to send in MB.
flow_start : float
The amount of time to wait before starting the flow in ms.
"""
# Check if the id is not already used
self.check_id(flow_id)
# Get the source and destination devices
device_1 = self.devices[flow_src]
device_2 = self.devices[flow_dest]
# Increment the number of flows active
self.num_flows_active += 1
# Determine TCP alg from bw.tcp_alg
if self.bw.tcp_alg == 'Reno':
flow = RenoFlow(flow_id, device_1, device_2, data_amt,
self, flow_start, self.bw)
elif self.bw.tcp_alg == 'Tahoe':
flow = TahoeFlow(flow_id, device_1, device_2, data_amt,
self, flow_start, self.bw)
elif self.bw.tcp_alg == 'Fast':
flow = FastFlow(flow_id, device_1, device_2, data_amt,
self, flow_start, self.bw)
else:
raise Exception("Unknown TCP algorithm.")
# Update dictionaries
self.flows[flow_id] = flow
self.ids.append(flow_id)
# Update devices with flow
device_1.add_flow(flow)
device_2.add_flow(flow)
# Update graph
self.g.add_edge(flow_src, flow_dest, label=flow_id)
def delete_flow(self, flow_id):
"""Delete a flow from the network.
Parameters
----------
flow_id : string
The id of the flow to delete.
"""
# Get the flow
flow = self.flows[flow_id]
# Delete the flow from the source and destination devices
flow.src.delete_flow(flow)
flow.dest.delete_flow(flow)
# Update the graph
self.g.remove_edge(flow.src.network_id, flow.dest.network_id)
# Update dictionaries
del self.flows[flow_id]
self.ids.remove(flow_id)
self.deleted.append(flow_id)
# Decrement the number of active flows
self.num_flows_active -= 1
def decrement_flows(self):
"""Decrements the number of active flows."""
self.num_flows_active -= 1
def empty(self):
"""Empties the event queue."""
self._events = PriorityQueue()
def add_event(self, event, delay):
"""
Function to add an event to the queue
This function adds an event to the queue to be run after delay time.
Parameters
----------
event : `Event`
The event to be run.
delay : float
The amount of time in ms to wait before running the event.
"""
# Add the event to the queue with key equal to the time to run the
# event.
self._events.put((self._time + delay, event))
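# Illustrative call (hedged sketch): schedule `some_event` to fire 5 ms from the
# current simulation time with network.add_event(some_event, 5); `some_event`
# stands in for any object exposing the run() method and src_id attribute that
# run() below relies on.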
def to_json(self):
"""Returns a JSON representation of the network."""
data = {}
hosts = []
for host_id in self.hosts:
hosts.append(host_id)
data["Hosts"] = hosts
routers = []
for router_id in self.routers:
routers.append(router_id)
data["Routers"] = routers
links = []
for link_id in self.links:
link_data = {}
link_data["network_id"] = link_id
link_data["devices"] = [self.links[link_id].device_a.network_id,
self.links[link_id].device_b.network_id]
link_data["rate"] = self.links[link_id].rate / (10 ** 3)
link_data["delay"] = self.links[link_id].delay
link_data["buffer"] = self.links[link_id].capacity / 1000 / 8
links.append(link_data)
data["Links"] = links
flows = []
for flow_id in self.flows:
flow_data = {}
flow_data["network_id"] = flow_id
flow_data["src"] = self.flows[flow_id].src.network_id
flow_data["dest"] = self.flows[flow_id].dest.network_id
flow_data["amount"] = self.flows[flow_id].amount / 8 / (10 ** 6)
flow_data["start"] = self.flows[flow_id].flow_start / 1000
flows.append(flow_data)
data["Flows"] = flows
return data
def run(self):
"""Runs the network.
Dequeues events from the queue and runs them in order until the queue
is empty or there are 0 flows active.
Returns
-------
time : int
The amount of time taken for the network to run.
"""
# Keep running while we have events to run and there are active flows.
# The first events will be enqueued by the flows when they are
# initialized.
while not self._events.empty() and self.num_flows_active != 0:
# Get the event and time
(time, current_event) = self._events.get()
# Don't run the event if its source has been deleted
if current_event.src_id in self.deleted:
continue
print ("{0} at time {1} with {2} "
"flows active".format(str(current_event),
time,
self.num_flows_active))
# Update the current time
self._time = time
# Run the event
current_event.run()
# Return end time.
self.bw.write()
return self._time
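# Illustrative end-to-end usage (hedged sketch; `bw` stands in for the Blackwidow
# settings/recording object described in the class docstring, with tcp_alg set to
# 'Reno', 'Tahoe', or 'Fast'):
#     net = Network(bw)
#     net.add_host('H1')
#     net.add_host('H2')
#     net.add_link('L1', 'H1', 'H2', delay=10, rate=10, capacity=64)
#     net.add_flow('F1', 'H1', 'H2', data_amt=20, flow_start=1000)
#     net.run()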
| |
"""
Unit test for base_service - Basic Treadmill service capabilities
"""
import os
import tempfile
import unittest
import select
import socket
# Disable W0611: Unused import
import tests.treadmill_test_deps # pylint: disable=W0611
import mock
from treadmill.services import _base_service
class MyTestService(_base_service.BaseResourceServiceImpl):
"""Test Service implementation.
"""
def __init__(self, *_args, **_kw_args):
super(MyTestService, self).__init__()
def initialize(self, service_dir):
pass
def on_create_request(self, _rsrc_id, _rsrc_data):
pass
def on_delete_request(self, _rsrc_id):
pass
def report_status(self):
pass
def synchronize(self):
pass
class BaseServiceTest(unittest.TestCase):
"""Unit tests for the base service class.
"""
def setUp(self):
self.root = tempfile.mkdtemp()
def test_init(self):
"""Validate simple instanciation.
"""
instance = _base_service.ResourceService(
service_dir=self.root,
impl='a.sample.module',
)
self.assertEqual(
instance.name,
'module'
)
def test_load(self):
"""Verifies that only valid classes are accepted as implementation.
"""
# Access to a protected member _load_impl of a client class
# pylint: disable=W0212
self.assertRaises(
AssertionError,
_base_service.ResourceService(
service_dir=self.root,
impl=object,
)._load_impl
)
self.assertRaises(
AssertionError,
_base_service.ResourceService(
service_dir=self.root,
impl='socket.socket',
)._load_impl
)
self.assertTrue(
_base_service.ResourceService(
service_dir=self.root,
impl=MyTestService,
)._load_impl()
)
def test_name(self):
"""Check how the name is derived from the class name.
"""
self.assertEqual(
_base_service.ResourceService(
service_dir=self.root,
impl='treadmill.services.MyClass',
).name,
'MyClass',
)
self.assertEqual(
_base_service.ResourceService(
service_dir=self.root,
impl='treadmill.services.MyClass',
).name,
'MyClass',
)
self.assertEqual(
_base_service.ResourceService(
service_dir=self.root,
impl=MyTestService,
).name,
'MyTestService',
)
@mock.patch('select.poll', autospec=True)
@mock.patch('treadmill.idirwatch.DirWatcher', autospec=True)
@mock.patch('treadmill.services._base_service.ResourceService'
'._create_status_socket',
mock.Mock(return_value='status_socket'))
@mock.patch('treadmill.services._base_service.ResourceService'
'._check_requests',
mock.Mock(return_value=['foo-1', 'foo-2']))
@mock.patch('treadmill.services._base_service.ResourceService'
'._load_impl',
return_value=mock.create_autospec(MyTestService))
@mock.patch('treadmill.services._base_service.ResourceService'
'._on_created',
mock.Mock(return_value=True))
@mock.patch('treadmill.services._base_service.ResourceService'
'._update_poll_registration',
mock.Mock())
@mock.patch('treadmill.watchdog.Watchdog', autospec=True)
@mock.patch('treadmill.syscall.eventfd.eventfd',
mock.Mock(return_value='eventfd'))
def test_run(self,
mock_watchdog, mock_load_impl, mock_dirwatcher, mock_poll):
"""Test the run method setup before the main loop.
"""
# Access to a protected member _is_dead of a client class
# pylint: disable=W0212
mock_impl_instance = mock_load_impl.return_value.return_value
mock_impl_instance.configure_mock(
WATCHDOG_HEARTBEAT_SEC=60
)
mock_impl_instance.report_status.return_value = {
'hello': 'world'
}
mock_impl_instance.event_handlers.return_value = [
('filenoA', 'eventsA', 'callbackA'),
('filenoB', 'eventsB', 'callbackB'),
]
mock_dirwatcher.return_value.configure_mock(
inotify='mock_inotify',
)
instance = _base_service.ResourceService(
service_dir=self.root,
impl='MyTestService',
)
instance._is_dead = True
instance.run(
os.path.join(self.root, 'watchdogs'),
'foo',
bar='baz',
)
mock_load_impl.assert_called_with()
# Make sure the implementation was passed the correct parameters.
mock_load_impl.return_value.assert_called_with(
'foo',
bar='baz',
)
# Watchdog should be set
mock_watchdog.assert_called_with(
os.path.join(self.root, 'watchdogs'),
)
mock_watchdog.return_value.create.assert_called_with(
content=mock.ANY,
name='svc-MyTestService',
timeout='60s'
)
mock_watchdog_lease = mock_watchdog.return_value.create.return_value
# Implementation should be given the root as argument to `initialize`
mock_impl_instance.initialize.assert_called_with(
self.root
)
# First watcher should be setup
mock_dirwatcher.assert_called_with(
os.path.join(self.root, 'resources')
)
# Then we check/cleanup pre-existing requests
_base_service.ResourceService._check_requests.assert_called_with()
_base_service.ResourceService._on_created.assert_has_calls([
mock.call(mock_impl_instance, 'foo-1'),
mock.call(mock_impl_instance, 'foo-2'),
])
# Status should be queried first
mock_impl_instance.report_status.assert_called_with()
# The poll registration should be properly initialized
mock_impl_instance.event_handlers.assert_called_with()
instance._update_poll_registration.assert_called_with(
mock_poll.return_value,
{},
[
('eventfd', mock.ANY, mock.ANY),
('mock_inotify', mock.ANY, mock.ANY),
('status_socket', mock.ANY, mock.ANY),
('filenoA', mock.ANY, mock.ANY),
('filenoB', mock.ANY, mock.ANY),
],
)
# Loop exits immediately
# Watchdog lease should be cleared
mock_watchdog_lease.remove.assert_called_with()
@mock.patch('os.chmod', mock.Mock())
@mock.patch('os.unlink', mock.Mock())
@mock.patch('socket.socket', autospec=True)
def test__create_status_socket(self, mock_sock):
"""Test status socket creation.
"""
# Access to a protected member _create_status_socket of a client class
# pylint: disable=W0212
mock_self = mock.Mock(
spec_set=_base_service.ResourceService,
status_sock='/tmp/foo',
)
_base_service.ResourceService._create_status_socket(
mock_self,
)
os.unlink.assert_called_with(
'/tmp/foo'
)
mock_sock.assert_called_with(
family=socket.AF_UNIX,
type=socket.SOCK_STREAM,
proto=0
)
mock_socket = mock_sock.return_value
mock_socket.bind.assert_called_with(
'/tmp/foo'
)
mock_socket.listen.assert_called_with(mock.ANY)
os.chmod.assert_called_with(
'/tmp/foo', 0o666
)
@mock.patch('select.poll', autospec=True)
def test__run_events(self, mock_poll):
"""Test event dispatcher.
"""
# Access to a protected member _run_events of a client class
# pylint: disable=W0212
instance = _base_service.ResourceService(
service_dir=self.root,
impl='a.sample.module',
)
mock_callbacks = {
i: {'callback': mock.Mock(return_value=i)}
for i in range(3)
}
loop_poll = mock_poll.return_value
loop_poll.poll.return_value = ((i, select.POLLIN) for i in range(2))
res = instance._run_events(loop_poll, 42, mock_callbacks)
loop_poll.poll.assert_called_with(42*1000)
self.assertTrue(mock_callbacks[0]['callback'].called)
self.assertTrue(mock_callbacks[1]['callback'].called)
self.assertFalse(mock_callbacks[2]['callback'].called)
self.assertTrue(res)
if __name__ == '__main__':
unittest.main()
| |
#!/usr/bin/python
#============================ adjust path =====================================
import sys
import os
if __name__ == "__main__":
here = sys.path[0]
sys.path.insert(0, os.path.join(here, '..', '..','libs'))
sys.path.insert(0, os.path.join(here, '..', '..','external_libs'))
#============================ verify installation =============================
from SmartMeshSDK.utils import SmsdkInstallVerifier
(goodToGo,reason) = SmsdkInstallVerifier.verifyComponents(
[
SmsdkInstallVerifier.PYTHON,
SmsdkInstallVerifier.PYSERIAL,
]
)
if not goodToGo:
print "Your installation does not allow this application to run:\n"
print reason
raw_input("Press any button to exit")
sys.exit(1)
#============================ imports =========================================
import threading
from SmartMeshSDK.utils import AppUtils, \
FormatUtils, \
RateCalculator
from SmartMeshSDK.ApiDefinition import IpMgrDefinition
from SmartMeshSDK.IpMgrConnectorMux import IpMgrSubscribe
from SmartMeshSDK.protocols.oap import OAPDispatcher, \
OAPClient, \
OAPMessage
from dustUI import dustWindow, \
dustFrameConnection, \
dustFrameLEDPing, \
dustFrameText
#============================ logging =========================================
# local
import logging
class NullHandler(logging.Handler):
def emit(self, record):
pass
log = logging.getLogger('App')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
# global
AppUtils.configureLogging()
#============================ defines =========================================
LEDUPDATEPERIOD = 100 # in ms
RATEUPDATEPERIOD = 500 # in ms
OAP_PORT = 0xf0b9
#============================ body ============================================
##
# \addtogroup LEDPing
# \{
#
class LEDPingApp(object):
def __init__(self):
# local variables
self.guiLock = threading.Lock()
self.apiDef = IpMgrDefinition.IpMgrDefinition()
self.ledOn = False
self.pingOngoing = False
self.oap_client = None
self.ratecalculator = RateCalculator.RateCalculator()
self.connector = None
# create window
self.window = dustWindow.dustWindow(
'LEDPing',
self._windowCb_close
)
# add a connection frame
self.connectionFrame = dustFrameConnection.dustFrameConnection(
self.window,
self.guiLock,
self._connectionFrameCb_connected,
frameName="manager connection",
row=0,column=0
)
self.connectionFrame.apiLoaded(self.apiDef)
self.connectionFrame.show()
# add a LEDPing frame
self.LEDPingFrame = dustFrameLEDPing.dustFrameLEDPing(
self.window,
self.guiLock,
self._LEDPingFrameCb_startPressed,
self._LEDPingFrameCb_stopPressed,
frameName="LED ping",
row=1,column=0
)
self.LEDPingFrame.show()
# add a text frame
self.textFrame = dustFrameText.dustFrameText(
self.window,
self.guiLock,
frameName="Notes",
row=2,column=0
)
self.textFrame.show()
# put information in text frame
output = []
output += ['The mote this application drives needs to run the']
output += ['default firmware, and operate in master mode.']
output += ['']
output += ['']
output += ['The start button is only active when the']
output += ['application is connected to a SmartMesh IP manager.']
output = '\n'.join(output)
self.textFrame.write(output)
#======================== public ==========================================
def start(self):
'''
This command instructs the GUI to start executing and reacting to
user interactions. It never returns and should therefore be the last
command called.
'''
try:
self.window.mainloop()
except SystemExit:
sys.exit()
#======================== private =========================================
#===== GUI refresh
def _gui_refresh_led(self):
# ask LED frame to update
self.LEDPingFrame.updateLed(self.ledOn)
# schedule the next update
self.LEDPingFrame.after(LEDUPDATEPERIOD,self._gui_refresh_led)
def _gui_refresh_rate(self):
# ask rate calculator for rate
try:
self.LEDPingFrame.updateRttLabel(1.0/float(self.ratecalculator.getRate()))
except RateCalculator.RateCalculatorError:
self.LEDPingFrame.updateRttLabel(None)
# schedule the next update
self.LEDPingFrame.after(RATEUPDATEPERIOD,self._gui_refresh_rate)
#===== GUI interaction
def _connectionFrameCb_connected(self,connector):
'''
\brief Called when the connectionFrame has connected.
'''
# store the connector
self.connector = connector
# schedule the GUI to update
self.LEDPingFrame.after(LEDUPDATEPERIOD,self._gui_refresh_led)
self.LEDPingFrame.after(RATEUPDATEPERIOD,self._gui_refresh_rate)
# have LEDPingFrame enable button
self.LEDPingFrame.enableButton()
# OAP dispatcher
self.oap_dispatch = OAPDispatcher.OAPDispatcher()
# create a subscriber
self.subscriber = IpMgrSubscribe.IpMgrSubscribe(self.connector)
self.subscriber.start()
self.subscriber.subscribe(
notifTypes = [
IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA,
],
fun = self.oap_dispatch.dispatch_pkt,
isRlbl = False,
)
self.subscriber.subscribe(
notifTypes = [
IpMgrSubscribe.IpMgrSubscribe.ERROR,
IpMgrSubscribe.IpMgrSubscribe.FINISH,
],
fun = self._notifClientCb_disconnected,
isRlbl = True,
)
def _LEDPingFrameCb_startPressed(self,mac):
# remember that the app is started
self.pingOngoing = True
# disable further editing of MAC address
self.LEDPingFrame.disableMacText()
# create OAPClient
if not self.oap_client:
self.oap_client = OAPClient.OAPClient(
mac,
self.connector.dn_sendData,
self.oap_dispatch
)
# initiate first LED toggle
self._toggleLed()
def _LEDPingFrameCb_stopPressed(self):
# remember that the app is stopped
self.pingOngoing = False
# stop measuring rate
self.ratecalculator.clearBuf()
def _windowCb_close(self):
'''
\brief Called when the window is closed.
'''
if self.connector:
self.connector.disconnect()
#===== notifications
def _oap_response(self,mac,oap_resp):
# initiate next LED toggle
if self.pingOngoing:
self._toggleLed()
def _notifClientCb_disconnected(self,notifName,notifParams):
'''
\brief Called when the connectionFrame has disconnected.
'''
# update the GUI
self.connectionFrame.updateGuiDisconnected()
self.LEDPingFrame.disableButton()
# delete the connector
if self.connector:
self.connector.disconnect()
self.connector = None
# the app is stopped
self.pingOngoing = False
#===== helpers
def _toggleLed(self):
# indicate event
self.ratecalculator.signalEvent()
# pick the command to send
if self.ledOn:
ledVal = 0x00 # turn LED off
else:
ledVal = 0x01 # turn LED on
# send packet
self.oap_client.send(
OAPMessage.CmdType.PUT, # command
[3,2], # address (digital_out=3,Actuate LED (2))
data_tags=[OAPMessage.TLVByte(t=0,v=ledVal)], # parameters
cb=self._oap_response # callback
)
# update my local view of the LED
self.ledOn = not self.ledOn
#============================ main ============================================
def main():
app = LEDPingApp()
app.start()
if __name__ == '__main__':
main()
##
# end of LEDPing
# \}
#
| |
from .. import fileio
from ...weights.weights import W, WSP
from scipy import sparse
import numpy as np
__author__ = 'Charles R Schmidt <schmidtc@gmail.com>'
__all__ = ['GalIO']
class GalIO(fileio.FileIO):
"""
Opens, reads, and writes file objects in GAL format.
"""
FORMATS = ['gal']
MODES = ['r', 'w']
def __init__(self, *args, **kwargs):
self._typ = str
fileio.FileIO.__init__(self, *args, **kwargs)
self.file = open(self.dataPath, self.mode)
def read(self, n=-1, sparse=False):
"""
sparse: boolean
If true return scipy sparse object
If false return pysal w object
"""
self._sparse = sparse
self._complain_ifclosed(self.closed)
return self._read()
def seek(self, pos):
if pos == 0:
self.file.seek(0)
self.pos = 0
def _get_data_type(self):
return self._typ
def _set_data_type(self, typ):
if callable(typ):
self._typ = typ
else:
raise TypeError("Expecting a callable")
data_type = property(fset=_set_data_type, fget=_get_data_type)
def _read(self):
"""
Reads in the opened GAL file.
Returns
-------
W or WSP
    A pysal W object, or a WSP sparse weights object when called via
    read(sparse=True).
Examples
--------
>>> import tempfile, pysal, os
Read in a file GAL file
>>> testfile = pysal.open(pysal.examples.get_path('sids2.gal'),'r')
Return a W object
>>> w = testfile.read()
>>> w.n == 100
True
>>> print(round(w.sd,6))
1.515124
>>> testfile = pysal.open(pysal.examples.get_path('sids2.gal'),'r')
Return a sparse matrix for the w information
>>> wsp = testfile.read(sparse=True)
>>> wsp.sparse.nnz
462
"""
if self._sparse:
if self.pos > 0:
raise StopIteration
header = self.file.readline().strip().split()
header_n = len(header)
n = int(header[0])
if header_n > 1:
n = int(header[1])
ids = []
idsappend = ids.append
row = []
extend = row.extend # avoid dot in loops
col = []
append = col.append
counter = 0
typ = self.data_type
for i in range(n):
id, n_neighbors = self.file.readline().strip().split()
id = typ(id)
n_neighbors = int(n_neighbors)
neighbors_i = list(map(typ, self.file.readline().strip().split()))
nn = len(neighbors_i)
extend([id] * nn)
counter += nn
for id_neigh in neighbors_i:
append(id_neigh)
idsappend(id)
self.pos += 1
row = np.array(row)
col = np.array(col)
data = np.ones(counter)
ids = np.unique(row)
row = np.array([np.where(ids == j)[0] for j in row]).flatten()
col = np.array([np.where(ids == j)[0] for j in col]).flatten()
spmat = sparse.csr_matrix((data, (row, col)), shape=(n, n))
return WSP(spmat)
else:
if self.pos > 0:
raise StopIteration
neighbors = {}
ids = []
# handle case where more than n is specified in first line
header = self.file.readline().strip().split()
header_n = len(header)
n = int(header[0])
if header_n > 1:
n = int(header[1])
w = {}
typ = self.data_type
for i in range(n):
id, n_neighbors = self.file.readline().strip().split()
id = typ(id)
n_neighbors = int(n_neighbors)
neighbors_i = list(map(typ, self.file.readline().strip().split()))
neighbors[id] = neighbors_i
ids.append(id)
self.pos += 1
return W(neighbors, id_order=ids)
def write(self, obj):
"""
Write a weights object to the opened GAL file.
Parameters
----------
obj : W
    A pysal weights object.
Returns
-------
None
Examples
--------
>>> import tempfile, pysal, os
>>> testfile = pysal.open(pysal.examples.get_path('sids2.gal'),'r')
>>> w = testfile.read()
Create a temporary file for this example
>>> f = tempfile.NamedTemporaryFile(suffix='.gal')
Reassign to new var
>>> fname = f.name
Close the temporary named file
>>> f.close()
Open the new file in write mode
>>> o = pysal.open(fname,'w')
Write the Weights object into the open file
>>> o.write(w)
>>> o.close()
Read in the newly created gal file
>>> wnew = pysal.open(fname,'r').read()
Compare values from old to new
>>> wnew.pct_nonzero == w.pct_nonzero
True
Clean up temporary file created for this example
>>> os.remove(fname)
"""
self._complain_ifclosed(self.closed)
if issubclass(type(obj), W):
IDS = obj.id_order
self.file.write('%d\n' % (obj.n))
for id in IDS:
neighbors = obj.neighbors[id]
self.file.write('%s %d\n' % (str(id), len(neighbors)))
self.file.write(' '.join(map(str, neighbors)) + '\n')
self.pos += 1
else:
raise TypeError("Expected a pysal weights object, got: %s" %
(type(obj)))
def close(self):
self.file.close()
fileio.FileIO.close(self)
| |
#! /usr/bin/env python
from __future__ import print_function, absolute_import, division
"""
This module compares what datasets are currently in
/smov/cos/Data (by accessing the COS database) versus all datasets currently
archived in MAST. All missing datasets will be requested and placed in
the appropriate directory in /smov/cos/Data.
Use:
This script is intended to be used in a cron job.
"""
__author__ = "Jo Taylor"
__date__ = "04-13-2016"
__maintainer__ = "Jo Taylor"
__email__ = "jotaylor@stsci.edu"
from datetime import datetime as dt
import urllib
import time
import pickle
import os
import yaml
import argparse
import glob
import pyfastcopy
import shutil
import numpy as np
from subprocess import Popen, PIPE
from collections import defaultdict
from ..database.db_tables import load_connection
from .manualabor import parallelize, combine_2dicts, compress_files, timefunc
from .retrieval_info import BASE_DIR, CACHE
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def connect_cosdb():
"""
Connect to the COS team's database, cos_cci, on the server greendev to
determine which COS datasets are currently in the local repository.
Parameters:
-----------
None
Returns:
--------
all_smov : list
All rootnames of all files in the COS greendev database.
"""
# Open the configuration file for the COS database connection (MYSQL).
config_file = os.path.join(os.environ['HOME'], "configure.yaml")
with open(config_file, 'r') as f:
SETTINGS = yaml.load(f)
print("Querying COS greendev database for existing data...")
# Connect to the database.
Session, engine = load_connection(SETTINGS['connection_string'])
sci_files = list(engine.execute("SELECT DISTINCT rootname FROM files "
"WHERE rootname IS NOT NULL;"))
cci_files = list(engine.execute("SELECT DISTINCT name FROM files "
"WHERE rootname IS NULL AND "
"LEFT(name,1)='l';"))
# Store SQLAlchemy results as lists
all_sci = [row["rootname"].upper() for row in sci_files]
all_cci = [row["name"].strip("_cci.fits.gz").upper() for row in cci_files]
all_smov = all_sci + all_cci
# Close connection
engine.dispose()
return all_smov
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def get_all_mast_data():
"""
Connect to the MAST database on HARPO and store lists of all files.
Parameters:
-----------
None
Returns:
--------
all_mast_priv : dictionary
Dictionary where the keys are rootnames of proprietary files and
the values are the corresponding proposal IDs.
all_mast_pub : dictionary
Dictionary where the keys are rootnames of publicly available files
and the values are the corresponding proposal IDs.
"""
# Get all jitter, science (ASN), and CCI datasets.
print("Querying MAST databases for all COS data...")
query0 = "SELECT distinct ads_data_set_name,ads_pep_id FROM "\
"archive_data_set_all WHERE ads_instrument='cos' "\
"AND ads_data_set_name NOT LIKE 'LZ%' AND "\
"ads_best_version='Y' AND ads_archive_class!='EDT'\ngo"
# Some COS observations don't have ads_instrument=cos
query1 = "SELECT distinct ads_data_set_name,ads_pep_id FROM "\
"archive_data_set_all WHERE LEN(ads_data_set_name)=9 "\
"AND ads_data_set_name LIKE 'L%' AND ads_instrument='cos' "\
"AND ads_best_version='Y' and ads_archive_class!='EDT'\ngo"
# Now expand on the previous queries by only selecting non-propietary data.
utc_dt = dt.utcnow()
utc_str = utc_dt.strftime("%b %d %Y %I:%M:%S%p")
query0_pub = query0.split("\ngo")[0] + " and ads_release_date<='{0}'\ngo".format(utc_str)
query1_pub = query1.split("\ngo")[0] + " and ads_release_date<='{0}'\ngo".format(utc_str)
query0_priv = query0.split("\ngo")[0] + " and ads_release_date>='{0}'\ngo".format(utc_str)
query1_priv = query1.split("\ngo")[0] + " and ads_release_date>='{0}'\ngo".format(utc_str)
all_cos_priv = janky_connect(query0_priv)
all_l_priv = janky_connect(query1_priv)
all_mast_sql_priv = all_cos_priv + all_l_priv
all_cos_pub = janky_connect(query0_pub)
all_l_pub = janky_connect(query1_pub)
all_mast_sql_pub = all_cos_pub + all_l_pub
all_mast_priv = _sql_to_dict(all_mast_sql_priv)
all_mast_pub = _sql_to_dict(all_mast_sql_pub)
return all_mast_priv, all_mast_pub
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def get_pid(rootname):
program_id = rootname[1:4].upper()
query = "SELECT DISTINCT proposal_id FROM executed WHERE "\
"program_id='{0}'\ngo".format(program_id)
prop = janky_connect(query, database="opus_rep")
if len(prop) == 0:
return None
else:
return prop[0]
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def find_missing_exts(existing, existing_root):
"""
If something causes the code to crash mid-copy, some data products may
not be copied. The only way to check is this to determine what the
expected products are and compare that to what is currently in central
store.
Parameters:
-----------
existing : list
List of all existing files currently in COSMO, this includes
path name and filetype, e.g.
/grp/hst/cos2/smov_testing/13974/lcqf15meq_counts.fits
existing_root : list
List of rootnames all existing files currently in COSMO.
Returns:
--------
missing_files : list
"""
# Split query into chunks of 10K to avoid running out of processor
# resources.
chunksize= 10000
chunks = [existing_root[i:i+chunksize] for i in range(0, len(existing_root), chunksize)]
missing_files_l = []
pids = []
for chunk in chunks:
query = "SELECT distinct afi_file_name, ads_pep_id"\
" FROM archive_files, archive_data_set_all WHERE"\
" ads_data_set_name=afi_data_set_name"\
" AND ads_best_version='y'"\
" AND ads_generation_date= afi_generation_date"\
" AND ads_archive_class=afi_archive_class"\
" AND ads_archive_class IN ('cal','asn')"\
" AND ads_data_set_name IN {0}\ngo".format(tuple(chunk))
filenames = janky_connect(query)
expected_files_d = _sql_to_dict(filenames)
expected_files_s = set([row[0] for row in filenames])
existing_files_s = set([os.path.basename(x).strip(".gz") for x in existing])
missing_files_l_chunk = list(expected_files_s - existing_files_s)
missing_files_l += missing_files_l_chunk
if len(missing_files_l_chunk) == 0:
continue
pids_chunk = [int(expected_files_d[x]) for x in missing_files_l_chunk]
pids += pids_chunk
if len(missing_files_l) == 0:
return None
missing_files = _group_dict_by_pid(missing_files_l, pids)
print("{} single extensions missing for {} programs that were already retrieved- this is probably because COSMO crashed in an earlier run.".format(len(missing_files_l), len(missing_files)))
return missing_files
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def _sql_to_dict(sql_list, groupbykey=True):
"""
Store results of SQL query (list) as a dictionary with proposal IDs
as the keys and (individual) dataset rootname as the values.
Parameters:
-----------
sql_list : list
SQL results from janky_connect() stored as a list.
groupbykey : Bool
If True, it will sort SQL 0th results by 1st results, e.g. group
[["ld5301rfq", "14736"], ["ld5301rtq", "14736"]] ->
{"14736": ["ld5301rfq", "ld5301rtq"]}
If False, it will simply turn results into dictionary, e.g.
[["ld5301rfq", "14736"], ["ld5301rtq", "14736"]] ->
{"ld5301rfq": "14736", "ld5301rtq": "14736"}.
Note that, if False, PIDs will not be looked up, so NULL, MMD, CCI,
etc. programs will not have correct PIDs.
Returns:
--------
sql_dict : dictionary
Dictionary of SQL results.
"""
# Store results as dictionaries. Don't get podfiles (LZ*)
sql_dict = {row[0]:row[1] for row in sql_list if not row[0].startswith("LZ_")}
if groupbykey is True:
badfiles = []
for key in list(sql_dict):
if sql_dict[key] == "NULL":
if key.startswith("LF") or key.startswith("LN") or key.startswith("L_"):
sql_dict[key] = "CCI"
elif len(key) == 9:
prop = get_pid(key)
if prop is None:
badfiles.append(key)
else:
sql_dict[key] = prop
else:
sql_dict.pop(key, None)
return sql_dict
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def janky_connect(query_string, database=None):
"""
Connecting to the MAST database is near impossible using SQLAlchemy.
Instead, connect through a subprocess call.
Parameters:
-----------
query_string : str
SQL query text.
Returns:
--------
result : list
List where each index is a list consisting of [rootname, PID]
"""
# Open the configuration file for the MAST database connection (TSQL).
config_file = os.path.join(os.environ['HOME'], "configure2.yaml")
with open(config_file, 'r') as f:
SETTINGS = yaml.load(f)
if database is not None:
SETTINGS["database"] = database
# Connect to the MAST database through tsql.
# Should use shlex, but this strips the correct format of the username
# for AD login, i.e. stsci\\jotaylor. Instead, do it manually.
#command_line = "tsql -S{0} -D{1} -U{2} -P{3} -t'|||'".format(SETTINGS["server"],SETTINGS["database"], SETTINGS["username"], SETTINGS["password"])
#args = shlex.split(command_line)
args = ["tsql",
"-S{}".format(SETTINGS["server"]),
"-D{}".format(SETTINGS["database"]),
"-U{}".format(SETTINGS["username"]),
"-P{}".format(SETTINGS["password"]),
"-t|||"]
p = Popen(args,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
close_fds=True)
(transmit, receive, err) = (p.stdin, p.stdout, p.stderr)
transmit.write(query_string.encode("utf-8"))
transmit.close()
query_result0 = receive.readlines()
receive.close()
error_report0 = err.readlines()
err.close()
query_result = [x.decode("utf-8") for x in query_result0]
error_report = [x.decode("utf-8") for x in error_report0]
badness = ["locale", "charset", "1>", "affected"]
# I can't stop, someone help me.
# https://imgur.com/xioMcFe (h/t E. Snyder)
result = [x.strip().split("|||") if "|||" in x else x.strip()
for x in query_result if not any(y in x for y in badness)]
# Ensure that nothing was wrong with the query syntax.
assert (len(error_report) < 3), "Something went wrong in query:{0}".format(error_report[2])
return result
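# Illustrative call (hedged sketch; the query text and the credentials in
# configure2.yaml are site-specific):
#     rows = janky_connect("SELECT ads_data_set_name, ads_pep_id FROM "
#                          "archive_data_set_all WHERE ads_instrument='cos'\ngo")
#     # rows is then a list like [["LB4B01010", "11895"], ...]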
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def tally_cs(mydir=BASE_DIR, uniq_roots=True):
"""
For testing purposes, tabulate all files in central store
(/grp/hst/cos2/smov_testing/) and request all missing datasets.
Parameters:
-----------
None
Returns:
--------
smovfiles : list
A list of all fits files in BASE_DIR
"""
print ("Checking {0} for existing data...".format(mydir))
allsmov = glob.glob(os.path.join(mydir, "*", "*fits*"))
smovfilenames = [os.path.basename(x) for x in allsmov]
smovroots = [x.split("_cci")[0].upper() if "cci" in x
else x.split("_")[0].upper()
for x in smovfilenames]
if uniq_roots:
return allsmov, smovfilenames, list(set(smovroots))
else:
return allsmov, smovfilenames, smovroots
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def find_missing_data(use_cs):
"""
Compare the set of all files currently in the COS repository to the list
all files currently ingested into MAST.
Parameters:
-----------
use_cs : Bool
Switch to find missing data comparing what is currently on
central store, as opposed to using the COSMO database.
Returns:
--------
missing_data : dictionary
Dictionary where each key is the proposal, and values are the
missing data.
"""
if use_cs:
existing, existing_filenames, existing_root = tally_cs()
else:
existing_root = connect_cosdb()
print("Checking to see if there are any missing COS data...")
missing_exts = find_missing_exts(existing, existing_root)
mast_priv, mast_pub = get_all_mast_data()
missing_data_priv = _determine_missing(mast_priv, existing_root)
missing_data_pub = _determine_missing(mast_pub, existing_root)
return missing_data_priv, missing_data_pub, missing_exts
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def _determine_missing(in_dict, existing_root):
"""
Given a dictionary describing all (public or proprietary) data and a
dictionary describing all the data already downloaded into central store,
determine what datasets are missing.
Parameters:
-----------
in_dict : dictionary
    For all (public or proprietary) data, a dictionary where each key
    is a dataset rootname and the value is the corresponding proposal ID.
existing_root : list
    Rootnames of all datasets already in central store (or already in
    the COSMO database).
Returns:
--------
missing_data : dictionary
A dictionary of missing data where each key is a PID and the value
is all missing datasets for that PID.
"""
# Determine which datasets are missing.
missing_names = list(set(in_dict.keys()) - set(existing_root))
missing_props = [int(in_dict[x]) if in_dict[x] not in ["CCI","NULL"]
else in_dict[x] for x in missing_names]
missing_data = _group_dict_by_pid(missing_names, missing_props)
return missing_data
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def _group_dict_by_pid(filenames, pids):
# Create dictionaries grouped by proposal ID, it is much easier
# to retrieve them this way.
# For most data, determine corresponding proposal ID. CCIs and some
# odd files will have proposal ID = NULL though.
keys = set(pids)
vals = [[] for x in range(len(keys))]
outd = dict(zip(keys, vals))
for i in range(len(filenames)):
outd[pids[i]].append(filenames[i])
return outd
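# For example (hedged, with made-up rootnames):
#     _group_dict_by_pid(["LB4B01AAQ", "LB4B01ABQ"], [11895, 11895])
# returns {11895: ["LB4B01AAQ", "LB4B01ABQ"]}.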
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def pickle_missing(missing_data, pkl_file=None):
"""
Pickle the dictionary describing missing data.
Parameters:
-----------
missing_data : dictionary
Dictionary where the key is the proposal, and values are the
missing data.
pkl_file : str
Name of output pickle file.
Returns:
--------
Nothing
"""
if not pkl_file:
pkl_file = "filestoretrieve.p"
pickle.dump(missing_data, open(pkl_file, "wb"))
cwd = os.getcwd()
print("Missing data written to pickle file {0}".format(os.path.join(cwd,pkl_file)))
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def check_for_pending():
"""
Check the appropriate URL and get the relevant information about pending
requests.
Parameters:
-----------
None
Returns:
--------
num : int
Number of pending archive requests (can be zero)
badness : Bool
True if something went wrong with an archive request
(e.g. status=KILLED)
status_url : str
URL to check for requests
"""
MYUSER = "jotaylor"
status_url = "http://archive.stsci.edu/cgi-bin/reqstat?reqnum=={0}".format(MYUSER)
tries = 5
while tries > 0:
try:
urllines0 = urllib.request.urlopen(status_url).readlines()
urllines = [x.decode("utf-8") for x in urllines0]
except IOError:
print("Something went wrong connecting to {0}.".format(status_url))
tries -= 1
time.sleep(30)
badness = True
else:
tries = -100
for line in urllines:
mystr = "of these requests are still RUNNING"
if mystr in line:
num_requests = [x.split(mystr) for x in line.split()][0][0]
assert (num_requests.isdigit()), "A non-number was found in line {0}!".format(line)
num = int(num_requests)
badness = False
break
else:
badness = True
return num, badness, status_url
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def ensure_no_pending():
"""
Check for any pending archive requests, and if there are any, wait until
they finish.
Parameters:
-----------
None
Returns:
--------
Nothing.
"""
num_requests, badness, status_url = check_for_pending()
while num_requests > 0:
print("There are still {0} requests pending from a previous COSMO run, waiting 5 minutes...".format(num_requests))
assert (badness == False), "Something went wrong during requests, check {0}".format(status_url)
time.sleep(300)
num_requests, badness, status_url = check_for_pending()
else:
print("All pending requests finished, moving on!")
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def tabulate_cache():
"""
Determine all the datasets that are currently in the COS central store
cache.
Parameters:
-----------
None
Returns:
--------
array : array
The full path of every dataset in the cache.
array : array
The rootname of every dataset in the cache.
"""
print("\tTabulating list of all cache COS datasets (this may take several minutes)...")
cos_cache = glob.glob(os.path.join(CACHE, "l*/l*/*fits*"))
cache_filenames = [os.path.basename(x) for x in cos_cache]
cache_roots = [x[:9].upper() for x in cache_filenames]
return np.array(cos_cache), np.array(cache_filenames), np.array(cache_roots)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def find_missing_in_cache(missing_dict, cache_a, cos_cache):
total_copied = 0
start_missing = len(missing_dict.keys())
to_copy_d = {}
for key in list(missing_dict):
missing_files = missing_dict[key]
missing_in_cache = list(set(missing_files) & set(cache_a))
if len(missing_in_cache) == 0:
continue
total_copied += len(missing_in_cache)
updated_missing = list(set(missing_files) - set(missing_in_cache))
if not updated_missing:
missing_dict.pop(key, "Something went terribly wrong, {0} isn't in dictionary".format(key))
else:
missing_dict[key] = updated_missing
# Create a generator where each element is an array with all
# file types that match each missing dataset. Then concatenate all these
# individual arrays for ease of copying.
# Joe said this makes sense, so it's ok, right?
try:
to_copy = np.concatenate(tuple( (cos_cache[np.where(cache_a == x)]
for x in missing_in_cache) ))
except ValueError:
print("RUH ROH!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
to_copy_d[key] = to_copy
end_missing = len(missing_dict.keys())
print("\tCopying {} total root(s) from cache, {} complete PID(s)".format(
total_copied, start_missing-end_missing))
return missing_dict, to_copy_d
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def copy_from_cache(to_copy):
for pid, cache_files in to_copy.items():
dest = os.path.join(BASE_DIR, str(pid))
print("\tCopying {} file(s) from cache into {}".format(len(cache_files), dest))
if not os.path.isdir(dest):
os.mkdir(dest)
# By importing pyfastcopy, shutil performance is automatically
# enhanced
compress_dest = dest
compress_files(cache_files, outdir=compress_dest, remove_orig=False,
verbose=False)
return to_copy
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def copy_cache(missing_data, missing_exts=None, prl=True):
"""
When there are missing public datasets, check to see if any of them can
be copied from the COS cache in central store, which is faster than
requesting them from MAST.
Parameters:
-----------
missing_data : dictionary
Dictionary where each key is the proposal, and values are the
missing data.
missing_exts : dictionary
Dictionary where each key is the proposal, and values are
missing single raw or product files from previous COSMO runs.
out_q : multiprocess.Queue object
If not None, the output of this function will be passed to
the Queue object in order to be curated during multiprocessing.
Returns:
--------
missing_data : dictionary
Dictionary where each key is the proposal, and values are the
missing data after copying any available data from the cache.
"""
cos_cache, cache_filenames, cache_roots = tabulate_cache()
missing_data, to_copy_root = find_missing_in_cache(missing_data, cache_roots, cos_cache)
if missing_exts is not None:
print("looking at exts")
missing_exts, to_copy_exts = find_missing_in_cache(missing_exts, cache_filenames, cos_cache)
to_copy = combine_2dicts(to_copy_root, to_copy_exts)
missing_ext_roots = {k:list(set([dataset[:9].upper() for dataset in v])) for k,v in missing_exts.items()}
still_missing = combine_2dicts(missing_data, missing_ext_roots)
else:
to_copy = to_copy_root
still_missing = missing_data
if to_copy:
parallelize("smart", "check_usage", copy_from_cache, to_copy)
return still_missing
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def copy_entire_cache(cos_cache):
"""
In development.
"""
prop_map = {}
for item in cos_cache:
filename = os.path.basename(item)
ippp = filename[:4]
if not ippp in prop_map.keys():
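# NOTE: `pf` is assumed to be a FITS reader imported as pf (e.g. astropy.io.fits);
# it is not imported at the top of this module, and `proposid` may need to be
# cast to str before the os.path.join below.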
hdr0 = pf.getheader(item,0)
proposid = hdr0["proposid"]
prop_map[ippp] = proposid
else:
proposid = prop_map[ippp]
dest = os.path.join(BASE_DIR, proposid, filename)
# By importing pyfastcopy, shutil performance is automatically enhanced
shutil.copyfile(item, dest)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
def check_proprietary_status(rootnames):
'''
Given a series of rootnames, determine each one's proprietary status:
65545 for proprietary (gid for the STSCI/cosgo group) and 6045 for public
(gid for the STSCI/cosstis group).
Parameters:
-----------
rootnames : array-like
Rootnames to query.
Returns:
--------
propr_status : list
    For each returned dataset, 65545 if the data are still proprietary
    or 6045 if they are public.
filenames : list
    The corresponding dataset rootnames.
'''
chunksize = 10000
chunks = [rootnames[i:i+chunksize] for i in range(0, len(rootnames), chunksize)]
priv_id = 65545
pub_id = 6045
sql_results = []
for chunk in chunks:
query = "SELECT DISTINCT ads_data_set_name, ads_release_date, ads_pep_id "\
"FROM archive_data_set_all "\
"WHERE ads_best_version='Y' "\
"AND ads_archive_class IN ('cal', 'asn') "\
"AND ads_data_set_name IN {}\ngo".format(tuple(chunk))
results = janky_connect(query)
sql_results += results
utc_dt = dt.utcnow()
utc_str = utc_dt.strftime("%b %d %Y %I:%M:%S%p")
propr_status = []
filenames = []
for row in sql_results:
file_dt = dt.strptime(row[1], "%b %d %Y %I:%M:%S:%f%p")
if file_dt <= utc_dt:
propr_status.append(pub_id)
else:
propr_status.append(priv_id)
filenames.append(row[0])
return propr_status, filenames
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
@timefunc
def find_new_cos_data(pkl_it, pkl_file, use_cs=False, prl=True):
"""
Workhorse function, determine what data already exist on disk/in the
greendev DB and determine if data are missing. Copy any data from local
cache if possible.
Parameters:
-----------
pkl_it : Bool
Switch to pickle final dictionary of data to requested from MAST.
pkl_file : str
Name of output pickle file.
use_cs : Bool
Switch to find missing data comparing what is currently on
central store, as opposed to using the COSMO database.
prl : Bool
Switch to run manualabor in parallel or not.
Returns:
--------
all_missing_data : dictionary
Dictionary of all datasets that need to be requested from MAST,
where each key is a PID and the value is a list of all missing
datasets for that PID.
"""
print("*"*72)
missing_data_priv, missing_data_pub, missing_exts = find_missing_data(use_cs)
print("\t{} proprietary program(s) missing: {}\n\t{} public program(s) missing: {}".format(
len(missing_data_priv.keys()), list(missing_data_priv.keys()),
len(missing_data_pub.keys()), list(missing_data_pub.keys()) ))
if missing_data_pub:
print("Checking to see if any missing public data is in local cache...")
missing_data_pub_rem = copy_cache(missing_data_pub, missing_exts)
# Some nonstandard data isn't stored in the cache (e.g. MMD), so
# check if any other public data needs to be retrieved.
if missing_data_pub_rem:
all_missing_data = missing_data_priv.copy()
for k,v in missing_data_pub_rem.items():
if k in all_missing_data:
all_missing_data[k] = list(set(all_missing_data[k]) |
set(v))
else:
all_missing_data[k] = v
else:
all_missing_data = missing_data_priv
elif missing_data_priv:
print("All missing data are proprietary.")
all_missing_data = missing_data_priv
else:
print("There are no missing data.")
all_missing_data = {}
# Not utilized for the moment, see Issue #22 on github.
# ensure_no_pending()
if pkl_it:
pickle_missing(all_missing_data, pkl_file)
return all_missing_data
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-p", dest="pkl_it", action="store_true", default=False,
help="Save output to pickle file")
parser.add_argument("--pklfile", dest="pkl_file", default=None,
help="Name for output pickle file")
parser.add_argument("--cs", dest="use_cs", action="store_true",
default=False,
help="Find missing data comparing to central store, not DB")
parser.add_argument("--prl", dest="prl", action="store_false",
default=True, help="Parallellize functions")
args = parser.parse_args()
find_new_cos_data(args.pkl_it, args.pkl_file, args.use_cs, args.prl)
| |
import json
import logging
import os
import sys
from abc import abstractmethod, ABC
from collections.abc import Mapping
from datetime import datetime, timedelta
from typing import Dict, Any, Iterator, List
from typing import TypedDict # pylint: disable=no-name-in-module
from pywikibot import Page, Site, Category
from pywikibot.pagegenerators import CategorizedPageGenerator
from tools.bots import BotException
# type hints
class LoggerNameDict(TypedDict):
info: str
debug: str
class LastRunDict(TypedDict):
success: bool
timestamp: str
_DATA_PATH: str = os.path.expanduser("~") + os.sep + ".wiki_bot"
def _get_data_path() -> str:
if not os.path.exists(_DATA_PATH):
os.mkdir(_DATA_PATH)
return _DATA_PATH
class WikiLogger():
_logger_format: str = "[%(asctime)s] [%(levelname)-8s] [%(message)s]"
_logger_date_format: str = "%H:%M:%S"
_wiki_timestamp_format: str = "%y-%m-%d_%H:%M:%S"
def __init__(self, bot_name: str, start_time: datetime, log_to_screen: bool = True):
self._bot_name: str = bot_name
self._start_time: datetime = start_time
self._data_path: str = _get_data_path()
self._logger: logging.Logger = logging.getLogger(self._bot_name)
self._logger_names: LoggerNameDict = self._get_logger_names()
self._log_to_screen: bool = log_to_screen
def __enter__(self):
self._setup_logger_properties()
def __exit__(self, exc_type, exc_val, exc_tb):
self.tear_down()
def tear_down(self):
for handler in self._logger.handlers[:]:
handler.close()
self._logger.removeHandler(handler)
if os.path.isfile(self._data_path + os.sep + self._logger_names["info"]):
os.remove(self._data_path + os.sep + self._logger_names["info"])
sys.stdout.flush()
logging.shutdown()
def _get_logger_names(self) -> LoggerNameDict:
start_time = self._start_time.strftime('%y%m%d%H%M%S')
return {"info": f"{self._bot_name}_INFO_{start_time}.log",
"debug": f"{self._bot_name}_DEBUG_{start_time}.log"}
def _setup_logger_properties(self):
self._logger.setLevel(logging.DEBUG)
error_log = logging.FileHandler(self._data_path + os.sep + self._logger_names["info"],
encoding="utf8")
error_log.setLevel(logging.INFO)
debug_log = logging.FileHandler(self._data_path + os.sep + self._logger_names["debug"],
encoding="utf8")
debug_log.setLevel(logging.DEBUG)
formatter = logging.Formatter(self._logger_format, datefmt=self._logger_date_format)
error_log.setFormatter(formatter)
debug_log.setFormatter(formatter)
self._logger.addHandler(error_log)
self._logger.addHandler(debug_log)
if self._log_to_screen: # pragma: no cover
# this activates the output of the logger
debug_stream = logging.StreamHandler(sys.stdout)
debug_stream.setLevel(logging.DEBUG)
debug_stream.setFormatter(formatter)
self._logger.addHandler(debug_stream)
def debug(self, msg: str):
self._logger.log(logging.DEBUG, msg)
def info(self, msg: str):
self._logger.log(logging.INFO, msg)
def warning(self, msg: str):
self._logger.log(logging.WARNING, msg)
def error(self, msg: str):
self._logger.log(logging.ERROR, msg)
def critical(self, msg: str):
self._logger.log(logging.CRITICAL, msg)
def exception(self, msg: str, exc_info):
self._logger.exception(msg=msg, exc_info=exc_info)
def create_wiki_log_lines(self) -> str:
with open(self._data_path + os.sep + self._logger_names["info"], encoding="utf8") as filepointer:
line_list = []
for line in filepointer:
line_list.append(line.strip())
log_lines = ""
log_lines = log_lines \
+ "\n\n" \
+ f"=={self._start_time.strftime(self._wiki_timestamp_format)}==" \
+ "\n\n" \
+ "\n\n".join(line_list) \
+ "\n--~~~~"
return log_lines
class PersistedTimestamp():
_timeformat: str = "%Y-%m-%d_%H:%M:%S"
def __init__(self, bot_name: str):
self._last_run: datetime = datetime.utcfromtimestamp(0)
self._success_last_run: bool = False
self._success_this_run: bool = False
self._start: datetime = datetime.now()
self._data_path: str = _get_data_path()
self._full_filename: str = self._data_path + os.sep + f"{bot_name}.last_run.json"
def __enter__(self):
self.set_up()
def __exit__(self, exc_type, exc_val, exc_tb):
self.tear_down()
def set_up(self):
try:
with open(self._full_filename, mode="r", encoding="utf-8") as persist_json:
last_run_dict: LastRunDict = json.load(persist_json)
self._last_run = datetime.strptime(last_run_dict["timestamp"], self._timeformat)
self._success_last_run = last_run_dict["success"]
os.remove(self._full_filename)
except FileNotFoundError:
pass
def tear_down(self):
with open(self._full_filename, mode="w", encoding="utf-8") as persist_json:
json.dump({"timestamp": self._start.strftime(self._timeformat), "success": self.success_this_run},
persist_json)
@property
def last_run(self) -> datetime:
return self._last_run
@last_run.setter
def last_run(self, value: datetime):
if isinstance(value, datetime):
self._last_run = value
@property
def start_of_run(self) -> datetime:
return self._start
@property
def success_last_run(self) -> bool:
return self._success_last_run
@property
def success_this_run(self) -> bool:
return self._success_this_run
@success_this_run.setter
def success_this_run(self, new_value: bool):
if isinstance(new_value, bool):
self._success_this_run = new_value
else:
raise TypeError("success_this_run is a boolean value.")
class OneTimeBot(ABC):
def __init__(self, wiki: Site = None, debug: bool = True,
log_to_screen: bool = True, log_to_wiki: bool = True):
self.success: bool = False
self.log_to_screen: bool = log_to_screen
self.log_to_wiki: bool = log_to_wiki
if not self.task:
raise NotImplementedError("The class function \"task\" must be implemented!\n"
"Example:\n"
"class DoSomethingBot(OneTimeBot):\n"
" def task(self):\n"
" do_stuff()")
self.timestamp: PersistedTimestamp = PersistedTimestamp(bot_name=self.bot_name)
self.wiki: Site = wiki
self.debug: bool = debug
self.timeout: timedelta = timedelta(days=1)
self.logger: WikiLogger = WikiLogger(self.bot_name,
self.timestamp.start_of_run,
log_to_screen=self.log_to_screen)
def __enter__(self):
self.timestamp.__enter__()
self.logger.__enter__()
self.logger.info(f"Start the bot {self.bot_name}.")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.timestamp.success_this_run = self.success
self.timestamp.__exit__(exc_type, exc_val, exc_tb)
self.logger.info(f"Finish bot {self.bot_name} in "
f"{datetime.now() - self.timestamp.start_of_run}.")
if self.log_to_wiki:
self.send_log_to_wiki()
self.logger.__exit__(exc_type, exc_val, exc_tb)
@abstractmethod
def task(self) -> bool:
pass
@classmethod
def get_bot_name(cls) -> str:
return cls.__name__
@property
def bot_name(self) -> str:
return self.get_bot_name()
def run(self) -> bool:
try:
self.success = bool(self.task()) # pylint: disable=not-callable
except Exception as caught_exception:  # pylint: disable=broad-except
self.logger.exception("Logging an uncaught exception", exc_info=caught_exception)
self.success = False
return self.success
def _watchdog(self) -> bool:
time_over: bool = False
if self.timeout:
diff = datetime.now() - self.timestamp.start_of_run
if diff > self.timeout:
self.logger.warning("Bot finished by timeout.")
time_over = True
return time_over
def send_log_to_wiki(self):
wiki_log_page = f"Benutzer:THEbotIT/Logs/{self.bot_name}"
page = Page(self.wiki, wiki_log_page)
page.text += self.logger.create_wiki_log_lines()
page.save(f"Update of Bot {self.bot_name}", botflag=True)
@staticmethod
def save_if_changed(page: Page, text: str, change_msg: str):
if text.rstrip() != page.text:
page.text = text
page.save(change_msg, botflag=True)
def get_lemma_str_from_cat(self, category: str) -> List[str]:
page = Category(self.wiki, category)
cat_list = [str(lemma).strip("[]")[2:] for lemma in CategorizedPageGenerator(page)]
return cat_list
class PersistedData(Mapping):
def __init__(self, bot_name: str):
self._data: Dict = {}
self.bot_name: str = bot_name
self.data_folder: str = _get_data_path()
self.file_name: str = self.data_folder + os.sep + bot_name + ".data.json"
def __getitem__(self, item) -> Any:
return self._data[item]
def __setitem__(self, key: str, value: Any):
self._data[key] = value
def __delitem__(self, key: str):
del self._data[key]
def __len__(self) -> int:
return len(self._data)
def __iter__(self) -> Iterator:
return iter(self._data)
def assign_dict(self, new_dict: Dict):
if isinstance(new_dict, dict):
self._data = new_dict
else:
raise BotException(f"{new_dict} has the wrong type. It must be a dictionary.")
def dump(self, success: bool = True):
if success:
with open(self.file_name, mode="w", encoding="utf-8") as json_file:
json.dump(self._data, json_file, indent=2)
if os.path.isfile(self.file_name + ".deprecated"):
os.remove(self.file_name + ".deprecated")
else:
with open(self.file_name + ".broken", mode="w", encoding="utf-8") as json_file:
json.dump(self._data, json_file, indent=2)
def load(self):
if os.path.exists(self.file_name):
with open(self.file_name, mode="r", encoding="utf-8") as json_file:
self._data = json.load(json_file)
os.rename(self.file_name, self.file_name + ".deprecated")
else:
raise BotException("No data to load.")
def update(self, dict_to_update: Dict):
self._data.update(dict_to_update)
def _recover_data(self, type_of_data: str):
try:
with open(f"{self.file_name}.{type_of_data}", mode="r", encoding="utf-8") as json_file:
self.assign_dict(json.load(json_file))
except FileNotFoundError as error:
raise BotException(f"There is no {type_of_data} data to load.") from error
def get_broken(self):
self._recover_data("broken")
def get_deprecated(self):
self._recover_data("deprecated")
class CanonicalBot(OneTimeBot, ABC):
def __init__(self, wiki: Site = None, debug: bool = True,
log_to_screen: bool = True, log_to_wiki: bool = True):
OneTimeBot.__init__(self, wiki, debug, log_to_screen, log_to_wiki)
self.data = PersistedData(bot_name=self.bot_name)
self.new_data_model = datetime.min
def __enter__(self):
OneTimeBot.__enter__(self)
if self.data_outdated():
self.data.assign_dict({})
self.logger.warning("The data is thrown away. It is out of date")
elif (self.timestamp.last_run is None) or not self.timestamp.success_last_run:
self.data.assign_dict({})
self.logger.warning("The last run wasn\'t successful. The data is thrown away.")
else:
self.data.load()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.success:
self.data.dump(success=True)
else:
self.data.dump(success=False)
self.logger.critical("There was an error in the general procedure. "
"The broken data and a backup of the old will be keept.")
OneTimeBot.__exit__(self, exc_type, exc_val, exc_tb)
@abstractmethod
def task(self) -> bool:
pass
def create_timestamp_for_search(self, days_in_past=1) -> datetime:
start_of_search: datetime = self.timestamp.last_run
if self.last_run_successful:
start_of_search = self.timestamp.last_run - timedelta(days=days_in_past)
return start_of_search
def data_outdated(self) -> bool:
outdated = False
if self.new_data_model and self.timestamp.last_run:
if self.timestamp.last_run < self.new_data_model:
outdated = True
self.timestamp.last_run = datetime(1970, 1, 1)
return outdated
@property
def last_run_successful(self) -> bool:
return bool(self.timestamp.last_run and self.timestamp.success_last_run)
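# Illustrative sketch (not part of the original module): a minimal bot built on
# OneTimeBot, assuming a pywikibot Site instance is available as `site`.
#
#   class DoSomethingBot(OneTimeBot):
#       def task(self) -> bool:
#           self.logger.info("doing stuff")
#           return True
#
#   with DoSomethingBot(wiki=site, log_to_wiki=False) as bot:
#       bot.run()  # sets bot.success; on exit the timestamp and log are persisted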
| |
import logging
import mimetypes
import sys
from config import checks_config, settings
from keystone import get_auth_token
from swiftclient import client as swift_client
from swiftclient.exceptions import ClientException
from logger import getLogger
logger = getLogger(__name__)
# ================================================
# Swift configuration class
# ================================================
class SwiftConfig(object):
def __init__(self, auth_token, swift_url, container_name):
"""
Initialize a Swift configuration instance
"""
self.auth_token = auth_token
self.swift_url = swift_url
self.container = container_name
self.connection = self._get_connection()
def _get_connection(self):
"""
Get a connection to Swift object store
"""
return swift_client.Connection(
preauthurl=self.swift_url,
preauthtoken=self.auth_token,
retries=5,
auth_version='1',
insecure=True)
# ================================================
# Swift configuration initialization
# ================================================
def get_swift_config():
"""
Get a SwiftConfig instance per application settings
"""
auth_token = get_auth_token()
container = settings('swift_container')
swift_url = settings('swift_url')
swift_cfg = SwiftConfig(auth_token, swift_url, container)
return swift_cfg
def _get_config():
"""
This is a fixed/non-mockable function pointer for the @checks_config decorator
"""
return get_swift_config()
# ================================================
# Swift module interfaces
# ================================================
@checks_config(config_func=_get_config)
def check_container_missing(config=None):
"""
Check if default container missing in Swift
Keyword arguments:
config -- an instance of SwiftConfig (optional, default None)
"""
try:
logger.debug('Checking container {0}'.format(config.container))
headers, container_list = config.connection.get_account()
for container in container_list:
logger.debug("--- container: {0}".format(container['name']))
if (container['name'] == config.container):
logger.debug('--- found {0}'.format(config.container))
return False
logger.debug('--- missing container {0}'.format(config.container))
return True
except (ClientException, Exception):
logger.exception("Exception verifying container exists.")
raise
@checks_config(config_func=_get_config)
def ensure_container_exists(config=None):
"""
Ensure default container exists in Swift.
Keyword arguments:
config -- an instance of SwiftConfig (optional, default None)
"""
# Determine if necessary container missing; if so, create it
container_missing = check_container_missing(config=config)
if (container_missing):
try:
response = {}
config.connection.put_container(
config.container, response_dict=response)
logger.debug(
"--- Container {0} created".format(config.container))
logger.debug("--- Response {0}".format(response))
except (ClientException, Exception):
msg = "Exception creating container {0}.".format(config.container)
logger.exception(msg)
raise
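# Illustrative sketch (not from the original source): because of the
# @checks_config decorator, callers may omit the config argument and the
# decorator is expected to inject the SwiftConfig built by _get_config();
# passing an explicit config (as done for check_container_missing above)
# reuses an existing connection instead of creating a new one.
#
#   ensure_container_exists()                  # config supplied by the decorator
#   ensure_container_exists(config=my_config)  # my_config: an existing SwiftConfig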
# ToDo - CJ - Change this back to 'read_object'
@checks_config(config_func=_get_config)
def get_file_contents(file_name, config=None):
"""
Function wrapper to perform 'get_object' call on Swift
Keyword arguments:
file_name -- the name of the file in Swift store
config -- an instance of SwiftConfig (optional, default None)
"""
response_dict = {}
try:
# Response from Swift:
# a tuple of (response headers, the object contents)
# The response headers will be a dict and all header names
# will be lowercase.
response = config.connection.get_object(
config.container,
file_name,
response_dict=response_dict)
file_contents = response[1]
return file_contents
except (ClientException, Exception):
msg = "Exception getting object {0} from Swift.".format(file_name)
logger.exception(msg)
raise
@checks_config(config_func=_get_config)
def get_files_in_container(config=None):
"""
Get info of all files in default Swift container
Keyword arguments:
config -- an instance of SwiftConfig (optional, default None)
"""
# This call returns a tuple of:
# - response headers (dict)
# - objects in the container (list)
# An example of an object in the container:
# { 'bytes': 92,
# 'last_modified': '2015-06-13T02:53:25.788490',
# 'hash': '421e105dda2580a39b21edeaf9b035de',
# 'name': 'test_vendor_package.json',
# 'content_type': 'application/json' }
result = config.connection.get_container(
config.container, full_listing=True)
return result[1]
@checks_config(config_func=_get_config)
def save_object(name, contents, config=None):
"""
Function wrapper to perform 'put_object' call on Swift
Keyword arguments:
name -- the name of the file to be saved
contents -- the contents of the file to be saved in Swift store
config -- an instance of SwiftConfig (optional, default None)
"""
try:
# Ensure the container we need exists
ensure_container_exists(config=config)
# Push the file contents to Swift
# Example of response from put_object call -
# {
# 'status': 201,
# 'headers': {
# 'content-length': '0',
# 'last-modified': 'Fri, 17 Jul 2015 04:43:56 GMT',
# 'connection': 'keep-alive',
# 'etag': 'd41d8cd98f00b204e9800998ecf8427e',
# 'x-trans-id': 'txeddbca07d8e744deae343-0055a8880c',
# 'date': 'Fri, 17 Jul 2015 04:43:57 GMT',
# 'content-type': 'text/html; charset=UTF-8'},
# 'reason': 'Created',
# 'response_dicts': [{
# 'status': 201,
# 'headers': {
# 'content-length': '0',
# 'last-modified':
# 'Fri, 17 Jul 2015 04:43:56 GMT',
# 'connection': 'keep-alive',
# 'etag': 'd41d8cd98f00b204e9800998ecf8427e',
# 'x-trans-id': 'txeddbca07d8e744deae343-0055a8880c',
# 'date': 'Fri, 17 Jul 2015 04:43:57 GMT',
# 'content-type': 'text/html; charset=UTF-8'},
# 'reason': 'Created'}]}
# Note: somehow 'content-length' is 0
response = {}
config.connection.put_object(
config.container,
name,
contents,
content_length=len(contents),  # actual payload length, not the Python object size
content_type=mimetypes.guess_type(name, strict=True)[0],
response_dict=response)
logger.debug(response)
return response
except (ClientException, Exception):
msg = "Exception saving object to Swift.\n"
logger.exception(msg)
raise
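# Illustrative sketch (not part of the original module): a save/read round trip
# through the default container, with a hypothetical file name and contents.
#
#   save_object("example.json", '{"vendor": "acme"}')  # ensures the container exists first
#   contents = get_file_contents("example.json")       # the object body returned by Swift
#   names = [obj['name'] for obj in get_files_in_container()]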
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import math
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.db.models import F, Q
from django.forms.util import ErrorList
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseBadRequest,\
HttpResponseForbidden
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext as _
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from django.views.generic.edit import ModelFormMixin
from django.views.decorators.csrf import csrf_protect
from django.views import generic
from pybb import compat, defaults, util
from pybb.compat import get_atomic_func
from pybb.forms import PostForm, AdminPostForm, AttachmentFormSet, PollAnswerFormSet, PollForm
from pybb.models import Category, Forum, Topic, Post, TopicReadTracker, ForumReadTracker, PollAnswerUser
from pybb.permissions import perms
from pybb.templatetags.pybb_tags import pybb_topic_poll_not_voted
User = compat.get_user_model()
username_field = compat.get_username_field()
Paginator, pure_pagination = compat.get_paginator_class()
class PaginatorMixin(object):
def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True, **kwargs):
kwargs = {}
if pure_pagination:
kwargs['request'] = self.request
return Paginator(queryset, per_page, orphans=orphans, allow_empty_first_page=allow_empty_first_page, **kwargs)
class RedirectToLoginMixin(object):
""" mixin which redirects to settings.LOGIN_URL if the view encounters an PermissionDenied exception
and the user is not authenticated. Views inheriting from this need to implement
get_login_redirect_url(), which returns the URL to redirect to after login (parameter "next")
"""
def dispatch(self, request, *args, **kwargs):
try:
return super(RedirectToLoginMixin, self).dispatch(request, *args, **kwargs)
except PermissionDenied:
if not request.user.is_authenticated():
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(self.get_login_redirect_url())
else:
return HttpResponseForbidden()
def get_login_redirect_url(self):
""" get the url to which we redirect after the user logs in. subclasses should override this """
return '/'
class IndexView(generic.ListView):
template_name = 'pybb/index.html'
context_object_name = 'categories'
def get_context_data(self, **kwargs):
ctx = super(IndexView, self).get_context_data(**kwargs)
categories = ctx['categories']
for category in categories:
category.forums_accessed = perms.filter_forums(self.request.user, category.forums.filter(parent=None))
ctx['categories'] = categories
return ctx
def get_queryset(self):
return perms.filter_categories(self.request.user, Category.objects.all())
class CategoryView(RedirectToLoginMixin, generic.DetailView):
template_name = 'pybb/index.html'
context_object_name = 'category'
def get_login_redirect_url(self):
# use super().get_object() here: CategoryView.get_object applies permission checks
# and would raise PermissionDenied, so the login redirect would never happen
return super(CategoryView, self).get_object().get_absolute_url()
def get_queryset(self):
return Category.objects.all()
def get_object(self, queryset=None):
obj = super(CategoryView, self).get_object(queryset)
if not perms.may_view_category(self.request.user, obj):
raise PermissionDenied
return obj
def get_context_data(self, **kwargs):
ctx = super(CategoryView, self).get_context_data(**kwargs)
ctx['category'].forums_accessed = perms.filter_forums(self.request.user, ctx['category'].forums.filter(parent=None))
ctx['categories'] = [ctx['category']]
return ctx
def get(self, *args, **kwargs):
if defaults.PYBB_NICE_URL and (('id' in kwargs) or ('pk' in kwargs)):
return redirect(super(CategoryView, self).get_object(), permanent=defaults.PYBB_NICE_URL_PERMANENT_REDIRECT)
return super(CategoryView, self).get(*args, **kwargs)
class ForumView(RedirectToLoginMixin, PaginatorMixin, generic.ListView):
paginate_by = defaults.PYBB_FORUM_PAGE_SIZE
context_object_name = 'topic_list'
template_name = 'pybb/forum.html'
def dispatch(self, request, *args, **kwargs):
self.forum = self.get_forum(**kwargs)
return super(ForumView, self).dispatch(request, *args, **kwargs)
def get_login_redirect_url(self):
return self.forum.get_absolute_url()
def get_context_data(self, **kwargs):
ctx = super(ForumView, self).get_context_data(**kwargs)
ctx['forum'] = self.forum
ctx['forum'].forums_accessed = perms.filter_forums(self.request.user, self.forum.child_forums.all())
return ctx
def get_queryset(self):
if not perms.may_view_forum(self.request.user, self.forum):
raise PermissionDenied
qs = self.forum.topics.order_by('-sticky', '-updated', '-id').select_related()
qs = perms.filter_topics(self.request.user, qs)
return qs
def get_forum(self, **kwargs):
if 'pk' in kwargs:
forum = get_object_or_404(Forum.objects.all(), pk=kwargs['pk'])
elif 'slug' in kwargs and 'category_slug' in kwargs:
forum = get_object_or_404(Forum, slug=kwargs['slug'], category__slug=kwargs['category_slug'])
else:
raise Http404(_('Forum does not exist'))
return forum
def get(self, *args, **kwargs):
if defaults.PYBB_NICE_URL and 'pk' in kwargs:
return redirect(self.forum, permanent=defaults.PYBB_NICE_URL_PERMANENT_REDIRECT)
return super(ForumView, self).get(*args, **kwargs)
class LatestTopicsView(PaginatorMixin, generic.ListView):
paginate_by = defaults.PYBB_FORUM_PAGE_SIZE
context_object_name = 'topic_list'
template_name = 'pybb/latest_topics.html'
def get_queryset(self):
qs = Topic.objects.all().select_related()
qs = perms.filter_topics(self.request.user, qs)
return qs.order_by('-updated', '-id')
class PybbFormsMixin(object):
post_form_class = PostForm
admin_post_form_class = AdminPostForm
attachment_formset_class = AttachmentFormSet
poll_form_class = PollForm
poll_answer_formset_class = PollAnswerFormSet
def get_post_form_class(self):
return self.post_form_class
def get_admin_post_form_class(self):
return self.admin_post_form_class
def get_attachment_formset_class(self):
return self.attachment_formset_class
def get_poll_form_class(self):
return self.poll_form_class
def get_poll_answer_formset_class(self):
return self.poll_answer_formset_class
class TopicView(RedirectToLoginMixin, PaginatorMixin, PybbFormsMixin, generic.ListView):
paginate_by = defaults.PYBB_TOPIC_PAGE_SIZE
template_object_name = 'post_list'
template_name = 'pybb/topic.html'
def get_login_redirect_url(self):
return self.topic.get_absolute_url()
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
self.topic = self.get_topic(**kwargs)
if request.GET.get('first-unread'):
if request.user.is_authenticated():
read_dates = []
try:
read_dates.append(TopicReadTracker.objects.get(user=request.user, topic=self.topic).time_stamp)
except TopicReadTracker.DoesNotExist:
pass
try:
read_dates.append(ForumReadTracker.objects.get(user=request.user, forum=self.topic.forum).time_stamp)
except ForumReadTracker.DoesNotExist:
pass
read_date = read_dates and max(read_dates)
if read_date:
try:
first_unread_topic = self.topic.posts.filter(created__gt=read_date).order_by('created', 'id')[0]
except IndexError:
first_unread_topic = self.topic.last_post
else:
first_unread_topic = self.topic.head
return HttpResponseRedirect(reverse('pybb:post', kwargs={'pk': first_unread_topic.id}))
return super(TopicView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
if not perms.may_view_topic(self.request.user, self.topic):
raise PermissionDenied
if self.request.user.is_authenticated() or not defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER:
Topic.objects.filter(id=self.topic.id).update(views=F('views') + 1)
else:
cache_key = util.build_cache_key('anonymous_topic_views', topic_id=self.topic.id)
cache.add(cache_key, 0)
if cache.incr(cache_key) % defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER == 0:
Topic.objects.filter(id=self.topic.id).update(views=F('views') +
defaults.PYBB_ANONYMOUS_VIEWS_CACHE_BUFFER)
cache.set(cache_key, 0)
qs = self.topic.posts.all().select_related('user')
if defaults.PYBB_PROFILE_RELATED_NAME:
qs = qs.select_related('user__%s' % defaults.PYBB_PROFILE_RELATED_NAME)
if not perms.may_moderate_topic(self.request.user, self.topic):
qs = perms.filter_posts(self.request.user, qs)
return qs
def get_context_data(self, **kwargs):
ctx = super(TopicView, self).get_context_data(**kwargs)
if self.request.user.is_authenticated():
self.request.user.is_moderator = perms.may_moderate_topic(self.request.user, self.topic)
self.request.user.is_subscribed = self.request.user in self.topic.subscribers.all()
if perms.may_post_as_admin(self.request.user):
ctx['form'] = self.get_admin_post_form_class()(
initial={'login': getattr(self.request.user, username_field)},
topic=self.topic)
else:
ctx['form'] = self.get_post_form_class()(topic=self.topic)
self.mark_read(self.request.user, self.topic)
elif defaults.PYBB_ENABLE_ANONYMOUS_POST:
ctx['form'] = self.get_post_form_class()(topic=self.topic)
else:
ctx['form'] = None
ctx['next'] = self.get_login_redirect_url()
if perms.may_attach_files(self.request.user):
aformset = self.get_attachment_formset_class()()
ctx['aformset'] = aformset
if defaults.PYBB_FREEZE_FIRST_POST:
ctx['first_post'] = self.topic.head
else:
ctx['first_post'] = None
ctx['topic'] = self.topic
if perms.may_vote_in_topic(self.request.user, self.topic) and \
pybb_topic_poll_not_voted(self.topic, self.request.user):
ctx['poll_form'] = self.get_poll_form_class()(self.topic)
return ctx
def mark_read(self, user, topic):
try:
forum_mark = ForumReadTracker.objects.get(forum=topic.forum, user=user)
except ForumReadTracker.DoesNotExist:
forum_mark = None
if (forum_mark is None) or (forum_mark.time_stamp < topic.updated):
# Mark topic as read
topic_mark, new = TopicReadTracker.objects.get_or_create_tracker(topic=topic, user=user)
if not new:
topic_mark.save()
# Check if there are any unread topics in the forum
topic_readed = topic.forum.topics.filter(topicreadtracker__user=user,
topicreadtracker__time_stamp__gte=F('updated')
).values_list('id', flat=True).order_by()
forum_readed = topic.forum.topics.filter(forum__forumreadtracker__user=user,
forum__forumreadtracker__time_stamp__gte=F('updated')
).values_list('id', flat=True).order_by()
not_readed = topic.forum.topics.exclude(id__in=list(forum_readed) + list(topic_readed))
if not not_readed.exists():
# Clear all topic marks for this forum, mark forum as read
TopicReadTracker.objects.filter(user=user, topic__forum=topic.forum).delete()
forum_mark, new = ForumReadTracker.objects.get_or_create_tracker(forum=topic.forum, user=user)
forum_mark.save()
def get_topic(self, **kwargs):
if 'pk' in kwargs:
topic = get_object_or_404(Topic, pk=kwargs['pk'], post_count__gt=0)
elif 'slug' in kwargs and 'forum_slug' in kwargs and 'category_slug' in kwargs:
topic = get_object_or_404(
Topic,
slug=kwargs['slug'],
forum__slug=kwargs['forum_slug'],
forum__category__slug=kwargs['category_slug'],
post_count__gt=0
)
else:
raise Http404(_('This topic does not exist'))
return topic
def get(self, *args, **kwargs):
if defaults.PYBB_NICE_URL and 'pk' in kwargs:
return redirect(self.topic, permanent=defaults.PYBB_NICE_URL_PERMANENT_REDIRECT)
return super(TopicView, self).get(*args, **kwargs)
class PostEditMixin(PybbFormsMixin):
@method_decorator(get_atomic_func())
def post(self, request, *args, **kwargs):
return super(PostEditMixin, self).post(request, *args, **kwargs)
def get_form_class(self):
if perms.may_post_as_admin(self.request.user):
return self.get_admin_post_form_class()
else:
return self.get_post_form_class()
def get_context_data(self, **kwargs):
ctx = super(PostEditMixin, self).get_context_data(**kwargs)
if perms.may_attach_files(self.request.user) and 'aformset' not in kwargs:
ctx['aformset'] = self.get_attachment_formset_class()(
instance=getattr(self, 'object', None)
)
if perms.may_create_poll(self.request.user) and 'pollformset' not in kwargs:
ctx['pollformset'] = self.get_poll_answer_formset_class()(
instance=self.object.topic if getattr(self, 'object', None) else None
)
return ctx
def form_valid(self, form):
success = True
save_attachments = False
save_poll_answers = False
self.object, topic = form.save(commit=False)
if perms.may_attach_files(self.request.user):
aformset = self.get_attachment_formset_class()(
self.request.POST, self.request.FILES, instance=self.object
)
if aformset.is_valid():
save_attachments = True
else:
success = False
else:
aformset = None
if perms.may_create_poll(self.request.user):
pollformset = self.get_poll_answer_formset_class()()
if getattr(self, 'forum', None) or topic.head == self.object:
if topic.poll_type != Topic.POLL_TYPE_NONE:
pollformset = self.get_poll_answer_formset_class()(
self.request.POST, instance=topic
)
if pollformset.is_valid():
save_poll_answers = True
else:
success = False
else:
topic.poll_question = None
topic.poll_answers.all().delete()
else:
pollformset = None
if success:
try:
topic.save()
except ValidationError as e:
success = False
errors = form._errors.setdefault('name', ErrorList())
errors += e.error_list
else:
self.object.topic = topic
self.object.save()
if save_attachments:
aformset.save()
if save_poll_answers:
pollformset.save()
return HttpResponseRedirect(self.get_success_url())
return self.render_to_response(self.get_context_data(form=form,
aformset=aformset,
pollformset=pollformset))
class AddPostView(PostEditMixin, generic.CreateView):
template_name = 'pybb/add_post.html'
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated():
self.user = request.user
else:
if defaults.PYBB_ENABLE_ANONYMOUS_POST:
self.user, new = User.objects.get_or_create(**{username_field: defaults.PYBB_ANONYMOUS_USERNAME})
else:
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.get_full_path())
self.forum = None
self.topic = None
if 'forum_id' in kwargs:
self.forum = get_object_or_404(perms.filter_forums(request.user, Forum.objects.all()), pk=kwargs['forum_id'])
if not perms.may_create_topic(self.user, self.forum):
raise PermissionDenied
elif 'topic_id' in kwargs:
self.topic = get_object_or_404(perms.filter_topics(request.user, Topic.objects.all()), pk=kwargs['topic_id'])
if not perms.may_create_post(self.user, self.topic):
raise PermissionDenied
self.quote = ''
if 'quote_id' in request.GET:
try:
quote_id = int(request.GET.get('quote_id'))
except (TypeError, ValueError):  # quote_id missing or not an integer
raise Http404
else:
post = get_object_or_404(Post, pk=quote_id)
profile = util.get_pybb_profile(post.user)
self.quote = util._get_markup_quoter(defaults.PYBB_MARKUP)(post.body, profile.get_display_name())
if self.quote and request.is_ajax():
return HttpResponse(self.quote)
return super(AddPostView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
ip = self.request.META.get('REMOTE_ADDR', '')
form_kwargs = super(AddPostView, self).get_form_kwargs()
form_kwargs.update(dict(topic=self.topic, forum=self.forum, user=self.user,
ip=ip, initial={}))
if getattr(self, 'quote', None):
form_kwargs['initial']['body'] = self.quote
if perms.may_post_as_admin(self.user):
form_kwargs['initial']['login'] = getattr(self.user, username_field)
form_kwargs['may_create_poll'] = perms.may_create_poll(self.user)
form_kwargs['may_edit_topic_slug'] = perms.may_edit_topic_slug(self.user)
return form_kwargs
def get_context_data(self, **kwargs):
ctx = super(AddPostView, self).get_context_data(**kwargs)
ctx['forum'] = self.forum
ctx['topic'] = self.topic
return ctx
def get_success_url(self):
if (not self.request.user.is_authenticated()) and defaults.PYBB_PREMODERATION:
return reverse('pybb:index')
return self.object.get_absolute_url()
class EditPostView(PostEditMixin, generic.UpdateView):
model = Post
context_object_name = 'post'
template_name = 'pybb/edit_post.html'
@method_decorator(login_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
return super(EditPostView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
form_kwargs = super(EditPostView, self).get_form_kwargs()
form_kwargs['may_create_poll'] = perms.may_create_poll(self.request.user)
return form_kwargs
def get_object(self, queryset=None):
post = super(EditPostView, self).get_object(queryset)
if not perms.may_edit_post(self.request.user, post):
raise PermissionDenied
return post
class UserView(generic.DetailView):
model = User
template_name = 'pybb/user.html'
context_object_name = 'target_user'
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
return get_object_or_404(queryset, **{username_field: self.kwargs['username']})
def get_context_data(self, **kwargs):
ctx = super(UserView, self).get_context_data(**kwargs)
ctx['topic_count'] = Topic.objects.filter(user=ctx['target_user']).count()
return ctx
class UserPosts(PaginatorMixin, generic.ListView):
model = Post
paginate_by = defaults.PYBB_TOPIC_PAGE_SIZE
template_name = 'pybb/user_posts.html'
def dispatch(self, request, *args, **kwargs):
username = kwargs.pop('username')
self.user = get_object_or_404(**{'klass': User, username_field: username})
return super(UserPosts, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
qs = super(UserPosts, self).get_queryset()
qs = qs.filter(user=self.user)
qs = perms.filter_posts(self.request.user, qs).select_related('topic')
qs = qs.order_by('-created', '-updated', '-id')
return qs
def get_context_data(self, **kwargs):
context = super(UserPosts, self).get_context_data(**kwargs)
context['target_user'] = self.user
return context
class UserTopics(PaginatorMixin, generic.ListView):
model = Topic
paginate_by = defaults.PYBB_FORUM_PAGE_SIZE
template_name = 'pybb/user_topics.html'
def dispatch(self, request, *args, **kwargs):
username = kwargs.pop('username')
self.user = get_object_or_404(User, username=username)
return super(UserTopics, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
qs = super(UserTopics, self).get_queryset()
qs = qs.filter(user=self.user)
qs = perms.filter_topics(self.user, qs)
qs = qs.order_by('-updated', '-created', '-id')
return qs
def get_context_data(self, **kwargs):
context = super(UserTopics, self).get_context_data(**kwargs)
context['target_user'] = self.user
return context
class PostView(RedirectToLoginMixin, generic.RedirectView):
permanent = False
def dispatch(self, request, *args, **kwargs):
self.post = self.get_post(**kwargs)
return super(PostView, self).dispatch(request, *args, **kwargs)
def get_login_redirect_url(self):
return self.post.get_absolute_url()
def get_redirect_url(self, **kwargs):
if not perms.may_view_post(self.request.user, self.post):
raise PermissionDenied
count = self.post.topic.posts.filter(created__lt=self.post.created).count() + 1
page = math.ceil(count / float(defaults.PYBB_TOPIC_PAGE_SIZE))
return '%s?page=%d#post-%d' % (self.post.topic.get_absolute_url(), page, self.post.id)
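# Illustrative example (not from the original source): with
# PYBB_TOPIC_PAGE_SIZE = 10, the 25th post of a topic gives count = 25 and
# page = ceil(25 / 10.0) = 3, so the redirect lands on
# '<topic_url>?page=3#post-<id>'.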
def get_post(self, **kwargs):
return get_object_or_404(Post, pk=kwargs['pk'])
class ModeratePost(generic.RedirectView):
permanent = False
def get_redirect_url(self, **kwargs):
post = get_object_or_404(Post, pk=self.kwargs['pk'])
if not perms.may_moderate_topic(self.request.user, post.topic):
raise PermissionDenied
post.on_moderation = False
post.save()
return post.get_absolute_url()
class ProfileEditView(generic.UpdateView):
template_name = 'pybb/edit_profile.html'
def get_object(self, queryset=None):
return util.get_pybb_profile(self.request.user)
def get_form_class(self):
if not self.form_class:
from pybb.forms import EditProfileForm
return EditProfileForm
else:
return super(ProfileEditView, self).get_form_class()
@method_decorator(login_required)
@method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
return super(ProfileEditView, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
return reverse('pybb:edit_profile')
class DeletePostView(generic.DeleteView):
template_name = 'pybb/delete_post.html'
context_object_name = 'post'
def get_object(self, queryset=None):
post = get_object_or_404(Post.objects.select_related('topic', 'topic__forum'), pk=self.kwargs['pk'])
if not perms.may_delete_post(self.request.user, post):
raise PermissionDenied
self.topic = post.topic
self.forum = post.topic.forum
if not perms.may_moderate_topic(self.request.user, self.topic):
raise PermissionDenied
return post
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
redirect_url = self.get_success_url()
if not request.is_ajax():
return HttpResponseRedirect(redirect_url)
else:
return HttpResponse(redirect_url)
def get_success_url(self):
try:
Topic.objects.get(pk=self.topic.id)
except Topic.DoesNotExist:
return self.forum.get_absolute_url()
else:
if not self.request.is_ajax():
return self.topic.get_absolute_url()
else:
return ""
class TopicActionBaseView(generic.View):
def get_topic(self):
return get_object_or_404(Topic, pk=self.kwargs['pk'])
@method_decorator(login_required)
def get(self, *args, **kwargs):
self.topic = self.get_topic()
self.action(self.topic)
return HttpResponseRedirect(self.topic.get_absolute_url())
class StickTopicView(TopicActionBaseView):
def action(self, topic):
if not perms.may_stick_topic(self.request.user, topic):
raise PermissionDenied
topic.sticky = True
topic.save()
class UnstickTopicView(TopicActionBaseView):
def action(self, topic):
if not perms.may_unstick_topic(self.request.user, topic):
raise PermissionDenied
topic.sticky = False
topic.save()
class CloseTopicView(TopicActionBaseView):
def action(self, topic):
if not perms.may_close_topic(self.request.user, topic):
raise PermissionDenied
topic.closed = True
topic.save()
class OpenTopicView(TopicActionBaseView):
def action(self, topic):
if not perms.may_open_topic(self.request.user, topic):
raise PermissionDenied
topic.closed = False
topic.save()
class TopicPollVoteView(PybbFormsMixin, generic.UpdateView):
model = Topic
http_method_names = ['post', ]
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(TopicPollVoteView, self).dispatch(request, *args, **kwargs)
def get_form_class(self):
return self.get_poll_form_class()
def get_form_kwargs(self):
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs['topic'] = self.object
return kwargs
def form_valid(self, form):
# already voted
if not perms.may_vote_in_topic(self.request.user, self.object) or \
not pybb_topic_poll_not_voted(self.object, self.request.user):
return HttpResponseForbidden()
answers = form.cleaned_data['answers']
for answer in answers:
# poll answer from another topic
if answer.topic != self.object:
return HttpResponseBadRequest()
PollAnswerUser.objects.create(poll_answer=answer, user=self.request.user)
return super(ModelFormMixin, self).form_valid(form)
def form_invalid(self, form):
return redirect(self.object)
def get_success_url(self):
return self.object.get_absolute_url()
@login_required
def topic_cancel_poll_vote(request, pk):
topic = get_object_or_404(Topic, pk=pk)
PollAnswerUser.objects.filter(user=request.user, poll_answer__topic_id=topic.id).delete()
return HttpResponseRedirect(topic.get_absolute_url())
@login_required
def delete_subscription(request, topic_id):
topic = get_object_or_404(perms.filter_topics(request.user, Topic.objects.all()), pk=topic_id)
topic.subscribers.remove(request.user)
return HttpResponseRedirect(topic.get_absolute_url())
@login_required
def add_subscription(request, topic_id):
topic = get_object_or_404(perms.filter_topics(request.user, Topic.objects.all()), pk=topic_id)
if not perms.may_subscribe_topic(request.user, topic):
raise PermissionDenied
topic.subscribers.add(request.user)
return HttpResponseRedirect(topic.get_absolute_url())
@login_required
def post_ajax_preview(request):
content = request.POST.get('data')
html = util._get_markup_formatter()(content)
return render(request, 'pybb/_markitup_preview.html', {'html': html})
@login_required
def mark_all_as_read(request):
for forum in perms.filter_forums(request.user, Forum.objects.all()):
forum_mark, new = ForumReadTracker.objects.get_or_create_tracker(forum=forum, user=request.user)
forum_mark.save()
TopicReadTracker.objects.filter(user=request.user).delete()
msg = _('All forums marked as read')
messages.success(request, msg, fail_silently=True)
return redirect(reverse('pybb:index'))
@login_required
@require_POST
def block_user(request, username):
user = get_object_or_404(User, **{username_field: username})
if not perms.may_block_user(request.user, user):
raise PermissionDenied
user.is_active = False
user.save()
if 'block_and_delete_messages' in request.POST:
# delete the user's posts and topics, then recalculate the
# forum/topic counters of every affected topic and forum
posts = Post.objects.filter(user=user)
topics = posts.values('topic_id').distinct()
forums = posts.values('topic__forum_id').distinct()
posts.delete()
Topic.objects.filter(user=user).delete()
for t in topics:
try:
Topic.objects.get(id=t['topic_id']).update_counters()
except Topic.DoesNotExist:
pass
for f in forums:
try:
Forum.objects.get(id=f['topic__forum_id']).update_counters()
except Forum.DoesNotExist:
pass
msg = _('User successfully blocked')
messages.success(request, msg, fail_silently=True)
return redirect('pybb:index')
@login_required
@require_POST
def unblock_user(request, username):
user = get_object_or_404(User, **{username_field: username})
if not perms.may_block_user(request.user, user):
raise PermissionDenied
user.is_active = True
user.save()
msg = _('User successfully unblocked')
messages.success(request, msg, fail_silently=True)
return redirect('pybb:index')
| |
import contextlib
import os
import pwd
import subprocess
import sys
import time
from unittest import mock
from tornado.testing import AsyncTestCase
from jupyterhub import orm
from remoteappmanager.jupyterhub.spawners import (
SystemUserSpawner,
VirtualUserSpawner)
from remoteappmanager.tests import fixtures
from remoteappmanager.tests.temp_mixin import TempMixin
@contextlib.contextmanager
def spawner_start_and_stop(io_loop, spawner):
try:
io_loop.run_sync(spawner.start)
# Wait for the process to get to the while loop
time.sleep(1)
yield
finally:
io_loop.run_sync(spawner.stop)
def username():
"""Returns the current username"""
return pwd.getpwuid(os.getuid()).pw_name
def new_spawner(spawner_class):
""" Create a new spawner from a given Spawner class
"""
# Server for the user and the hub
generic_server = orm.Server(
proto="http",
ip="127.0.0.2",
port=31337,
base_url="/"
)
# Mock db
db = mock.Mock()
db.query = mock.Mock()
db.query().first = mock.Mock(
return_value=orm.Proxy(
auth_token="whatever",
api_server=orm.Server(proto="http",
ip="127.0.0.1",
port=12345,
base_url="/foo/bar/")))
# Mock user
user = mock.Mock()
user.name = username()
user.admin = False
user.state = None
user.server = generic_server
# Mock hub
hub = orm.Hub(server=generic_server)
# Mock authenticator
authenticator = mock.Mock()
authenticator.logout_url = mock.Mock(
return_value='/logout_test')
authenticator.login_service = 'TEST'
return spawner_class(
db=db, user=user, hub=hub, authenticator=authenticator)
class TestSystemUserSpawner(TempMixin, AsyncTestCase):
def setUp(self):
super().setUp()
self.spawner = new_spawner(SystemUserSpawner)
def test_args(self):
path = fixtures.get("remoteappmanager_config.py")
self.spawner.config_file_path = path
args = self.spawner.get_args()
self.assertIn("--proxy-api-url=http://127.0.0.1:12345/foo/bar/", args)
self.assertIn("--config-file={}".format(path), args)
self.assertIn("--base-urlpath=\"/\"", args)
def test_args_without_config_file_path(self):
args = self.spawner.get_args()
self.assertIn("--proxy-api-url=http://127.0.0.1:12345/foo/bar/", args)
self.assertFalse(any("--config-file=" in arg for arg in args))
self.assertIn("--base-urlpath=\"/\"", args)
def test_cmd(self):
self.assertEqual(self.spawner.cmd, ['remoteappmanager'])
def test_default_config_file_path(self):
self.assertEqual(self.spawner.config_file_path, "")
def test_env(self):
env = self.spawner.get_env()
self.assertIn("PROXY_API_TOKEN", env)
self.assertEqual(env["PROXY_API_TOKEN"], "whatever")
def test_env_has_docker_vars(self):
if "DOCKER_HOST" in os.environ:
env = self.spawner.get_env()
self.assertIn("DOCKER_HOST", env)
self.assertIn("DOCKER_CERT_PATH", env)
self.assertIn("DOCKER_MACHINE_NAME", env)
def test_cmd_spawning(self):
env = os.environ.copy()
env["PROXY_API_TOKEN"] = "dummy_token"
path = fixtures.get("remoteappmanager_config.py")
self.spawner.config_file_path = path
args = self.spawner.get_args()
try:
with self.assertRaises(subprocess.TimeoutExpired):
subprocess.check_output(
self.spawner.cmd + args,
timeout=2,
env=env,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exc:
print("Output of the command:\n\n{}".format(
exc.output.decode(sys.getdefaultencoding())))
raise
def test_spawner_start_and_stop_with_config_file(self):
path = fixtures.get("remoteappmanager_config.py")
self.spawner.config_file_path = path
with spawner_start_and_stop(self.io_loop, self.spawner):
status = self.io_loop.run_sync(self.spawner.poll)
self.assertIsNone(status)
status = self.io_loop.run_sync(self.spawner.poll)
self.assertEqual(status, 1)
def test_spawner_start_and_stop_without_config_file(self):
with spawner_start_and_stop(self.io_loop, self.spawner):
status = self.io_loop.run_sync(self.spawner.poll)
self.assertIsNone(status)
status = self.io_loop.run_sync(self.spawner.poll)
self.assertEqual(status, 1)
class TestSystemUserSpawnerAsAdmin(TestSystemUserSpawner):
# We expect the same tests as above to pass.
# The admin command is a full replacement application that should accept
# the same arguments and behave in exactly the same way.
def setUp(self):
super().setUp()
self.spawner.user.admin = True
def test_cmd(self):
self.assertEqual(self.spawner.cmd, ['remoteappadmin'])
class TestVirtualUserSpawner(TestSystemUserSpawner):
def setUp(self):
super().setUp()
self.spawner = new_spawner(VirtualUserSpawner)
def test_spawner_without_workspace_dir(self):
with spawner_start_and_stop(self.io_loop, self.spawner):
status = self.io_loop.run_sync(self.spawner.poll)
self.assertIsNone(status)
# spawner.workspace_dir is not defined
# no temporary directory is created
self.assertFalse(os.listdir(self.tempdir))
status = self.io_loop.run_sync(self.spawner.poll)
self.assertEqual(status, 1)
def test_spawner_with_workspace_dir(self):
self.spawner.workspace_dir = self.tempdir
with spawner_start_and_stop(self.io_loop, self.spawner):
status = self.io_loop.run_sync(self.spawner.poll)
self.assertIsNone(status)
# There should be a temporary directory created
# and it should be assigned to _virtual_workspace
virtual_directory = self.spawner._virtual_workspace
self.assertIn(os.path.basename(virtual_directory),
os.listdir(self.tempdir))
status = self.io_loop.run_sync(self.spawner.poll)
self.assertEqual(status, 1)
def test_spawner_with_workspace_dir_already_existent(self):
self.spawner.workspace_dir = self.tempdir
os.mkdir(os.path.join(self.tempdir, username()))
with spawner_start_and_stop(self.io_loop, self.spawner):
status = self.io_loop.run_sync(self.spawner.poll)
self.assertIsNone(status)
# There should be a temporary directory created
# and it should be assigned to _virtual_workspace
virtual_directory = self.spawner._virtual_workspace
self.assertIn(os.path.basename(virtual_directory),
os.listdir(self.tempdir))
def test_spawner_with_workspace_dir_as_file(self):
self.spawner.workspace_dir = self.tempdir
with open(os.path.join(self.tempdir, username()), 'w'):
pass
with spawner_start_and_stop(self.io_loop, self.spawner):
self.assertIsNone(self.spawner.get_env().get('HOME'))
def test_env_has_proxy_api_token(self):
env = self.spawner.get_env()
self.assertIn("PROXY_API_TOKEN", env)
self.assertEqual(env["PROXY_API_TOKEN"], "whatever")
def test_env_has_docker_vars(self):
if "DOCKER_HOST" in os.environ:
env = self.spawner.get_env()
self.assertIn("DOCKER_HOST", env)
self.assertIn("DOCKER_CERT_PATH", env)
self.assertIn("DOCKER_MACHINE_NAME", env)
def test_env_has_home_if_workspace_defined(self):
self.spawner.workspace_dir = self.tempdir
with spawner_start_and_stop(self.io_loop, self.spawner):
home = self.spawner.get_env().get('HOME')
self.assertEqual(home, self.spawner._virtual_workspace)
def test_home_not_in_env_if_workspace_undefined(self):
with spawner_start_and_stop(self.io_loop, self.spawner):
self.assertIsNone(self.spawner.get_env().get('HOME'))
def test_state_if_workspace_defined(self):
self.spawner.workspace_dir = self.tempdir
with spawner_start_and_stop(self.io_loop, self.spawner):
state = self.spawner.get_state()
self.assertIn('virtual_workspace', state)
self.assertIn(self.tempdir, state.get('virtual_workspace'))
def test_state_if_workspace_not_defined(self):
with spawner_start_and_stop(self.io_loop, self.spawner):
state = self.spawner.get_state()
self.assertNotIn('virtual_workspace', state)
def test_start_if_workspace_path_not_exists(self):
self.spawner.workspace_dir = '/no_way/this_exists'
with spawner_start_and_stop(self.io_loop, self.spawner):
# Started running
status = self.io_loop.run_sync(self.spawner.poll)
self.assertIsNone(status)
# Stopped running
status = self.io_loop.run_sync(self.spawner.poll)
self.assertEqual(status, 1)
class TestVirtualUserSpawnerAsAdmin(TestSystemUserSpawner):
def setUp(self):
super().setUp()
self.spawner.user.admin = True
def test_cmd(self):
self.assertEqual(self.spawner.cmd, ['remoteappadmin'])
| |
import logging
from io import BytesIO
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from rest_framework.exceptions import PermissionDenied
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from ambulance.models import Ambulance, CallStatus, AmbulanceCallStatus, AmbulanceCall, Waypoint
from ambulance.models import Call
from ambulance.serializers import AmbulanceSerializer, AmbulanceUpdateSerializer, WaypointSerializer
from equipment.models import EquipmentItem
from equipment.serializers import EquipmentItemSerializer
from hospital.models import Hospital
from hospital.serializers import HospitalSerializer
from login.models import Client, ClientLog, ClientStatus, ClientActivity
from login.permissions import cache_clear
from .client import BaseClient
logger = logging.getLogger(__name__)
# Parse exception
class ParseException(Exception):
pass
# Client exception
class ClientException(Exception):
pass
# SubscribeClient
class SubscribeClient(BaseClient):
# The callback for when the client receives a CONNACK
# response from the server.
def on_connect(self, client, userdata, flags, rc):
# is connected?
if not super().on_connect(client, userdata, flags, rc):
return False
# Subscribing in on_connect() means that if we lose the
# connection and reconnect then subscriptions will be renewed.
# client.subscribe('#', 2)
# message handler
self.client.message_callback_add('message',
self.on_message)
# ambulance handler
self.client.message_callback_add('user/+/client/+/ambulance/+/data',
self.on_ambulance)
# # client ambulance status handler
# self.client.message_callback_add('user/+/client/+/ambulance/+/status',
# self.on_client_ambulance_status)
# hospital handler
self.client.message_callback_add('user/+/client/+/hospital/+/data',
self.on_hospital)
# hospital equipment handler
self.client.message_callback_add('user/+/client/+/equipment/+/item/+/data',
self.on_equipment_item)
# client status handler
self.client.message_callback_add('user/+/client/+/status',
self.on_client_status)
# ambulance call handler
self.client.message_callback_add('user/+/client/+/ambulance/+/call/+/status',
self.on_call_ambulance)
# ambulance call waypoint handler
self.client.message_callback_add('user/+/client/+/ambulance/+/call/+/waypoint/+/data',
self.on_call_ambulance_waypoint)
# subscribe
self.subscribe('message', 2)
self.subscribe('user/+/client/+/ambulance/+/data', 2)
# self.subscribe('user/+/client/+/ambulance/+/status', 2)
self.subscribe('user/+/client/+/hospital/+/data', 2)
self.subscribe('user/+/client/+/equipment/+/item/+/data', 2)
self.subscribe('user/+/client/+/status', 2)
self.subscribe('user/+/client/+/ambulance/+/call/+/status', 2)
self.subscribe('user/+/client/+/ambulance/+/call/+/waypoint/+/data', 2)
if self.verbosity > 0:
self.stdout.write(self.style.SUCCESS(">> Listening to MQTT messages..."))
return True
def send_error_message(self, username, client, topic, payload, error, qos=2):
logger.debug("send_error_message: {}, '{}:{}': '{}'".format(username,
topic,
payload,
error))
if self.verbosity > 0:
self.stdout.write(self.style.ERROR("*> Error {}, '{}:{}': {}".format(username,
topic,
payload,
error)))
try:
message = JSONRenderer().render({
'topic': topic,
'payload': payload,
'error': str(error)
})
self.publish('user/{}/client/{}/error'.format(username, client.client_id), message, qos=qos)
except Exception as e:
logger.warning(('mqtt.SubscribeClient: {}, ' +
"topic = '{}:{}', " +
"error = '{}', " +
"exception = {}").format(username,
topic,
payload,
error,
e))
def parse_topic(self, msg, expect, json=True, new_client=False):
# empty payload ?
if not msg.payload:
raise ParseException('Empty payload')
if self.verbosity > 0:
self.stdout.write(self.style.SUCCESS(" > Parsing message '{}:{}'".format(msg.topic,
msg.payload)))
# empty topic?
if not msg.topic:
raise ParseException('Empty topic')
# parse topic
values = msg.topic.split('/')
# not enough topics?
min_size = expect * 2 - 1
if len(values) < min_size:
raise ParseException('Topic has fewer than {} parts'.format(min_size))
username = '__unknown__'
try:
# retrieve user
username = values[1]
# print(User.objects.all())
user = User.objects.get(username=values[1])
except User.DoesNotExist as e:
# does not know username
# cannot send error message to user
logger.warning(('mqtt.SubscribeClient: {}, ' +
"topic = '{}:{}', " +
"exception = {}").format(username,
msg.topic,
msg.payload,
e))
raise ParseException('User does not exist')
try:
# retrieve client
client = Client.objects.get(client_id=values[3])
except Client.DoesNotExist as e:
if not new_client:
# does not know client_id
# cannot send error message to user
logger.warning(('mqtt.SubscribeClient: {}, ' +
"topic = '{}:{}', " +
"exception = {}").format(username,
msg.topic,
msg.payload,
e))
raise ParseException('Client does not exist')
else:
# create new client
client = Client(client_id=values[3], user=user)
if json:
# parse data
try:
# Parse data into json dict
data = JSONParser().parse(BytesIO(msg.payload))
except Exception as e:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"JSON formatted incorrectly")
raise ParseException('JSON formatted incorrectly: {}'.format(e))
else:
data = msg.payload.decode()
if expect == 3 and len(values) == 5:
return user, client, data
elif expect == 4 and len(values) == 7:
return user, client, data, values[5]
elif expect == 5 and len(values) == 9:
return user, client, data, values[5], values[7]
elif expect == 6 and len(values) == 11:
return user, client, data, values[5], values[7], values[9]
else:
# send error message to user
# this should never happen because no subscriptions will match
# topics with different sizes
self.send_error_message(user, client, msg.topic, msg.payload,
"Invalid topic")
raise ParseException('Invalid topic {}'.format(msg.topic))
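# Illustrative sketch (not from the original source): with expect=4, a topic
# such as 'user/admin/client/abc123/ambulance/17/data' splits into 7 parts, so
# parse_topic returns (user, client, data, '17') -- the User and Client
# objects, the parsed payload, and the ambulance id as a string.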
# Update ambulance
def on_ambulance(self, clnt, userdata, msg):
try:
logger.debug("on_ambulance: msg = '{}'".format(msg.topic, msg.payload))
# parse topic
user, client, data, ambulance_id = self.parse_topic(msg, 4)
except Exception as e:
logger.debug("on_ambulance: ParseException '{}'".format(e))
return
try:
# retrieve ambulance
logger.debug('ambulance_id = {}'.format(ambulance_id))
ambulance = Ambulance.objects.get(id=ambulance_id)
except Ambulance.DoesNotExist:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Ambulance with id '{}' does not exist".format(ambulance_id))
return
except Exception as e:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception: '{}'".format(e))
return
try:
logger.debug("on_ambulance: ambulance = '{}', data = '{}'".format(ambulance, data))
# updates must match client
if client.ambulance != ambulance:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Client '{}' is not currently authorized to update ambulance '{}'"
.format(client.client_id, ambulance.identifier))
return
is_valid = False
if isinstance(data, (list, tuple)):
# update ambulances in bulk
serializer = AmbulanceUpdateSerializer(data=data,
many=True,
partial=True)
if serializer.is_valid():
# save to database
serializer.save(ambulance=ambulance, updated_by=user)
is_valid = True
else:
# update ambulance
serializer = AmbulanceSerializer(ambulance,
data=data,
partial=True)
if serializer.is_valid():
# save to database
serializer.save(updated_by=user)
is_valid = True
if not is_valid:
logger.debug('on_ambulance: INVALID serializer')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
serializer.errors)
except Exception as e:
logger.debug('on_ambulance: EXCEPTION')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception '{}'".format(e))
logger.debug('on_ambulance: DONE')
# Update hospital
def on_hospital(self, clnt, userdata, msg):
try:
logger.debug("on_hospital: msg = '{}:{}'".format(msg.topic, msg.payload))
# parse topic
user, client, data, hospital_id = self.parse_topic(msg, 4)
except Exception as e:
logger.debug("on_hospital: ParseException '{}'".format(e))
return
try:
# retrieve hospital
hospital = Hospital.objects.get(id=hospital_id)
except Hospital.DoesNotExist:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Hospital with id '{}' does not exist".format(hospital_id))
return
except Exception as e:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception: '{}'".format(e))
return
try:
logger.debug('on_hospital: hospital = {}'.format(hospital))
# updates must match client
if client.hospital != hospital:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Client '{}' is not currently authorized to update hospital '{}'"
.format(client.client_id, hospital.name))
return
# update hospital
serializer = HospitalSerializer(hospital,
data=data,
partial=True)
if serializer.is_valid():
logger.debug('on_hospital: valid serializer')
# save to database
serializer.save(updated_by=user)
else:
logger.debug('on_hospital: INVALID serializer')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
serializer.errors)
except Exception as e:
logger.debug('on_hospital: EXCEPTION')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception '{}'".format(e))
logger.debug('on_hospital: DONE')
# Update equipment
def on_equipment_item(self, clnt, userdata, msg):
try:
logger.debug("on_equipment_item: msg = '{}:{}'".format(msg.topic, msg.payload))
# parse topic
user, client, data, equipmentholder_id, equipment_id = self.parse_topic(msg, 5)
except Exception as e:
logger.debug("on_equipment_item: ParseException '{}'".format(e))
return
try:
# retrieve hospital equipment
equipment_item = EquipmentItem.objects.get(equipmentholder_id=equipmentholder_id,
equipment_id=equipment_id)
except EquipmentItem.DoesNotExist:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Equipment with equipmentholder id '{}' and equipment id '{}' does not exist".format(
equipmentholder_id, equipment_id))
return
except Exception as e:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception: '{}'".format(e))
return
try:
logger.debug('on_equipment_item: equipment = {}'.format(equipment_item))
# update hospital equipment
serializer = EquipmentItemSerializer(equipment_item,
data=data,
partial=True)
if serializer.is_valid():
logger.debug('on_equipment_item: valid serializer')
# save to database
serializer.save(updated_by=user)
else:
logger.debug('on_equipment_item: INVALID serializer')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
serializer.errors)
except Exception as e:
logger.debug('on_equipment_item: EXCEPTION')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception '{}'".format(e))
logger.debug('on_equipment_item: DONE')
# update client information
def on_client_status(self, clnt, userdata, msg):
try:
logger.debug("on_client_status: msg = '{}:{}'".format(msg.topic, msg.payload))
# parse topic
user, client, data = self.parse_topic(msg, 3, json=False, new_client=True)
except Exception as e:
logger.debug("on_client_status: ParseException '{}'".format(e))
return
try:
# is client online?
if client.status != ClientStatus.O.name:
# client is not online
logger.debug('Client "" is not online'.format(client.client_id))
# send warning message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Warning: client '{}' is not online".format(client.client_id))
try:
# handle status
status = ClientStatus[data]
except KeyError:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"status '{}' is not valid".format(data))
return
logger.debug('on_client_status: status = ' + status.name)
# create or modify client
client.status = status.name
client.save()
except Exception as e:
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception '{}'".format(e))
logger.debug('on_client_status: done')
# handle calls
def on_call_ambulance(self, clnt, userdata, msg):
try:
logger.debug("on_call_ambulance: msg = '{}:{}'".format(msg.topic, msg.payload))
# parse topic
user, client, status, ambulance_id, call_id = self.parse_topic(msg, 5, json=False)
except Exception as e:
logger.debug("on_call_ambulance: ParseException '{}".format(e))
return
try:
ambulance = Ambulance.objects.get(id=ambulance_id)
call = Call.objects.get(id=call_id)
status = AmbulanceCallStatus[status]
except Ambulance.DoesNotExist:
self.send_error_message(user, client, msg.topic, msg.payload,
"Ambulance with id '{}' does not exist".format(ambulance_id))
return
except Call.DoesNotExist:
self.send_error_message(user, client, msg.topic, msg.payload,
"Call with id '{}' does not exist".format(call_id))
return
except Exception as e:
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception: '{}'".format(e))
return
try:
# Is call ended?
if call.status == CallStatus.E.name:
self.send_error_message(user, client, msg.topic, msg.payload,
"Call with id '{}' already ended".format(call_id))
return
try:
# Is ambulance part of this call?
ambulancecall = call.ambulancecall_set.get(ambulance_id=ambulance.id)
except AmbulanceCall.DoesNotExist:
self.send_error_message(user, client, msg.topic, msg.payload,
"Ambulance with id '{}' is not part of call '{}'".format(ambulance_id, call_id))
return
if status == AmbulanceCallStatus.A:
# change ambulancecall status to accepted
ambulancecall.status = AmbulanceCallStatus.A.name
elif status == AmbulanceCallStatus.D:
# change ambulancecall status to decline
ambulancecall.status = AmbulanceCallStatus.D.name
elif status == AmbulanceCallStatus.S:
# change ambulancecall status to suspended
ambulancecall.status = AmbulanceCallStatus.S.name
elif status == AmbulanceCallStatus.C:
# change ambulance status to completed
ambulancecall.status = AmbulanceCallStatus.C.name
else:
self.send_error_message(user, client, msg.topic, msg.payload,
"Invalid status '{}'".format(status))
return
# save changes
ambulancecall.save()
except Exception as e:
logger.debug('on_call_ambulance: ambulance EXCEPTION')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception: '{}'".format(e))
logger.debug('on_call_ambulance: DONE')
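# For reference, the one-letter statuses handled above correspond to the
# AmbulanceCall states named in the comments. A minimal sketch of that enum
# (the authoritative definition lives in the ambulance models, not here):
#
#   class AmbulanceCallStatus(Enum):
#       A = 'Accepted'
#       D = 'Declined'
#       S = 'Suspended'
#       C = 'Completed'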
# handle calls waypoints
def on_call_ambulance_waypoint(self, clnt, userdata, msg):
try:
logger.debug("on_call_ambulance_waypoint: msg = '{}:{}'".format(msg.topic, msg.payload))
# parse topic
user, client, data, ambulance_id, call_id, waypoint_id = self.parse_topic(msg, 6)
waypoint_id = int(waypoint_id)
except Exception as e:
logger.debug("on_call_ambulance_waypoint: ParseException '{}".format(e))
return
try:
ambulance_call = AmbulanceCall.objects.get(ambulance__pk=ambulance_id, call__pk=call_id)
except AmbulanceCall.DoesNotExist:
# the query is on AmbulanceCall, so this is the exception it can raise
self.send_error_message(user, client, msg.topic, msg.payload,
"Ambulance with id '{}' is not part of call '{}'".format(ambulance_id, call_id))
return
except Exception as e:
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception: '{}'".format(e))
return
try:
try:
if waypoint_id > 0:
# waypoint exists, update
logger.debug('will update waypoint')
# retrieve serializer
waypoint = Waypoint.objects.get(pk=waypoint_id)
# update waypoint
serializer = WaypointSerializer(waypoint,
data=data,
partial=True)
else:
# waypoint does not exist, create
logger.debug('will create waypoint')
# create waypoint
data['ambulance_call_id'] = ambulance_call.id
serializer = WaypointSerializer(data=data)
except Waypoint.DoesNotExist:
logger.debug('on_call_ambulance_waypoint: INVALID waypoint id')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Waypoint with id '{}' does not exist".format(waypoint_id))
return
except Exception as e:
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception: '{}'".format(e))
return
if serializer.is_valid():
logger.debug('on_call_ambulance_waypoint: valid serializer')
if waypoint_id > 0:
# save to database
serializer.save(updated_by=user, publish=True)
else:
# save to database
serializer.save(updated_by=user, ambulance_call_id=ambulance_call.id, publish=True)
else:
logger.debug('on_call_ambulance_waypoint: INVALID serializer')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
serializer.errors)
except Exception as e:
logger.debug('on_call_ambulance_waypoint: EXCEPTION')
# send error message to user
self.send_error_message(user, client, msg.topic, msg.payload,
"Exception: '{}'".format(e))
logger.debug('on_call_ambulance_waypoint: DONE')
# handle message
def on_message(self, clnt, userdata, msg):
try:
logger.debug("on_message: msg = '{}'".format(msg.topic, msg.payload))
if self.verbosity > 0:
self.stdout.write(self.style.SUCCESS(" > Parsing message '{}:{}'".format(msg.topic,
msg.payload)))
# Parse message
data = msg.payload.decode()
except Exception as e:
logger.debug("on_message: ParseException '{}'".format(e))
return
try:
if data == '"cache_clear"':
# call cache clear
cache_clear()
if self.verbosity > 0:
self.stdout.write(self.style.SUCCESS(" > Clearing cache"))
else:
logger.debug("on_message: unknown message '{}'".format(data))
except Exception as e:
logger.debug('on_message: EXCEPTION: {}'.format(e))
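# --- Illustrative sketch, not part of the original handler class ---
# A minimal example of how handlers with the (client, userdata, msg)
# signature above are typically attached to a paho-mqtt client. The topic
# filters below are placeholders for illustration only; the real subscriber
# defines its own topics elsewhere.
import paho.mqtt.client as mqtt

def connect_subscriber(subscriber, host='localhost', port=1883):
    client = mqtt.Client()
    # route matching topics to the handlers defined above
    client.message_callback_add('hospital/+/data', subscriber.on_hospital)
    client.message_callback_add('equipment/+/item/+/data',
                                subscriber.on_equipment_item)
    # unmatched topics fall through to the generic handler
    client.on_message = subscriber.on_message
    client.connect(host, port)
    client.subscribe('#', qos=2)
    client.loop_start()
    return client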
| |
# -*- coding: utf-8 -*-
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wagtailcore", "0020_add_index_on_page_first_published_at"),
]
operations = [
migrations.AlterField(
model_name="grouppagepermission",
name="group",
field=models.ForeignKey(
on_delete=models.CASCADE,
related_name="page_permissions",
to="auth.Group",
verbose_name="group",
),
),
migrations.AlterField(
model_name="grouppagepermission",
name="page",
field=models.ForeignKey(
on_delete=models.CASCADE,
related_name="group_permissions",
to="wagtailcore.Page",
verbose_name="page",
),
),
migrations.AlterField(
model_name="grouppagepermission",
name="permission_type",
field=models.CharField(
max_length=20,
verbose_name="permission type",
choices=[
("add", "Add/edit pages you own"),
("edit", "Edit any page"),
("publish", "Publish any page"),
("lock", "Lock/unlock any page"),
],
),
),
migrations.AlterField(
model_name="page",
name="content_type",
field=models.ForeignKey(
on_delete=models.CASCADE,
related_name="pages",
to="contenttypes.ContentType",
verbose_name="content type",
),
),
migrations.AlterField(
model_name="page",
name="expire_at",
field=models.DateTimeField(
null=True,
blank=True,
help_text="Please add a date-time in the form YYYY-MM-DD hh:mm.",
verbose_name="expiry date/time",
),
),
migrations.AlterField(
model_name="page",
name="expired",
field=models.BooleanField(
default=False, editable=False, verbose_name="expired"
),
),
migrations.AlterField(
model_name="page",
name="first_published_at",
field=models.DateTimeField(
null=True,
db_index=True,
editable=False,
verbose_name="first published at",
),
),
migrations.AlterField(
model_name="page",
name="go_live_at",
field=models.DateTimeField(
null=True,
blank=True,
help_text="Please add a date-time in the form YYYY-MM-DD hh:mm.",
verbose_name="go live date/time",
),
),
migrations.AlterField(
model_name="page",
name="has_unpublished_changes",
field=models.BooleanField(
default=False, editable=False, verbose_name="has unpublished changes"
),
),
migrations.AlterField(
model_name="page",
name="latest_revision_created_at",
field=models.DateTimeField(
null=True, editable=False, verbose_name="latest revision created at"
),
),
migrations.AlterField(
model_name="page",
name="live",
field=models.BooleanField(
default=True, editable=False, verbose_name="live"
),
),
migrations.AlterField(
model_name="page",
name="locked",
field=models.BooleanField(
default=False, editable=False, verbose_name="locked"
),
),
migrations.AlterField(
model_name="page",
name="owner",
field=models.ForeignKey(
blank=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="owned_pages",
null=True,
to=settings.AUTH_USER_MODEL,
editable=False,
verbose_name="owner",
),
),
migrations.AlterField(
model_name="page",
name="search_description",
field=models.TextField(blank=True, verbose_name="search description"),
),
migrations.AlterField(
model_name="page",
name="seo_title",
field=models.CharField(
max_length=255,
blank=True,
help_text="Optional. 'Search Engine Friendly' title. This will appear at the top of the browser window.",
verbose_name="page title",
),
),
migrations.AlterField(
model_name="page",
name="show_in_menus",
field=models.BooleanField(
default=False,
help_text="Whether a link to this page will appear in automatically generated menus",
verbose_name="show in menus",
),
),
migrations.AlterField(
model_name="page",
name="slug",
field=models.SlugField(
max_length=255,
verbose_name="slug",
help_text="The name of the page as it will appear in URLs e.g http://domain.com/blog/[my-slug]/",
),
),
migrations.AlterField(
model_name="page",
name="title",
field=models.CharField(
max_length=255,
help_text="The page title as you'd like it to be seen by the public",
verbose_name="title",
),
),
migrations.AlterField(
model_name="pagerevision",
name="approved_go_live_at",
field=models.DateTimeField(
null=True, blank=True, verbose_name="approved go live at"
),
),
migrations.AlterField(
model_name="pagerevision",
name="content_json",
field=models.TextField(verbose_name="content JSON"),
),
migrations.AlterField(
model_name="pagerevision",
name="created_at",
field=models.DateTimeField(verbose_name="created at"),
),
migrations.AlterField(
model_name="pagerevision",
name="page",
field=models.ForeignKey(
on_delete=models.CASCADE,
related_name="revisions",
to="wagtailcore.Page",
verbose_name="page",
),
),
migrations.AlterField(
model_name="pagerevision",
name="submitted_for_moderation",
field=models.BooleanField(
default=False, db_index=True, verbose_name="submitted for moderation"
),
),
migrations.AlterField(
model_name="pagerevision",
name="user",
field=models.ForeignKey(
on_delete=models.CASCADE,
blank=True,
null=True,
to=settings.AUTH_USER_MODEL,
verbose_name="user",
),
),
migrations.AlterField(
model_name="pageviewrestriction",
name="page",
field=models.ForeignKey(
on_delete=models.CASCADE,
related_name="view_restrictions",
to="wagtailcore.Page",
verbose_name="page",
),
),
migrations.AlterField(
model_name="pageviewrestriction",
name="password",
field=models.CharField(max_length=255, verbose_name="password"),
),
migrations.AlterField(
model_name="site",
name="hostname",
field=models.CharField(
max_length=255, db_index=True, verbose_name="hostname"
),
),
migrations.AlterField(
model_name="site",
name="is_default_site",
field=models.BooleanField(
default=False,
help_text="If true, this site will handle requests for all other hostnames that do not have a site entry of their own",
verbose_name="is default site",
),
),
migrations.AlterField(
model_name="site",
name="port",
field=models.IntegerField(
default=80,
help_text="Set this to something other than 80 if you need a specific port number to appear in URLs (e.g. development on port 8000). Does not affect request handling (so port forwarding still works).",
verbose_name="port",
),
),
migrations.AlterField(
model_name="site",
name="root_page",
field=models.ForeignKey(
on_delete=models.CASCADE,
related_name="sites_rooted_here",
to="wagtailcore.Page",
verbose_name="root page",
),
),
]
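# To apply this migration, use Django's standard machinery, e.g.:
#   python manage.py migrate wagtailcore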
| |
import json
import certifi
import pycurl
from django.core import mail
from django.test import TestCase
from django.test.client import RequestFactory
from mock import MagicMock
from mock import PropertyMock
from mock import patch
from mock import call
from tests.utils.factories import ServiceFactory, LogFactory, NotificationFactory
from ftp_deploy.conf import *
from ftp_deploy.models import Log
from ftp_deploy.utils.decorators import check
from ftp_deploy.utils.core import absolute_url, LockError, service_check, bitbucket_check
from ftp_deploy.utils.repo import commits_parser, repository_api
from ftp_deploy.utils.email import notification_success, notification_fail
from ftp_deploy.utils.ftp import ftp_check, ftp_connection
from ftp_deploy.utils.curl import curl_connection
from ftp_deploy.models.service import Service
class UtilsDecoratorCheckTest(TestCase):
def test_utils_decorator_check_exception_output(self):
"""check decorator return tuple in format (True, '<b>prefix:</b> Exception message') if raise exception"""
check_ = check('prefix')
output = check_(self.function_raise_exception)()
self.assertEqual(output, (True, '<b>prefix:</b> Exception message'))
def test_utils_decorator_check_no_exception_output(self):
"""check decorator return tuple in format (False,'') if no exception"""
check_ = check('prefix')
output = check_(self.function_not_raise_exception)()
self.assertEqual(output, (False, ''))
def function_raise_exception(self):
raise Exception("Exception message")
def function_not_raise_exception(self):
pass
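# A minimal sketch of the decorator behaviour pinned by the tests above,
# inferred from the expected outputs; the real implementation lives in
# ftp_deploy.utils.decorators.
def check_sketch(prefix):
    def decorator(func):
        def wrapper(*args, **kwargs):
            try:
                func(*args, **kwargs)
                return (False, '')
            except Exception as e:
                return (True, '<b>%s:</b> %s' % (prefix, e))
        return wrapper
    return decorator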
class UtilsRepoCommitParserTest(TestCase):
def setUp(self):
self.service = ServiceFactory()
log = LogFactory(service=self.service)
payload = json.loads(log.payload)
self.data = commits_parser(payload['commits'], self.service.repo_source)
def test_commit_parser_file_diff(self):
"""file_diff return files information in format - files_added, files_modified, files_removed"""
files_added, files_modified, files_removed = self.data.file_diff()
self.assertEqual(files_added[0], 'example/file2.txt')
self.assertEqual(files_modified[0], 'example/file1.txt')
self.assertEqual(files_removed[0], 'example/file3.txt')
self.assertNotIn('example/file4.txt', files_removed)
def test_commit_parser_commits_info(self):
"""commits_info return commits info in format [['message','username','raw_node'],]"""
commits_info = self.data.commits_info()
self.assertEqual(commits_info, [[u'test message commit 2', u'username', u'57baa5c89daef238c2043c7e866c2e997d681876'], [
u'test message commit 1', u'username', u'57baa5c89daef238c2043c7e866c2e997d681871']])
def test_commit_parser_email_list_return_list_of_emails_from_commits(self):
email_list = self.data.email_list()
self.assertEqual(email_list, [u'author@email.com'])
class UtilsRepoAPI(TestCase):
def setUp(self):
self.service_bb = ServiceFactory()
self.service_gh = ServiceFactory(repo_source='gh')
@patch('ftp_deploy.utils.repo.curl_connection')
def test_repo_init_bb_perform_curl_initialization(self, mock_curl_connection):
api = repository_api(self.service_bb.repo_source)
mock_curl_connection.assert_called_once_with(BITBUCKET_SETTINGS['username'], BITBUCKET_SETTINGS['password'])
mock_curl_connection.assert_has_calls(call().authenticate())
@patch('ftp_deploy.utils.repo.curl_connection')
def test_repo_init_gh_perform_curl_initialization(self, mock_curl_connection):
api = repository_api(self.service_gh.repo_source)
mock_curl_connection.assert_called_once_with(GITHUB_SETTINGS['username'], GITHUB_SETTINGS['password'])
mock_curl_connection.assert_has_calls(call().authenticate())
@patch('ftp_deploy.utils.repo.curl_connection')
def test_repo_repositories_bb_use_proper_url(self, mock_curl_connection):
api = repository_api(self.service_bb.repo_source)
api.curl = MagicMock(name="mock_curl")
api.repositories()
api.curl.assert_has_calls([call.perform('https://bitbucket.org/api/1.0/user/repositories')])
@patch('ftp_deploy.utils.repo.curl_connection')
def test_repo_repositories_gh_use_proper_url(self, mock_curl_connection):
api = repository_api(self.service_gh.repo_source)
api.curl = MagicMock(name="mock_curl")
api.repositories()
api.curl.assert_has_calls([call.perform('https://api.github.com/user/repos')])
@patch('ftp_deploy.utils.repo.absolute_url')
@patch('ftp_deploy.utils.repo.curl_connection')
def test_repo_add_hook_bb_use_proper_url(self, mock_curl_connection, mock_absolute_url):
absolute_url = MagicMock(name='absolute_url')
absolute_url.build = MagicMock(return_value='build')
mock_absolute_url.return_value = absolute_url
api = repository_api(self.service_bb.repo_source)
api.curl = MagicMock(name="mock_curl")
api.add_hook(self.service_bb, MagicMock(name='request', return_value='request'))
calls = [call.perform_post('https://api.bitbucket.org/1.0/repositories/%s/%s/services/' % (
BITBUCKET_SETTINGS['username'], self.service_bb.repo_slug_name), 'type=POST&URL=build/ftpdeploy/deploy/%s' % (self.service_bb.secret_key))]
api.curl.assert_has_calls(calls)
@patch('ftp_deploy.utils.repo.absolute_url')
@patch('ftp_deploy.utils.repo.curl_connection')
def test_repo_add_hook_gh_use_proper_url(self, mock_curl_connection, mock_absolute_url):
absolute_url = MagicMock(name='absolute_url')
absolute_url.build = MagicMock(return_value='build')
mock_absolute_url.return_value = absolute_url
api = repository_api(self.service_gh.repo_source)
api.curl = MagicMock(name="mock_curl")
api.add_hook(self.service_gh, MagicMock(name='request', return_value='request'))
json_call = json.loads(api.curl.mock_calls[0][1][1])
url_call = api.curl.mock_calls[0][1][0]
self.assertEqual(url_call, 'https://api.github.com/repos/%s/%s/hooks' % (
GITHUB_SETTINGS['username'], self.service_gh.repo_slug_name))
self.assertEqual(json_call['name'],'web')
self.assertEqual(json_call['active'], True)
self.assertEqual(json_call['config']['url'], 'build%s' % self.service_gh.hook_url())
self.assertEqual(json_call['config']['content_type'],'json')
class UtilsCoreServiceCheckTest(TestCase):
def setUp(self):
self.service = ServiceFactory()
def test_service_check_init(self):
"""service test init set up service, message and fails list"""
check_init = service_check(self.service)
self.assertEqual(check_init.service, self.service)
self.assertEqual(check_init.message, [])
self.assertEqual(check_init.fails, [False, False, False, False])
def test_service_check_log_return_number_of_fail_logs_assign_to_service(self):
log1 = LogFactory(service=self.service, status=False)
log2 = LogFactory(service=self.service, status=False)
log3 = LogFactory(service=self.service, status=True)
check_log = service_check(self.service)
check_log.check_log()
self.assertEqual(check_log.message, ['<b>Log</b>: Deploy Fails(2)'])
self.assertTrue(check_log.fails[0])
@patch('ftp_deploy.utils.core.bitbucket_check')
def test_service_check_repo_bb_perform_bb_check_and_return_proper_data(self, mock_bb_check):
bb = MagicMock(name='bb', spec_set=bitbucket_check)
mock_bb_check.return_value = bb
bb.check_all = MagicMock(name='check_all', return_value=(True, 'bb_fail'))
bb.check_hook_exist = MagicMock(name='check_hook_exist', return_value=(True, 'bb_hook_fail'))
check_repo = service_check(self.service)
check_repo.check_repo()
mock_bb_check.assert_called_once_with(BITBUCKET_SETTINGS['username'], BITBUCKET_SETTINGS['password'], self.service)
self.assertEqual(check_repo.message, ['bb_fail', 'bb_hook_fail'])
self.assertTrue(check_repo.fails[1])
self.assertTrue(check_repo.fails[2])
@patch('ftp_deploy.utils.core.ftp_check')
def test_service_check_ftp_perform_ftp_check_and_return_proper_data(self, mock_ftp_check):
ftp = MagicMock(name='ftp', spec_set=ftp_check)
mock_ftp_check.return_value = ftp
ftp.check_all = MagicMock(name='check_all', return_value=(True, 'ftp_fail'))
check_ftp = service_check(self.service)
check_ftp.check_ftp()
mock_ftp_check.assert_called_once_with('ftp_host', 'ftp_username', 'ftp_password', 'ftp/path')
self.assertEqual(check_ftp.message, ['ftp_fail'])
self.assertTrue(check_ftp.fails[3])
@patch('ftp_deploy.utils.core.service_check.check_log')
@patch('ftp_deploy.utils.core.service_check.check_repo')
@patch('ftp_deploy.utils.core.service_check.check_ftp')
def test_service_check_all_perform_all_check_stages_and_return_proper_data(self, mock_check_ftp, mock_check_repo, mock_check_log):
check_all = service_check(self.service)
check_all.fails = 'fails'
check_all.message = 'message'
response = check_all.check_all()
mock_check_log.assert_called_with()
mock_check_repo.assert_called_with()
mock_check_ftp.assert_called_with()
self.assertEqual(response, ('fails', 'message'))
class UtilsCoreAbsoluteURLTest(TestCase):
def test_absoluteurl_build_return_absolute_url_of_the_website(self):
request_factory = RequestFactory()
request = request_factory.get('/example')
output = absolute_url(request).build()
self.assertEqual(output, 'http://testserver')
class UtilsCoreLockErrorTest(TestCase):
def test_lock_error_exception(self):
try:
raise LockError()
except LockError as e:
self.assertEqual(e.__str__(), 'Deploy failed because service is Locked!')
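# For reference, a minimal equivalent of the exception exercised above;
# the real LockError lives in ftp_deploy.utils.core, and this sketch only
# mirrors the message the test pins down:
class _LockErrorSketch(Exception):
    def __str__(self):
        return 'Deploy failed because service is Locked!'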
class UtilsEmailTest(TestCase):
def setUp(self):
notification = NotificationFactory(
success='email_success_1@email.com,email_success_1@email.com,email_success_2@email.com',
fail='email_fail_1@email.com,email_fail_2@email.com',
deploy_user=[]
)
self.service = ServiceFactory(notification=notification)
self.log = LogFactory(service=self.service)
request_factory = RequestFactory()
request = request_factory.get('/example')
self.host = absolute_url(request).build()
def test_notification_success_subject(self):
notification = notification_success(self.host, self.service, self.log.payload)
self.assertEqual(notification.subject(), '%s - Deploy Successfully' % self.service)
def test_notification_success_emails_list(self):
notification = notification_success(self.host, self.service, self.log.payload)
self.assertIn('email_success_1@email.com', notification.emails())
self.assertIn('email_success_2@email.com', notification.emails())
self.assertIn('author@email.com', notification.emails())
def test_notification_success_emails_no_notification(self):
"""recipient email list for success email is empty if service has no notification assign"""
self.service.notification = None
notification = notification_success(self.host, self.service, self.log.payload)
self.assertListEqual(notification.emails(), [])
def test_notification_success_context(self):
notification = notification_success(self.host, self.service, self.log.payload)
payload = json.loads(self.log.payload)
files_added, files_modified, files_removed = commits_parser(payload['commits'], self.service.repo_source).file_diff()
self.assertEqual(notification.context()['service'], self.service)
self.assertEqual(notification.context()['host'], self.host)
self.assertEqual(notification.context()['commits_info'], commits_parser(payload['commits'], self.service.repo_source).commits_info())
self.assertEqual(notification.context()['files_added'], files_added)
self.assertEqual(notification.context()['files_modified'], files_modified)
self.assertEqual(notification.context()['files_removed'], files_removed)
def test_notification_fail_subject(self):
notification = notification_fail(self.host, self.service, self.log.payload, 'error_message')
self.assertEqual(notification.subject(), '%s - Deploy Fail' % self.service)
def test_notification_fail_emails_list(self):
notification = notification_fail(self.host, self.service, self.log.payload, 'error_message')
self.assertIn('email_fail_1@email.com', notification.emails())
self.assertIn('email_fail_2@email.com', notification.emails())
def test_notification_fail_emails_no_notification(self):
"""recipient email list for fail email is empty if service has no notification assign"""
self.service.notification = None
notification = notification_fail(self.host, self.service, self.log.payload, 'error_message')
self.assertListEqual(notification.emails(), [])
def test_notification_fail_context(self):
notification = notification_fail(self.host, self.service, self.log.payload, 'error_message')
self.assertEqual(notification.context()['service'], self.service)
self.assertEqual(notification.context()['host'], self.host)
self.assertEqual(notification.context()['error'], 'error_message')
def test_notification_send_method_sent_appropriate_number_of_emails(self):
notification_success(self.host, self.service, self.log.payload)
self.assertEqual(len(mail.outbox), 3)
class UtilsCurlTest(TestCase):
def test_curl_connection_init_setup_username_and_password(self):
"""curl connection set up username and password"""
curl = curl_connection('curl_username', 'curl_password')
self.assertEqual(curl.username, 'curl_username')
self.assertEqual(curl.password, 'curl_password')
@patch('ftp_deploy.utils.curl.certifi')
@patch('ftp_deploy.utils.curl.pycurl')
def test_curl_authenticate_method_perform_curl_authorisation(self, mock_pycurl, mock_certifi):
mock_certifi.where.return_value = 'certifi_where'
mock_curl = MagicMock(name='curl')
mock_pycurl.Curl.return_value = mock_curl
type(mock_curl).USERPWD = PropertyMock(name='MOCK_USERPWD', return_value='USERPWD')
type(mock_pycurl).CAINFO = PropertyMock(name='MOCK_CAINFO', return_value='CAINFO')
curl = curl_connection('curl_username', 'curl_password')
curl.authenticate()
mock_pycurl.assert_has_calls([call.Curl()])
mock_curl.assert_has_calls([call.setopt('CAINFO', 'certifi_where'), call.setopt('USERPWD', 'curl_username:curl_password')])
@patch('ftp_deploy.utils.curl._io')
def test_curl_perform_method_perform_curl_GET_request(self, mock_stringIO):
type(mock_stringIO()).write = PropertyMock(name='io_write', return_value='io_write')
curl = curl_connection('curl_username', 'curl_password')
curl.curl = MagicMock(name='mock_curl')
type(curl.curl).URL = PropertyMock(name='MOCK_URL', return_value='URL')
type(curl.curl).WRITEFUNCTION = PropertyMock(name='MOCK_WRITEFUNCTION', return_value='WRITEFUNCTION')
curl.perform('example/url')
curl.curl.assert_has_calls([call.setopt('URL', 'example/url'), call.setopt('WRITEFUNCTION', 'io_write'), call.perform()])
def test_curl_perform_post_method_send_POST_request_with_post_data(self):
curl = curl_connection('curl_username', 'curl_password')
curl.curl = MagicMock(name='mock_curl')
type(curl.curl).URL = PropertyMock(name='MOCK_URL', return_value='URL')
type(curl.curl).POSTFIELDS = PropertyMock(name='MOCK_POSTFIELDS', return_value='POSTFIELDS')
curl.perform_post('example/url', 'post=data')
curl.curl.assert_has_calls([call.setopt('URL', 'example/url'), call.setopt('POSTFIELDS', 'post=data'), call.perform()], any_order=True)
@patch('ftp_deploy.utils.curl.pycurl')
def test_curl_get_http_code_method_return_http_response_of_current_curl_request(self, mock_pycurl):
type(mock_pycurl).HTTP_CODE = PropertyMock(return_value='HTTP_CODE')
curl = curl_connection('curl_username', 'curl_password')
curl.curl = MagicMock(name='mock_curl')
curl.get_http_code()
curl.curl.assert_has_calls(call.getinfo('HTTP_CODE'))
def test_curl_close_method_close_current_curl_connection(self):
curl = curl_connection('curl_username', 'curl_password')
curl.curl = MagicMock(name='mock_curl')
curl.close()
curl.curl.assert_has_calls(call.close())
class UtilsFTPTest(TestCase):
def setUp(self):
self.ftp_connection = ftp_connection('host', 'username', 'password', 'ftp/path/')
def test_ftp_connection_init_method_setup_variables(self):
self.assertEqual(self.ftp_connection.host, 'host')
self.assertEqual(self.ftp_connection.username, 'username')
self.assertEqual(self.ftp_connection.password, 'password')
self.assertEqual(self.ftp_connection.ftp_path, 'ftp/path/')
self.assertFalse(self.ftp_connection.connected)
@patch('ftp_deploy.utils.ftp.FTP')
def test_ftp_connection_connect_method_perform_login_to_ftp(self, mock_ftp):
self.ftp_connection.connect()
mock_ftp.assert_has_calls([call('host'), call().login('username', 'password')])
self.assertTrue(self.ftp_connection.connected)
def test_ftp_connection_create_file_method_create_file_in_filepath_location(self):
self.ftp_connection.ftp = MagicMock(name='ftp')
self.ftp_connection.create_file('path/to/file/file.txt', 'example file content')
self.ftp_connection.ftp.assert_has_calls(call.storbinary('STOR ftp/path/path/to/file/file.txt', 'example file content'))
def test_ftp_connection_remove_file_method_remove_file_and_clear_empty_directories(self):
return_default = lambda value: value
self.ftp_connection.ftp = MagicMock(name='ftp')
self.ftp_connection.ftp.rmd = MagicMock(name='rmd', side_effect=[return_default, Exception('no empty directory')])
self.ftp_connection.remove_file('path/to/file/file.txt')
self.ftp_connection.ftp.rmd.assert_has_calls([call('ftp/path/path/to/file'), call('ftp/path/path/to')])
self.ftp_connection.ftp.assert_has_calls([call.delete('ftp/path/path/to/file/file.txt')])
def test_ftp_connection_make_dirs_method_create_all_directories_based_on_filepath(self):
return_default = lambda value: value
self.ftp_connection.ftp = MagicMock(name='ftp')
self.ftp_connection.ftp.dir = MagicMock(name='dir', side_effect=[return_default, Exception('directory doesnt exist')])
self.ftp_connection.ftp.mkd = MagicMock(name='mkd')
self.ftp_connection.make_dirs('path/to/file/file.txt')
self.ftp_connection.ftp.dir.assert_has_calls([call('ftp/path/path'), call('ftp/path/path/to'), call('ftp/path/path/to/file')])
self.ftp_connection.ftp.mkd.assert_has_calls([call('ftp/path/path/to'), call('ftp/path/path/to/file')])
def test_ftp_connection_quit_method_perform_quit_only_if_connected_is_true(self):
self.ftp_connection.ftp = MagicMock(name='ftp')
self.ftp_connection.connected = True
self.ftp_connection.quit()
self.ftp_connection.ftp.assert_has_calls([call.quit()])
self.ftp_connection.connected = False
self.ftp_connection.quit()
self.assertFalse(self.ftp_connection.ftp.called)
def test_ftp_check_login_perform_connect_method(self):
check = ftp_check('host', 'username', 'password', 'ftp/path/')
check.connect = MagicMock(name='connect')
check.check_ftp_login()
self.assertTrue(check.connect.called)
def test_ftp_check_if_path_exist(self):
check = ftp_check('host', 'username', 'password', 'ftp/path/')
check.ftp = MagicMock(name='ftp')
check.check_ftp_path()
check.ftp.assert_has_calls([call.cwd('ftp/path/')])
def test_ftp_check_all_perform_all_check_stages(self):
check = ftp_check('host', 'username', 'password', 'ftp/path/')
check.check_ftp_login = MagicMock(name='check_login', return_value=(True, 'error login message'))
check.check_ftp_path = MagicMock(name='check_ftp_path', return_value=(True, 'error path message'))
response = check.check_all()
self.assertEqual(response, (True, 'error login message'))
check.check_ftp_login = MagicMock(name='check_login', return_value=(False, ''))
check.check_ftp_path = MagicMock(name='check_ftp_path', return_value=(True, 'error path message'))
response = check.check_all()
self.assertEqual(response, (True, 'error path message'))
check.check_ftp_login = MagicMock(name='check_login', return_value=(False, ''))
check.check_ftp_path = MagicMock(name='check_ftp_path', return_value=(False, ''))
response = check.check_all()
self.assertEqual(response, (False, ''))
| |
import bisect
import directory
import date
def get_multiple_keys():
return ['Close', 'High', 'HighLimit', 'Low', 'LowLimit', 'Open']
def get_divided_keys():
return ['Volume']
def contains_key(key_list = []):
for key in get_multiple_keys():
if key in key_list:
return True
for key in get_divided_keys():
if key in key_list:
return True
return False
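# Illustrative behaviour of contains_key (true when any multiple- or
# divided-handled key is present; 'Turnover' is a made-up key for the example):
#   contains_key(['Close', 'Turnover']) -> True (matches 'Close')
#   contains_key(['Turnover']) -> False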
def calculate_restoration(restoration_base_date = 'no_restoration', end_date = 'today',\
stock_name = '', price_list = [], date_list = [], re_start_date_list = [],\
re_end_date_list = [], start_date = 'very_beginning', dtsk_data = ''):
multiple_keys = get_multiple_keys()
divided_keys = get_divided_keys()
if restoration_base_date == 'no_restoration':
return dtsk_data
restoration_base_date = date.parse(restoration_base_date)
start_date = date.parse(start_date)
end_date = date.parse(end_date)
base_restoration_price = 1
for index, price in enumerate(price_list):
if restoration_base_date >= re_start_date_list[index] and \
restoration_base_date <= re_end_date_list[index]:
base_restoration_price = price
break
if stock_name in dtsk_data['SYMBOL'].values:
start = date_list[bisect.bisect_left(date_list, start_date)]
end = date_list[bisect.bisect_right(date_list, end_date) - 1]
dtsk_data.loc[start : end, :, stock_name, multiple_keys].values /= base_restoration_price
dtsk_data.loc[start : end, :, stock_name, divided_keys].values *= base_restoration_price
for index, price in enumerate(price_list):
re_start_date = re_start_date_list[index]
if start_date > re_start_date:
re_start_date = start_date
re_end_date = re_end_date_list[index]
if end_date < re_end_date:
re_end_date = end_date
start = date_list[bisect.bisect_left(date_list, re_start_date)]
end = date_list[bisect.bisect_right(date_list, re_end_date) - 1]
dtsk_data.loc[start : end, :, stock_name, multiple_keys].values *= price
dtsk_data.loc[start : end, :, stock_name, divided_keys].values /= price
return dtsk_data
def apply_after_restoration(restoration_base_date = 'no_restoration',\
start_date = 'very_beginning',\
end_date = 'today',\
remote_root = 'Default',\
local_cache_root = '',\
dtsk_data = ''):
restoration_factor_file = directory.open_prioritized_file(\
file_relative_path = 'StockInfo/after_restoration_factor_list.txt',
remote_root = remote_root, local_cache_root = local_cache_root)
trading_days_file = directory.open_prioritized_file(\
file_relative_path = 'StockInfo/tradingdays.txt',\
remote_root = remote_root, local_cache_root = local_cache_root)
date_list = trading_days_file.read().splitlines()
start_date = date.parse(start_date)
end_date = date.parse(end_date)
stock_name = ''
re_start_date_list = []
re_end_date_list = []
price_list = []
for line in reversed(restoration_factor_file.read().splitlines()):
if not line:
continue
line_words = line.split()
symbol = line_words[0]
re_start_date = line_words[1] # include
re_start_date = date.parse(re_start_date)
re_end_date = line_words[2] # include
re_end_date = date.parse(re_end_date)
price = float(line_words[3])
if not stock_name or stock_name.lower() == symbol.lower():
stock_name = symbol
re_end_date_list.append(re_end_date)
re_start_date_list.append(re_start_date)
price_list.append(price)
else:
dtsk_data = calculate_restoration(\
restoration_base_date = restoration_base_date, dtsk_data = dtsk_data,\
stock_name = stock_name, price_list = price_list, end_date = end_date,\
date_list = date_list, re_start_date_list = re_start_date_list,\
re_end_date_list = re_end_date_list, start_date = start_date)
stock_name = ''
re_end_date_list = []
re_start_date_list = []
price_list = []
dtsk_data = calculate_restoration(\
restoration_base_date = restoration_base_date, dtsk_data = dtsk_data,\
stock_name = stock_name, price_list = price_list, end_date = end_date,\
date_list = date_list, re_start_date_list = re_start_date_list,\
re_end_date_list = re_end_date_list, start_date = start_date)
return dtsk_data
def apply_forward_restoration(restoration_base_date = 'no_restoration',\
start_date = 'very_beginning',\
end_date = 'today',\
remote_root = 'Default',\
local_cache_root = '',\
dtsk_data = ''):
restoration_factor_file = directory.open_prioritized_file(\
file_relative_path = 'StockInfo/forward_restoration_factor_list.txt',\
remote_root = remote_root, local_cache_root = local_cache_root)
trading_days_file = directory.open_prioritized_file(\
file_relative_path = 'StockInfo/tradingdays.txt',\
remote_root = remote_root, local_cache_root = local_cache_root)
date_list = trading_days_file.read().splitlines()
start_date = date.parse(start_date)
end_date = date.parse(end_date)
stock_name = ''
re_start_date_list = []
re_end_date_list = []
price_list = []
for line in restoration_factor_file.read().splitlines():
if not line:
continue
line_words = line.split()
symbol = line_words[0]
re_end_date = line_words[1] # include
re_end_date = date.parse(re_end_date)
re_start_date = line_words[2] # not include
re_start_date = date.add_1_day(re_start_date) # after adding, include
price = float(line_words[3])
if not stock_name or stock_name.lower() == symbol.lower():
stock_name = symbol
re_end_date_list.append(re_end_date)
re_start_date_list.append(re_start_date)
price_list.append(price)
else:
dtsk_data = calculate_restoration(\
restoration_base_date = restoration_base_date, dtsk_data = dtsk_data,\
stock_name = stock_name, price_list = price_list, end_date = end_date,\
date_list = date_list, re_start_date_list = re_start_date_list,\
re_end_date_list = re_end_date_list, start_date = start_date)
stock_name = ''
re_end_date_list = []
re_start_date_list = []
price_list = []
dtsk_data = calculate_restoration(\
restoration_base_date = restoration_base_date, dtsk_data = dtsk_data,\
stock_name = stock_name, price_list = price_list, end_date = end_date,\
date_list = date_list, re_start_date_list = re_start_date_list,\
re_end_date_list = re_end_date_list, start_date = start_date)
return dtsk_data
def apply(restoration_base_date = 'no_restoration',\
start_date = 'very_beginning',\
end_date = 'today',\
dtsk_data = ''):
# First, check parameters.
# Uses the same utilities as dtsk.load().
if restoration_base_date.lower() == 'no_restoration':
print 'no restoration.'
return dtsk_data
restoration_base_date = date.parse(restoration_base_date)
start_date = date.parse(start_date)
end_date = date.parse(end_date)
new_dtsk = dtsk_data
# Second, separate into 3 cases and apply:
# Situation 1: restoration_base_date < start_date,
# DTSK will apply after_restoration based on restoration_base_date
if restoration_base_date < start_date:
print 'try to apply after restoration.'
new_dtsk = apply_after_restoration(\
restoration_base_date = restoration_base_date,\
start_date = start_date,\
end_date = end_date,\
dtsk_data = dtsk_data)
# Situation 2: restoration_base_date > end_date.
# DTSK will apply forward_restoration based on restoration_base_date.
elif restoration_base_date > end_date:
print 'try to apply forward restoration.'
new_dtsk = apply_forward_restoration(\
restoration_base_date = restoration_base_date,\
start_date = start_date,\
end_date = end_date,\
dtsk_data = dtsk_data)
# Situation 3: start_date <= restoration_base_date <= end_date:
# DTSK will not apply any restoration factor.
else:
print 'no restoration.'
return dtsk_data
# [To implement] Third, automated testing.
return new_dtsk
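# Illustrative usage (placeholder dates; dtsk_data is the panel-like
# structure loaded elsewhere by the dtsk pipeline):
#   restored = apply(restoration_base_date = '2015-01-05',
#                    start_date = '2015-02-01',
#                    end_date = '2015-06-30',
#                    dtsk_data = dtsk_data)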
| |
import numpy as np
from numpy.testing import assert_array_almost_equal
import pytest
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_allclose
rng = np.random.RandomState(42)
V = rng.random_sample((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def check_neighbors(dualtree, breadth_first, k, metric, X, Y, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
@pytest.mark.parametrize('metric', METRICS)
@pytest.mark.parametrize('k', (1, 3, 5))
@pytest.mark.parametrize('dualtree', (True, False))
@pytest.mark.parametrize('breadth_first', (True, False))
def test_kd_tree_query(metric, k, dualtree, breadth_first):
rng = check_random_state(0)
X = rng.random_sample((40, DIMENSION))
Y = rng.random_sample((10, DIMENSION))
kwargs = METRICS[metric]
check_neighbors(dualtree, breadth_first, k, metric, X, Y, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
rng = check_random_state(0)
X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
rng = check_random_state(0)
X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
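# For reference, every branch above evaluates the same quantity,
#   density(y) = kernel_norm(h, n_features, kernel) * sum_i K(||y - x_i|| / h),
# with K chosen per kernel name; KDTree.kernel_density computes the same sum
# using tree-based pruning instead of the dense pairwise distances used here.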
def check_results(kernel, h, atol, rtol, breadth_first, Y, kdt, dens_true):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
@pytest.mark.parametrize('kernel',
['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine'])
@pytest.mark.parametrize('h', [0.01, 0.1, 1])
def test_kd_tree_kde(kernel, h):
n_samples, n_features = (100, 3)
rng = check_random_state(0)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
dens_true = compute_kernel_slow(Y, X, kernel, h)
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
check_results(kernel, h, atol, rtol,
breadth_first, Y, kdt, dens_true)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
rng = check_random_state(0)
x_in = rng.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
@pytest.mark.parametrize('dualtree', (True, False))
def test_kd_tree_two_point(dualtree):
n_samples, n_features = (100, 3)
rng = check_random_state(0)
X = rng.random_sample((n_samples, n_features))
Y = rng.random_sample((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
@pytest.mark.parametrize('protocol', (0, 1, 2))
def test_kd_tree_pickle(protocol):
import pickle
rng = check_random_state(0)
X = rng.random_sample((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert isinstance(kdt2, KDTree)
check_pickle_protocol(protocol)
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = rng.random_sample(2 * n_nbrs).astype(DTYPE, copy=False)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = rng.random_sample(n_nodes).astype(DTYPE, copy=False)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = rng.random_sample((n_rows, n_pts)).astype(DTYPE, copy=False)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE, copy=False)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HGlobalVarGetsCorrectFunctionAddressAtInit_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HGlobalVarGetsCorrectFunctionAddressAtInit_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HGlobalVarGetsCorrectFunctionAddressAtInit_ConnectedLHS, self).__init__(name='HGlobalVarGetsCorrectFunctionAddressAtInit_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'GlobalVarGetsCorrectFunctionAddressAtInit')
# Set the node attributes
# match class ComponentInstance() node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__ComponentInstance"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Operation() node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__Operation"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class OperationTrigger() node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__OperationTrigger"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class Executable() node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["MT_dirty__"] = False
self.vs[3]["mm__"] = """MT_pre__Executable"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class ProvidedPort() node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["MT_dirty__"] = False
self.vs[4]["mm__"] = """MT_pre__ProvidedPort"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class InstanceConfiguration() node
self.add_node()
self.vs[5]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["MT_dirty__"] = False
self.vs[5]["mm__"] = """MT_pre__InstanceConfiguration"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class ClientServerInterface() node
self.add_node()
self.vs[6]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["MT_dirty__"] = False
self.vs[6]["mm__"] = """MT_pre__ClientServerInterface"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# match class AtomicComponent() node
self.add_node()
self.vs[7]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["MT_dirty__"] = False
self.vs[7]["mm__"] = """MT_pre__AtomicComponent"""
self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'')
# Nodes that represent the edges of the property.
# match association InstanceConfiguration--contents-->ComponentInstance node
self.add_node()
self.vs[8]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "contents"
"""
self.vs[8]["MT_label__"] = """9"""
self.vs[8]["MT_subtypes__"] = []
self.vs[8]["MT_dirty__"] = False
self.vs[8]["mm__"] = """MT_pre__directLink_S"""
self.vs[8]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc8')
# match association AtomicComponent--contents-->ProvidedPort node
self.add_node()
self.vs[9]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "contents"
"""
self.vs[9]["MT_label__"] = """10"""
self.vs[9]["MT_subtypes__"] = []
self.vs[9]["MT_dirty__"] = False
self.vs[9]["mm__"] = """MT_pre__directLink_S"""
self.vs[9]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc9')
# match association ComponentInstance--component-->AtomicComponent node
self.add_node()
self.vs[10]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "component"
"""
self.vs[10]["MT_label__"] = """11"""
self.vs[10]["MT_subtypes__"] = []
self.vs[10]["MT_dirty__"] = False
self.vs[10]["mm__"] = """MT_pre__directLink_S"""
self.vs[10]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc10')
# match association AtomicComponent--contents-->Executable node
self.add_node()
self.vs[11]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "contents"
"""
self.vs[11]["MT_label__"] = """12"""
self.vs[11]["MT_subtypes__"] = []
self.vs[11]["MT_dirty__"] = False
self.vs[11]["mm__"] = """MT_pre__directLink_S"""
self.vs[11]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc11')
# match association Executable--trigger-->OperationTrigger node
self.add_node()
self.vs[12]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "trigger"
"""
self.vs[12]["MT_label__"] = """13"""
self.vs[12]["MT_subtypes__"] = []
self.vs[12]["MT_dirty__"] = False
self.vs[12]["mm__"] = """MT_pre__directLink_S"""
self.vs[12]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc12')
# match association OperationTrigger--calledOperation-->Operation node
self.add_node()
self.vs[13]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "calledOperation"
"""
self.vs[13]["MT_label__"] = """14"""
self.vs[13]["MT_subtypes__"] = []
self.vs[13]["MT_dirty__"] = False
self.vs[13]["mm__"] = """MT_pre__directLink_S"""
self.vs[13]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc13')
# match association ProvidedPort--intf-->ClientServerInterface node
self.add_node()
self.vs[14]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "intf"
"""
self.vs[14]["MT_label__"] = """15"""
self.vs[14]["MT_subtypes__"] = []
self.vs[14]["MT_dirty__"] = False
self.vs[14]["mm__"] = """MT_pre__directLink_S"""
self.vs[14]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc14')
# match association ClientServerInterface--contents-->Operation node
self.add_node()
self.vs[15]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "contents"
"""
self.vs[15]["MT_label__"] = """16"""
self.vs[15]["MT_subtypes__"] = []
self.vs[15]["MT_dirty__"] = False
self.vs[15]["mm__"] = """MT_pre__directLink_S"""
self.vs[15]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc15')
# match association OperationTrigger--providedPort-->ProvidedPort node
self.add_node()
self.vs[16]["MT_pre__attr1"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "providedPort"
"""
self.vs[16]["MT_label__"] = """17"""
self.vs[16]["MT_subtypes__"] = []
self.vs[16]["MT_dirty__"] = False
self.vs[16]["mm__"] = """MT_pre__directLink_S"""
self.vs[16]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'assoc16')
# Add the edges
self.add_edges([
(5,8), # match_class InstanceConfiguration() -> association contents
(8,0), # association contents -> match_class ComponentInstance()
(7,9), # match_class AtomicComponent() -> association contents
(9,4), # association contents -> match_class ProvidedPort()
(0,10), # match_class ComponentInstance() -> association component
(10,7), # association component -> match_class AtomicComponent()
(7,11), # match_class AtomicComponent() -> association contents
(11,3), # association contents -> match_class Executable()
(3,12), # match_class Executable() -> association trigger
(12,2), # association trigger -> match_class OperationTrigger()
(2,13), # match_class OperationTrigger() -> association calledOperation
(13,1), # association calledOperation -> match_class Operation()
(4,14), # match_class ProvidedPort() -> association intf
(14,6), # association intf -> match_class ClientServerInterface()
(6,15), # match_class ClientServerInterface() -> association contents
(15,1), # association contents -> match_class Operation()
(2,16), # match_class OperationTrigger() -> association providedPort
(16,4) # association providedPort -> match_class ProvidedPort()
])
# Add the attribute equations
self["equations"] = []
def eval_attr11(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr12(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr13(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr14(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr15(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr16(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr17(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr18(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_attr19(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "contents"
def eval_attr110(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "contents"
def eval_attr111(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "component"
def eval_attr112(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "contents"
def eval_attr113(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "trigger"
def eval_attr114(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "calledOperation"
def eval_attr115(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "intf"
def eval_attr116(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "contents"
def eval_attr117(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return attr_value == "providedPort"
def constraint(self, PreNode, graph):
"""
Executable constraint code.
        @param PreNode: Function taking an integer label as parameter
                        and returning the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
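        # As a purely hypothetical illustration (not emitted by the code
        # generator), a constraint could compare attributes of two matched
        # nodes via their labels, e.g.:
        #     return PreNode('1')['name'] == PreNode('2')['name']
        # where 'name' is an assumed attribute. This generated rule simply
        # accepts every match: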
return True
| |
# Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova import db
from nova import exception
from nova.objects import base
from nova.objects import fields as obj_fields
from nova.virt import hardware
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMACell(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add pagesize field
# Version 1.2: Add cpu_pinning_raw and topology fields
VERSION = '1.2'
fields = {
'id': obj_fields.IntegerField(),
'cpuset': obj_fields.SetOfIntegersField(),
'memory': obj_fields.IntegerField(),
'pagesize': obj_fields.IntegerField(nullable=True),
'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
nullable=True),
'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True)
}
cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')
def __init__(self, **kwargs):
super(InstanceNUMACell, self).__init__(**kwargs)
if 'pagesize' not in kwargs:
self.pagesize = None
self.obj_reset_changes(['pagesize'])
if 'cpu_topology' not in kwargs:
self.cpu_topology = None
self.obj_reset_changes(['cpu_topology'])
if 'cpu_pinning' not in kwargs:
self.cpu_pinning = None
self.obj_reset_changes(['cpu_pinning_raw'])
def __len__(self):
return len(self.cpuset)
def _to_dict(self):
        # NOTE(sahid): Used as legacy; could be renamed to
        # _legacy_to_dict_ in the future to avoid confusion.
return {'cpus': hardware.format_cpu_spec(self.cpuset,
allow_ranges=False),
'mem': {'total': self.memory},
'id': self.id,
'pagesize': self.pagesize}
@classmethod
def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy; could be renamed to
        # _legacy_from_dict_ in the future to avoid confusion.
cpuset = hardware.parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cell_id = data_dict.get('id')
pagesize = data_dict.get('pagesize')
return cls(id=cell_id, cpuset=cpuset,
memory=memory, pagesize=pagesize)
@property
def siblings(self):
cpu_list = sorted(list(self.cpuset))
threads = 0
if self.cpu_topology:
threads = self.cpu_topology.threads
if threads == 1:
threads = 0
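        # Chunk the sorted CPU list into groups of `threads` IDs; each group
        # becomes a set of sibling threads. zip() drops any remainder, and
        # threads == 0 (unknown topology, or one thread per core) yields no
        # sibling groups at all.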
return list(map(set, zip(*[iter(cpu_list)] * threads)))
@property
def cpu_pinning_requested(self):
return self.cpu_pinning is not None
def pin(self, vcpu, pcpu):
if vcpu not in self.cpuset:
return
pinning_dict = self.cpu_pinning or {}
pinning_dict[vcpu] = pcpu
self.cpu_pinning = pinning_dict
def pin_vcpus(self, *cpu_pairs):
for vcpu, pcpu in cpu_pairs:
self.pin(vcpu, pcpu)
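    # A small usage sketch (hypothetical values): for a cell with
    # cpuset={0, 1}, calling cell.pin_vcpus((0, 4), (1, 5)) leaves
    # cell.cpu_pinning == {0: 4, 1: 5}; pin() silently ignores any vCPU
    # that is not in the cell's cpuset.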
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMATopology(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Takes into account pagesize
# Version 1.2: InstanceNUMACell 1.2
VERSION = '1.2'
fields = {
# NOTE(danms): The 'id' field is no longer used and should be
# removed in the future when convenient
'id': obj_fields.IntegerField(),
'instance_uuid': obj_fields.UUIDField(),
'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'),
}
@classmethod
def obj_from_primitive(cls, primitive, context=None):
if 'nova_object.name' in primitive:
obj_topology = super(InstanceNUMATopology, cls).obj_from_primitive(
primitive, context=None)
else:
# NOTE(sahid): This compatibility code needs to stay until we can
# guarantee that there are no cases of the old format stored in
# the database (or forever, if we can never guarantee that).
obj_topology = InstanceNUMATopology._from_dict(primitive)
obj_topology.id = 0
return obj_topology
@classmethod
def obj_from_db_obj(cls, instance_uuid, db_obj):
primitive = jsonutils.loads(db_obj)
obj_topology = cls.obj_from_primitive(primitive)
if 'nova_object.name' not in db_obj:
obj_topology.instance_uuid = instance_uuid
# No benefit to store a list of changed fields
obj_topology.obj_reset_changes()
return obj_topology
# TODO(ndipanov) Remove this method on the major version bump to 2.0
@base.remotable
def create(self):
self._save()
    # NOTE(ndipanov): We can't rename create and want to avoid a version bump,
    # as this needs to be backported to stable, so this is not a @remotable.
    # That's OK since we only call it from inside Instance.save(), which is.
def _save(self):
values = {'numa_topology': self._to_json()}
db.instance_extra_update_by_uuid(self._context, self.instance_uuid,
values)
self.obj_reset_changes()
    # NOTE(ndipanov): We want to avoid a version bump,
    # as this needs to be backported to stable, so this is not a @remotable.
    # That's OK since we only call it from inside Instance.save(), which is.
@classmethod
def delete_by_instance_uuid(cls, context, instance_uuid):
values = {'numa_topology': None}
db.instance_extra_update_by_uuid(context, instance_uuid,
values)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['numa_topology'])
if not db_extra:
raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid)
if db_extra['numa_topology'] is None:
return None
return cls.obj_from_db_obj(instance_uuid, db_extra['numa_topology'])
def _to_json(self):
return jsonutils.dumps(self.obj_to_primitive())
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
def _to_dict(self):
        # NOTE(sahid): Used as legacy; could be renamed to _legacy_to_dict_
        # in the future to avoid confusion.
return {'cells': [cell._to_dict() for cell in self.cells]}
@classmethod
def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy; could be renamed to _legacy_from_dict_
        # in the future to avoid confusion.
return cls(cells=[
InstanceNUMACell._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
@property
def cpu_pinning_requested(self):
return all(cell.cpu_pinning_requested for cell in self.cells)
| |
from hotspotter.other.ConcretePrintable import Pref
from PyQt4.Qt import QMainWindow, QTableWidgetItem, QMessageBox, \
QAbstractItemView, QWidget, Qt, pyqtSlot, pyqtSignal, \
QStandardItem, QStandardItemModel, QString, QObject
from hotspotter.front.EditPrefSkel import Ui_editPrefSkel
from hotspotter.front.MainSkel import Ui_mainSkel
from hotspotter.tpl.other.matplotlibwidget import MatplotlibWidget
from hotspotter.other.logger import logmsg, logdbg, func_log
from hotspotter.other.messages import workflow_help, cmd_help, troubles_help
import types
#from weakref import ref
# --- QtMainWindow Thread --- #
# Talk to this only with signals and slots
try:
_fromUtf8 = QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
def gui_log(fn):
'log what happens in the GUI for debugging'
def gui_log_wrapper(hsgui, *args, **kwargs):
try:
function_name = fn.func_name
into_str = 'In hsgui.'+function_name
outo_str = 'Out hsgui.'+function_name+'\n'
hsgui.logdbgSignal.emit(into_str)
ret = fn(hsgui, *args, **kwargs)
hsgui.logdbgSignal.emit(outo_str)
return ret
except Exception as ex:
import traceback
logmsg('\n\n *!!* HotSpotter GUI Raised Exception: '+str(ex))
logmsg('\n\n *!!* HotSpotter GUI Exception Traceback: \n\n'+traceback.format_exc())
return gui_log_wrapper
#-------------------------------------------
class EditPrefWidget(QWidget):
    'The Settings Pane; a QWidget that edits preferences for the main window.'
def __init__(epw, fac):
super( EditPrefWidget, epw ).__init__()
epw.pref_skel = Ui_editPrefSkel()
epw.pref_skel.setupUi(epw)
epw.pref_model = None
epw.pref_skel.redrawBUT.clicked.connect(fac.redraw)
epw.pref_skel.defaultPrefsBUT.clicked.connect(fac.default_prefs)
epw.pref_skel.unloadFeaturesAndModelsBUT.clicked.connect(fac.unload_features_and_models)
@pyqtSlot(Pref, name='populatePrefTreeSlot')
def populatePrefTreeSlot(epw, pref_struct):
'Populates the Preference Tree Model'
        logdbg('Building Preference Model of: '+repr(pref_struct))
epw.pref_model = pref_struct.createQPreferenceModel()
logdbg('Built: '+repr(epw.pref_model))
epw.pref_skel.prefTreeView.setModel(epw.pref_model)
epw.pref_skel.prefTreeView.header().resizeSection(0,250)
#-------------------------------------------
# TODO: Remove the .ui files all together and code the gui
# entirely in this file.
class HotspotterMainWindow(QMainWindow):
'The GUI guts of the skeletons in the hsgui directory'
# Signals that call Facade Slots
selectCidSignal = pyqtSignal(int)
selectGidSignal = pyqtSignal(int)
renameChipIdSignal = pyqtSignal(str, int)
changeChipPropSignal = pyqtSignal(str, str, int)
logdbgSignal = pyqtSignal(str)
def __init__(hsgui, fac):
super( HotspotterMainWindow, hsgui ).__init__()
# Setup main window
hsgui.main_skel = Ui_mainSkel()
hsgui.main_skel.setupUi(hsgui)
hsgui.epw = EditPrefWidget(fac)
hsgui.plotWidget = MatplotlibWidget(hsgui.main_skel.centralwidget)
hsgui.plotWidget.setObjectName(_fromUtf8('plotWidget'))
hsgui.main_skel.root_hlayout.addWidget(hsgui.plotWidget)
hsgui.prev_tbl_item = None
hsgui.prev_cid = None
hsgui.prev_gid = None
hsgui.non_modal_qt_handles = []
def connectSignals(hsgui, fac):
'Connects GUI signals to Facade Actions'
logdbg('Connecting GUI >> to >> Facade')
# Base Signals
hsgui.selectCidSignal.connect(fac.selc)
hsgui.selectGidSignal.connect(fac.selg)
hsgui.renameChipIdSignal.connect(fac.rename_cid)
hsgui.changeChipPropSignal.connect(fac.change_chip_prop)
hsgui.logdbgSignal.connect(fac.logdbgSlot)
# SKEL SIGNALS
main_skel = hsgui.main_skel
# Widget
hsgui.main_skel.fignumSPIN.valueChanged.connect(
fac.set_fignum)
# File
main_skel.actionOpen_Database.triggered.connect(
fac.open_db)
main_skel.actionSave_Database.triggered.connect(
fac.save_db)
main_skel.actionImport_Images.triggered.connect(
fac.import_images)
main_skel.actionQuit.triggered.connect(
hsgui.close)
# Actions
main_skel.actionQuery.triggered.connect(
fac.query)
main_skel.actionAdd_ROI.triggered.connect(
fac.add_chip)
main_skel.actionReselect_Orientation.triggered.connect(
fac.reselect_orientation)
main_skel.actionReselect_ROI.triggered.connect(
fac.reselect_roi)
main_skel.actionRemove_Chip.triggered.connect(
fac.remove_cid)
main_skel.actionNext.triggered.connect(
fac.select_next)
# Options
main_skel.actionTogEll.triggered.connect(
fac.toggle_ellipse)
main_skel.actionTogPts.triggered.connect(
fac.toggle_points)
main_skel.actionTogPlt.triggered.connect(
hsgui.setPlotWidgetVisibleSlot)
main_skel.actionPreferences.triggered.connect(
hsgui.epw.show )
# Help
main_skel.actionView_Documentation.triggered.connect(
fac.view_documentation)
main_skel.actionHelpCMD.triggered.connect(
lambda:hsgui.msgbox('Command Line Help', cmd_help))
main_skel.actionHelpWorkflow.triggered.connect(
lambda:hsgui.msgbox('Workflow HOWTO', workflow_help))
main_skel.actionHelpTroubles.triggered.connect(
lambda:hsgui.msgbox('Troubleshooting Help', troubles_help))
main_skel.actionWriteLogs.triggered.connect(
fac.write_logs)
        # Convenience
main_skel.actionOpen_Source_Directory.triggered.connect(
fac.vd)
main_skel.actionOpen_Data_Directory.triggered.connect(
fac.vdd)
main_skel.actionOpen_Internal_Directory.triggered.connect(
fac.vdi)
main_skel.actionConvertImage2Chip.triggered.connect(
fac.convert_all_images_to_chips)
main_skel.actionBatch_Change_Name.triggered.connect(
fac._quick_and_dirty_batch_rename)
main_skel.actionAdd_Metadata_Property.triggered.connect(
fac.add_new_prop)
main_skel.actionAssign_Matches_Above_Threshold.triggered.connect(
fac.match_all_above_thresh)
main_skel.actionIncrease_ROI_Size.triggered.connect(
fac.expand_rois)
# Experiments
main_skel.actionMatching_Experiment.triggered.connect(
fac.run_matching_experiment)
main_skel.actionName_Consistency_Experiment.triggered.connect(
fac.run_name_consistency_experiment)
#
# Gui Components
# Tables Widgets
main_skel.chip_TBL.itemClicked.connect(
hsgui.chipTableClickedSlot)
main_skel.chip_TBL.itemChanged.connect(
hsgui.chipTableChangedSlot)
main_skel.image_TBL.itemClicked.connect(
hsgui.imageTableClickedSlot)
main_skel.res_TBL.itemChanged.connect(
hsgui.resultTableChangedSlot)
# Tab Widget
# This signal slot setup is very bad. Needs rewrite
main_skel.tablesTabWidget.currentChanged.connect(
fac.change_view)
main_skel.chip_TBL.sortByColumn(0, Qt.AscendingOrder)
main_skel.res_TBL.sortByColumn(0, Qt.AscendingOrder)
main_skel.image_TBL.sortByColumn(0, Qt.AscendingOrder)
def msgbox(hsgui, title, msg):
# Make a non modal critical QMessageBox
msgBox = QMessageBox( hsgui );
msgBox.setAttribute( Qt.WA_DeleteOnClose )
msgBox.setStandardButtons( QMessageBox.Ok )
msgBox.setWindowTitle( title )
msgBox.setText( msg )
msgBox.setModal( False )
msgBox.open( msgBox.close )
msgBox.show()
hsgui.non_modal_qt_handles.append(msgBox)
# Old Modal Version: QMessageBox.critical(None, 'ERROR', msg)
@pyqtSlot(name='setPlotWidgetVisible')
def setPlotWidgetVisibleSlot(hsgui, bit=None): #None = toggle
        if hsgui.plotWidget is not None:
            logdbg('Setting Plot Widget visibility')
            if bit is None: bit = not hsgui.plotWidget.isVisible()
            # QWidget.setVisible() returns None, so record the old visibility
            # before changing it
            was_visible = hsgui.plotWidget.isVisible()
            hsgui.plotWidget.setVisible(bit)
            if was_visible != bit:
if bit:
hsgui.main_skel.fignumSPIN.setValue(0)
else:
hsgui.main_skel.fignumSPIN.setValue(1)
#hsgui.setFignumSignal.emit(int(1 - bit)) # plotwidget fignum = 0
# Internal GUI Functions
def populate_tbl_helper(hsgui, tbl, col_headers, col_editable, row_list, row2_data_tup ):
#tbl = main_skel.chip_TBL
hheader = tbl.horizontalHeader()
sort_col = hheader.sortIndicatorSection()
sort_ord = hheader.sortIndicatorOrder()
tbl.sortByColumn(0, Qt.AscendingOrder) # Basic Sorting
prevBlockSignals = tbl.blockSignals(True)
tbl.clear()
tbl.setColumnCount(len(col_headers))
tbl.setRowCount(len(row_list))
tbl.verticalHeader().hide()
tbl.setHorizontalHeaderLabels(col_headers)
tbl.setSelectionMode( QAbstractItemView.SingleSelection )
tbl.setSelectionBehavior( QAbstractItemView.SelectRows)
tbl.setSortingEnabled(False)
for row in iter(row_list):
data_tup = row2_data_tup[row]
for col, data in enumerate(data_tup):
item = QTableWidgetItem()
try:
int_data = int(data)
item.setData(Qt.DisplayRole, int_data)
except ValueError: # for strings
item.setText(str(data))
except TypeError: #for lists
item.setText(str(data))
item.setTextAlignment(Qt.AlignHCenter)
if col_editable[col]: item.setFlags(item.flags() | Qt.ItemIsEditable)
else: item.setFlags(item.flags() ^ Qt.ItemIsEditable)
tbl.setItem(row, col, item)
tbl.setSortingEnabled(True)
tbl.sortByColumn(sort_col,sort_ord) # Move back to old sorting
tbl.show()
tbl.blockSignals(prevBlockSignals)
@pyqtSlot(dict, name='updateDBStatsSlot')
@gui_log
def updateDBStatsSlot(hsgui, stats):
hsgui.setWindowTitle(stats['title'])
def updateSelSpinsSlot(hsgui, cid, gid):
hsgui.prev_cid = cid
hsgui.prev_gid = gid
hsgui.main_skel.sel_cid_SPIN.setValue(cid)
hsgui.main_skel.sel_gid_SPIN.setValue(gid)
def redrawGuiSlot(hsgui):
hsgui.show()
        if hsgui.plotWidget is not None and \
           hsgui.plotWidget.isVisible():
hsgui.plotWidget.show()
hsgui.plotWidget.draw()
def updateStateLabelSlot(hsgui, state):
hsgui.main_skel.state_LBL.setText(state)
@pyqtSlot(list, list, list, list, name='populateChipTblSlot')
def populateChipTblSlot(hsgui, col_headers, col_editable, row_list, row2_data_tup):
hsgui.populate_tbl_helper(hsgui.main_skel.chip_TBL, col_headers, col_editable, row_list, row2_data_tup)
@pyqtSlot(list, list, list, list, name='populateImageTblSlot')
def populateImageTblSlot(hsgui, col_headers, col_editable, row_list, row2_data_tup):
hsgui.populate_tbl_helper(hsgui.main_skel.image_TBL, col_headers, col_editable, row_list, row2_data_tup)
@pyqtSlot(list, list, list, list, name='populateResultTblSlot')
def populateResultTblSlot(hsgui, col_headers, col_editable, row_list, row2_data_tup):
hsgui.populate_tbl_helper(hsgui.main_skel.res_TBL, col_headers, col_editable, row_list, row2_data_tup)
@gui_log
def chipTableChangedSlot(hsgui, item):
'A Chip had a data member changed '
hsgui.logdbgSignal.emit('chip table changed')
sel_row = item.row()
sel_col = item.column()
sel_cid = int(hsgui.main_skel.chip_TBL.item(sel_row,0).text())
new_val = str(item.text()).replace(',',';;')
header_lbl = str(hsgui.main_skel.chip_TBL.horizontalHeaderItem(sel_col).text())
hsgui.selectCidSignal.emit(sel_cid)
# Rename the chip!
if header_lbl == 'Chip Name':
hsgui.renameChipIdSignal.emit(new_val, sel_cid)
# Change the user property instead
else:
hsgui.changeChipPropSignal.emit(header_lbl, new_val, sel_cid)
@gui_log
def resultTableChangedSlot(hsgui, item):
'A Chip was Renamed in Result View'
hsgui.logdbgSignal.emit('result table changed')
sel_row = item.row()
sel_cid = int(hsgui.main_skel.res_TBL.item(sel_row,1).text())
new_name = str(item.text())
hsgui.renameChipIdSignal.emit(new_name, int(sel_cid))
def imageTableClickedSlot(hsgui, item):
'Select Image ID'
if item == hsgui.prev_tbl_item: return
hsgui.prev_tbl_item = item
sel_row = item.row()
sel_gid = int(hsgui.main_skel.image_TBL.item(sel_row,0).text())
hsgui.selectGidSignal.emit(sel_gid)
def chipTableClickedSlot(hsgui, item):
'Select Chip ID'
hsgui.logdbgSignal.emit('chip table clicked')
if item == hsgui.prev_tbl_item: return
hsgui.prev_tbl_item = item
sel_row = item.row()
sel_cid = int(hsgui.main_skel.chip_TBL.item(sel_row,0).text())
hsgui.selectCidSignal.emit(sel_cid)
| |
from synapse.api.ratelimiting import LimitExceededError, Ratelimiter
from synapse.appservice import ApplicationService
from synapse.types import create_requester
from tests import unittest
class TestRatelimiter(unittest.HomeserverTestCase):
def test_allowed_via_can_do_action(self):
limiter = Ratelimiter(
store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", _time_now_s=0)
)
self.assertTrue(allowed)
self.assertEqual(10.0, time_allowed)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", _time_now_s=5)
)
self.assertFalse(allowed)
self.assertEqual(10.0, time_allowed)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", _time_now_s=10)
)
self.assertTrue(allowed)
self.assertEqual(20.0, time_allowed)
def test_allowed_appservice_ratelimited_via_can_requester_do_action(self):
appservice = ApplicationService(
None,
"example.com",
id="foo",
rate_limited=True,
sender="@as:example.com",
)
as_requester = create_requester("@user:example.com", app_service=appservice)
limiter = Ratelimiter(
store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=0)
)
self.assertTrue(allowed)
self.assertEqual(10.0, time_allowed)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=5)
)
self.assertFalse(allowed)
self.assertEqual(10.0, time_allowed)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=10)
)
self.assertTrue(allowed)
self.assertEqual(20.0, time_allowed)
def test_allowed_appservice_via_can_requester_do_action(self):
appservice = ApplicationService(
None,
"example.com",
id="foo",
rate_limited=False,
sender="@as:example.com",
)
as_requester = create_requester("@user:example.com", app_service=appservice)
limiter = Ratelimiter(
store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=0)
)
self.assertTrue(allowed)
self.assertEqual(-1, time_allowed)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=5)
)
self.assertTrue(allowed)
self.assertEqual(-1, time_allowed)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(as_requester, _time_now_s=10)
)
self.assertTrue(allowed)
self.assertEqual(-1, time_allowed)
def test_allowed_via_ratelimit(self):
limiter = Ratelimiter(
store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
)
# Shouldn't raise
self.get_success_or_raise(limiter.ratelimit(None, key="test_id", _time_now_s=0))
# Should raise
with self.assertRaises(LimitExceededError) as context:
self.get_success_or_raise(
limiter.ratelimit(None, key="test_id", _time_now_s=5)
)
self.assertEqual(context.exception.retry_after_ms, 5000)
# Shouldn't raise
self.get_success_or_raise(
limiter.ratelimit(None, key="test_id", _time_now_s=10)
)
def test_allowed_via_can_do_action_and_overriding_parameters(self):
"""Test that we can override options of can_do_action that would otherwise fail
an action
"""
# Create a Ratelimiter with a very low allowed rate_hz and burst_count
limiter = Ratelimiter(
store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
)
# First attempt should be allowed
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(
None,
("test_id",),
_time_now_s=0,
)
)
self.assertTrue(allowed)
self.assertEqual(10.0, time_allowed)
# Second attempt, 1s later, will fail
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(
None,
("test_id",),
_time_now_s=1,
)
)
self.assertFalse(allowed)
self.assertEqual(10.0, time_allowed)
# But, if we allow 10 actions/sec for this request, we should be allowed
# to continue.
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, ("test_id",), _time_now_s=1, rate_hz=10.0)
)
self.assertTrue(allowed)
self.assertEqual(1.1, time_allowed)
# Similarly if we allow a burst of 10 actions
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, ("test_id",), _time_now_s=1, burst_count=10)
)
self.assertTrue(allowed)
self.assertEqual(1.0, time_allowed)
def test_allowed_via_ratelimit_and_overriding_parameters(self):
"""Test that we can override options of the ratelimit method that would otherwise
fail an action
"""
# Create a Ratelimiter with a very low allowed rate_hz and burst_count
limiter = Ratelimiter(
store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
)
# First attempt should be allowed
self.get_success_or_raise(
limiter.ratelimit(None, key=("test_id",), _time_now_s=0)
)
# Second attempt, 1s later, will fail
with self.assertRaises(LimitExceededError) as context:
self.get_success_or_raise(
limiter.ratelimit(None, key=("test_id",), _time_now_s=1)
)
self.assertEqual(context.exception.retry_after_ms, 9000)
# But, if we allow 10 actions/sec for this request, we should be allowed
# to continue.
self.get_success_or_raise(
limiter.ratelimit(None, key=("test_id",), _time_now_s=1, rate_hz=10.0)
)
# Similarly if we allow a burst of 10 actions
self.get_success_or_raise(
limiter.ratelimit(None, key=("test_id",), _time_now_s=1, burst_count=10)
)
def test_pruning(self):
limiter = Ratelimiter(
store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1
)
self.get_success_or_raise(
limiter.can_do_action(None, key="test_id_1", _time_now_s=0)
)
self.assertIn("test_id_1", limiter.actions)
self.get_success_or_raise(
limiter.can_do_action(None, key="test_id_2", _time_now_s=10)
)
self.assertNotIn("test_id_1", limiter.actions)
def test_db_user_override(self):
"""Test that users that have ratelimiting disabled in the DB aren't
ratelimited.
"""
store = self.hs.get_datastores().main
user_id = "@user:test"
requester = create_requester(user_id)
self.get_success(
store.db_pool.simple_insert(
table="ratelimit_override",
values={
"user_id": user_id,
"messages_per_second": None,
"burst_count": None,
},
desc="test_db_user_override",
)
)
limiter = Ratelimiter(store=store, clock=None, rate_hz=0.1, burst_count=1)
# Shouldn't raise
for _ in range(20):
self.get_success_or_raise(limiter.ratelimit(requester, _time_now_s=0))
def test_multiple_actions(self):
limiter = Ratelimiter(
store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3
)
# Test that 4 actions aren't allowed with a maximum burst of 3.
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", n_actions=4, _time_now_s=0)
)
self.assertFalse(allowed)
# Test that 3 actions are allowed with a maximum burst of 3.
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", n_actions=3, _time_now_s=0)
)
self.assertTrue(allowed)
self.assertEqual(10.0, time_allowed)
# Test that, after doing these 3 actions, we can't do any more action without
# waiting.
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", n_actions=1, _time_now_s=0)
)
self.assertFalse(allowed)
self.assertEqual(10.0, time_allowed)
# Test that after waiting we can do only 1 action.
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(
None,
key="test_id",
update=False,
n_actions=1,
_time_now_s=10,
)
)
self.assertTrue(allowed)
# The time allowed is the current time because we could still repeat the action
# once.
self.assertEqual(10.0, time_allowed)
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=10)
)
self.assertFalse(allowed)
# The time allowed doesn't change despite allowed being False because, while we
# don't allow 2 actions, we could still do 1.
self.assertEqual(10.0, time_allowed)
# Test that after waiting a bit more we can do 2 actions.
allowed, time_allowed = self.get_success_or_raise(
limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=20)
)
self.assertTrue(allowed)
# The time allowed is the current time because we could still repeat the action
# once.
self.assertEqual(20.0, time_allowed)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import optparse
import sys
from collections import defaultdict
from urlparse import urljoin
from twitter.common import log
from apache.aurora.common.pex_version import UnknownVersion, pex_version
from gen.apache.aurora.api.ttypes import ResponseCode
LOCKED_WARNING = """
Note: if the scheduler detects that a job update is in progress (or was not
properly completed) it will reject subsequent updates. This is because your
job is likely in a partially-updated state. You should only begin another
update if you are confident that nobody is updating this job, and that
the job is in a state suitable for an update.
After checking on the above, you may release the update lock on the job by
invoking cancel_update.
"""
def die(msg):
log.fatal(msg)
sys.exit(1)
def combine_messages(response):
"""Combines the message found in the details of a response.
:param response: response to extract messages from.
:return: Messages from the details in the response, or an empty string if there were no messages.
"""
return ', '.join([d.message or 'Unknown error' for d in (response.details or [])])
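# For instance (hypothetical response), details carrying the message
# 'Job is locked' plus a detail whose message is unset combine to:
#   'Job is locked, Unknown error'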
def format_response(resp):
return 'Response from scheduler: %s (message: %s)' % (
ResponseCode._VALUES_TO_NAMES[resp.responseCode], combine_messages(resp))
def check_and_log_response(resp):
log.info(format_response(resp))
if resp.responseCode != ResponseCode.OK:
if resp.responseCode == ResponseCode.LOCK_ERROR:
log.info(LOCKED_WARNING)
sys.exit(1)
def check_and_log_locked_response(resp):
if resp.responseCode == ResponseCode.LOCK_ERROR:
log.info(LOCKED_WARNING)
def deprecation_warning(text):
log.warning('')
log.warning('*' * 80)
log.warning('* The command you ran is deprecated and will soon break!')
for line in text.split('\n'):
log.warning('* %s' % line)
log.warning('*' * 80)
log.warning('')
class requires(object): # noqa
@classmethod
def wrap_function(cls, fn, fnargs, comparator):
@functools.wraps(fn)
def wrapped_function(args):
if not comparator(args, fnargs):
help = 'Incorrect parameters for %s' % fn.__name__
if fn.__doc__:
help = '%s\n\nsee the help subcommand for more details.' % fn.__doc__.split('\n')[0]
die(help)
return fn(*args)
return wrapped_function
@classmethod
def exactly(cls, *args):
def wrap(fn):
return cls.wrap_function(fn, args, (lambda want, got: len(want) == len(got)))
return wrap
@classmethod
def at_least(cls, *args):
def wrap(fn):
return cls.wrap_function(fn, args, (lambda want, got: len(want) >= len(got)))
return wrap
@classmethod
def nothing(cls, fn):
@functools.wraps(fn)
def real_fn(line):
return fn(*line)
return real_fn
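# A hypothetical usage sketch: decorating a handler with
# @requires.exactly('cluster', 'job') makes the wrapped function die() with
# the first line of the handler's docstring as help unless it is invoked
# with exactly two arguments; @requires.at_least works the same way but only
# enforces a lower bound on the argument count.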
def group_by_host(hostname):
return hostname
def no_grouping(hostname):
return '_all_hosts_'
DEFAULT_GROUPING = 'by_host'
GROUPING_FUNCTIONS = {
'by_host': group_by_host,
'none': no_grouping,
}
def add_grouping(name, function):
GROUPING_FUNCTIONS[name] = function
def remove_grouping(name):
GROUPING_FUNCTIONS.pop(name)
def get_grouping_or_die(grouping_function):
try:
return GROUPING_FUNCTIONS[grouping_function]
except KeyError:
die('Unknown grouping function %s. Must be one of: %s'
% (grouping_function, GROUPING_FUNCTIONS.keys()))
def group_hosts(hostnames, grouping_function=DEFAULT_GROUPING):
"""Place a list of hosts into batches to be operated upon.
By default, the grouping function is 'by host' which means that maintenance will
operate on a single hostname at a time. By adding more grouping functions,
a site can setup a customized way of specifying groups, such as operating on a single
rack of hosts at a time.
:param hostnames: Hostnames to break into groups
:type hostnames: list of host names, must match the host names that slaves are registered with
:param grouping_function: Key within GROUPING_FUNCTIONS to partition hosts into desired batches
:type grouping_function: string
:rtype: dictionary of batches
"""
grouping_function = get_grouping_or_die(grouping_function)
groups = defaultdict(set)
for hostname in hostnames:
groups[grouping_function(hostname)].add(hostname)
return groups
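# A minimal usage sketch with hypothetical hostnames: under the default
# 'by_host' grouping every host lands in its own batch,
#   group_hosts(['a.example.com', 'b.example.com'])
#   -> {'a.example.com': {'a.example.com'}, 'b.example.com': {'b.example.com'}}
# (as a defaultdict of sets), while grouping_function='none' would place all
# hosts in a single '_all_hosts_' batch.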
GROUPING_OPTION = optparse.Option(
'--grouping',
type='string',
metavar='GROUPING',
default=DEFAULT_GROUPING,
dest='grouping',
help='Grouping function to use to group hosts. Options: %s. Default: %%default' % (
', '.join(GROUPING_FUNCTIONS.keys())))
def synthesize_url(scheduler_url, role=None, env=None, job=None, update_id=None):
if not scheduler_url:
log.warning("Unable to find scheduler web UI!")
return None
if env and not role:
die('If env specified, must specify role')
if job and not (role and env):
die('If job specified, must specify role and env')
scheduler_url = urljoin(scheduler_url, 'scheduler')
if role:
scheduler_url += '/' + role
if env:
scheduler_url += '/' + env
if job:
scheduler_url += '/' + job
if update_id:
scheduler_url += '/update/' + update_id
return scheduler_url
def get_job_page(api, jobkey):
return synthesize_url(api.scheduler_proxy.scheduler_client().url, jobkey.role,
jobkey.env, jobkey.name)
def get_update_page(api, jobkey, update_id):
return synthesize_url(api.scheduler_proxy.scheduler_client().url, jobkey.role,
jobkey.env, jobkey.name, update_id)
AURORA_V2_USER_AGENT_NAME = 'Aurora V2'
AURORA_ADMIN_USER_AGENT_NAME = 'Aurora Admin'
UNKNOWN_CLIENT_VERSION = 'Unknown Version'
def user_agent(agent_name='Aurora'):
"""Generate a user agent containing the specified agent name and the details of the current
client version."""
try:
build_info = '%s-%s' % pex_version(sys.argv[0])
except UnknownVersion:
build_info = UNKNOWN_CLIENT_VERSION
return '%s;%s' % (agent_name, build_info)
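# For example, when the running client is not a recognizable pex file,
# pex_version() raises UnknownVersion and user_agent() falls back to
# 'Aurora;Unknown Version'.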
| |
from builtins import zip
from builtins import range
from builtins import object
import re
import csv
import unicodecsv
from bs4 import BeautifulSoup
from openelex.base.load import BaseLoader
from openelex.models import RawResult
from openelex.lib.text import ocd_type_id, slugify
from .datasource import Datasource
class LoadResults(object):
"""Entry point for data loading.
Determines appropriate loader for file and triggers load process.
"""
def run(self, mapping):
election_id = mapping['election']
if '2002' in election_id:
loader = MDLoader2002()
elif '2000' in election_id and 'primary' in election_id:
loader = MDLoader2000Primary()
elif '2008' in election_id and 'special' in election_id:
loader = MDLoader2008Special()
else:
loader = MDLoader()
loader.run(mapping)
class CountyOCDMixin(object):
"""
Loader mixin that adds convenience method for generating county-level
OCD IDs
"""
def _get_county_ocd_id(self, jurisdiction):
"""
Build an OCD ID for a county-level jurisdiction when the mapping
reflects the state OCD ID.
"""
# Baltimore City is treated like a county in the results, but we
# should use the city's OCD ID
if jurisdiction == "Baltimore City":
ocd_id = "{}/place:baltimore".format(self.mapping['ocd_id'])
else:
ocd_id = "{}/county:{}".format(self.mapping['ocd_id'],
ocd_type_id(jurisdiction))
return ocd_id
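    # For example, if self.mapping['ocd_id'] were
    # 'ocd-division/country:us/state:md' (a plausible value for these
    # Maryland mappings), 'Baltimore City' would map to
    # 'ocd-division/country:us/state:md/place:baltimore' and any other
    # jurisdiction to 'ocd-division/country:us/state:md/county:<slug>',
    # with <slug> produced by ocd_type_id().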
class MDBaseLoader(CountyOCDMixin, BaseLoader):
datasource = Datasource()
target_offices = set([
'President - Vice Pres',
'President and Vice President of the United States',
'U.S. Senator',
'U.S. Congress',
'Representative in Congress',
'Governor / Lt. Governor',
'Comptroller',
'Attorney General',
'State Senator',
'House of Delegates',
])
district_offices = set([
'U.S. Congress',
'Representative in Congress',
'State Senator',
"House of Delegates",
])
def _skip_row(self, row):
"""
Should this row be skipped?
This should be implemented in subclasses.
"""
return False
class MDLoader(MDBaseLoader):
"""
Parse Maryland election results for the 2000 general election and
all elections after 2002.
"""
def load(self):
with self._file_handle as csvfile:
results = []
reader = unicodecsv.DictReader(csvfile)
for row in reader:
# Skip non-target offices
if self._skip_row(row):
continue
elif 'state_legislative' in self.source:
results.extend(self._prep_state_leg_results(row))
elif 'precinct' in self.source:
results.append(self._prep_precinct_result(row))
else:
results.append(self._prep_county_result(row))
RawResult.objects.insert(results)
def _skip_row(self, row):
        if row['Office Name'] is None:
return True
return row['Office Name'].strip() not in self.target_offices
def _build_contest_kwargs(self, row, primary_type):
kwargs = {
'office': row['Office Name'].strip(),
'district': row['Office District'].strip(),
}
# Add party if it's a primary
#TODO: QUESTION: Should semi-closed also have party?
if primary_type == 'closed':
kwargs['primary_party'] = row['Party'].strip()
return kwargs
def _build_candidate_kwargs(self, row):
try:
full_name = row['Candidate Name'].strip()
except KeyError:
# 2000 results use "Candidate" for the column name
full_name = row['Candidate'].strip()
slug = slugify(full_name, substitute='-')
kwargs = {
'full_name': full_name,
#TODO: QUESTION: Do we need this? if so, needs a matching model field on RawResult
'name_slug': slug,
}
return kwargs
def _base_kwargs(self, row):
"Build base set of kwargs for RawResult"
# TODO: Can this just be called once?
kwargs = self._build_common_election_kwargs()
contest_kwargs = self._build_contest_kwargs(row, kwargs['primary_type'])
candidate_kwargs = self._build_candidate_kwargs(row)
kwargs.update(contest_kwargs)
kwargs.update(candidate_kwargs)
return kwargs
def _get_state_ocd_id(self):
"""
        Get the state portion of the mapping's OCD ID.
        This is necessary because the mappings for some files have OCD IDs
like 'ocd-division/country:us/state:md/sldl:all'. We need to extract
the state portion, 'ocd-division/country:us/state:md' to build OCD
IDs for lower jurisdictions.
"""
bits = []
state_bit = "state:"+ self.state
for bit in self.mapping['ocd_id'].split('/'):
bits.append(bit)
if bit == state_bit:
break
return '/'.join(bits)
def _prep_state_leg_results(self, row):
kwargs = self._base_kwargs(row)
kwargs.update({
'reporting_level': 'state_legislative',
'winner': row['Winner'].strip(),
'write_in': self._writein(row),
'party': row['Party'].strip(),
})
try:
kwargs['write_in'] = row['Write-In?'].strip() # at the contest-level
except KeyError as e:
pass
results = []
for field, val in list(row.items()):
clean_field = field.strip()
# Legislative fields prefixed with LEGS
if not clean_field.startswith('LEGS'):
continue
kwargs.update({
'jurisdiction': clean_field,
# Remove the "LEGS " from the ocd_id. This is a somewhat
# transformy action, but do it here in order to make the OCD IDs
# as usable as possible when we bake out raw results
'ocd_id': "{}/sldl:{}".format(self._get_state_ocd_id(),
ocd_type_id(clean_field.replace("LEGS ", ""))),
'votes': self._votes(val),
})
results.append(RawResult(**kwargs))
return results
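    # E.g. a hypothetical column named 'LEGS 32' yields a result with
    # jurisdiction 'LEGS 32' and an ocd_id ending in 'sldl:32' under the
    # state's OCD ID.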
def _prep_county_result(self, row):
kwargs = self._base_kwargs(row)
vote_brkdown_fields = [
('election_day', 'Election Night Votes'),
('absentee', 'Absentees Votes'),
('provisional', 'Provisional Votes'),
('second_absentee', '2nd Absentees Votes'),
]
vote_breakdowns = {}
for field, key in vote_brkdown_fields:
try:
vote_breakdowns[field] = self._votes(row[key].strip())
except KeyError:
pass
kwargs.update({
'reporting_level': 'county',
'jurisdiction': self.mapping['name'],
'ocd_id': self.mapping['ocd_id'],
'party': row['Party'].strip(),
'votes': self._votes(row['Total Votes']),
'vote_breakdowns': vote_breakdowns,
})
if (kwargs['office'] not in self.district_offices
and kwargs['district'] != ''):
kwargs['reporting_level'] = 'congressional_district_by_county'
kwargs['reporting_district'] = kwargs['district']
del kwargs['district']
return RawResult(**kwargs)
def _prep_precinct_result(self, row):
kwargs = self._base_kwargs(row)
precinct = "%s-%s" % (row['Election District'], row['Election Precinct'].strip())
ocd_id = "{}/precinct:{}".format(self.mapping['ocd_id'],
ocd_type_id(precinct))
kwargs.update({
'reporting_level': 'precinct',
'jurisdiction': precinct,
'parent_jurisdiction': self.mapping['name'],
'ocd_id': ocd_id,
'party': row['Party'].strip(),
'votes': self._votes(row['Election Night Votes']),
'votes_type': 'election_day',
'winner': row['Winner'],
'write_in': self._writein(row),
})
return RawResult(**kwargs)
def _votes(self, val):
"""
Returns cleaned version of votes or 0 if it's a non-numeric value.
"""
if val.strip() == '':
return 0
try:
return int(float(val))
except ValueError:
            # Couldn't convert the value from a string
return 0
def _writein(self, row):
# sometimes write-in field not present
try:
write_in = row['Write-In?'].strip()
except KeyError:
write_in = None
return write_in
class MDLoader2002(MDBaseLoader):
"""
Loads Maryland results for 2002.
Format:
Maryland results for 2002 are in a delimited text file where the delimiter
is '|'.
Fields:
0: Office
1: Office District - '-' is used to denote null values
2: County
3: Last Name - "zz998" is used for write-in candidates
4: Middle Name - "\\N" is used to denote null values
5: First Name - "Other Write-Ins" is used for write-in candidates
6: Party
7: Winner - Value is 0 or 1
8: UNKNOWN - Values are "(Vote for One)", "(Vote for No More Than Three)", etc.
9: Votes
10: UNKNOWN - Values are "\\N" for every row
Sample row:
House of Delegates |32 |Anne Arundel County |Burton |W. |Robert |Republican | 0|(Vote for No More Than Three) | 1494|\\N
Notes:
In the general election file, there are rows for judges and for
"Statewide Ballot Questions". The columns in these rows are shifted over,
but we can ignore these rows since we're not interested in these offices.
"""
def load(self):
headers = [
'office',
'district',
'jurisdiction',
'family_name',
'additional_name',
'given_name',
'party',
'winner',
'vote_type',
'votes',
'fill2'
]
self._common_kwargs = self._build_common_election_kwargs()
self._common_kwargs['reporting_level'] = 'county'
# Store result instances for bulk loading
results = []
with self._file_handle as csvfile:
reader = unicodecsv.DictReader(csvfile, fieldnames=headers, delimiter='|')
for row in reader:
if self._skip_row(row):
continue
rr_kwargs = self._common_kwargs.copy()
if rr_kwargs['primary_type'] == 'closed':
rr_kwargs['primary_party'] = row['party'].strip()
rr_kwargs.update(self._build_contest_kwargs(row))
rr_kwargs.update(self._build_candidate_kwargs(row))
jurisdiction = row['jurisdiction'].strip()
rr_kwargs.update({
'party': row['party'].strip(),
'jurisdiction': jurisdiction,
'ocd_id': self._get_county_ocd_id(jurisdiction),
'office': row['office'].strip(),
'district': row['district'].strip(),
'votes': int(row['votes'].strip()),
})
results.append(RawResult(**rr_kwargs))
RawResult.objects.insert(results)
def _skip_row(self, row):
return row['office'].strip() not in self.target_offices
def _build_contest_kwargs(self, row):
return {
'office': row['office'].strip(),
'district': row['district'].strip(),
}
def _build_candidate_kwargs(self, row):
return {
'family_name': row['family_name'].strip(),
'given_name': row['given_name'].strip(),
'additional_name': row['additional_name'].strip(),
}
class MDLoader2000Primary(MDBaseLoader):
office_choices = [
"President and Vice President of the United States",
"U.S. Senator",
"Representative in Congress",
"Judge of the Circuit Court",
"Female Delegates and Alternate to the Democratic National Convention",
"Female Delegates to the Democratic National Convention",
"Male Delegates to the Democratic National Convention",
"Male Delegates and Alternate to the Democratic National Convention",
"Delegates to the Republican National Convention",
]
def load(self):
candidates = {}
results = []
last_office = None
last_party = None
        last_district = None
        winner_name = None
common_kwargs = self._build_common_election_kwargs()
with self._file_handle as csvfile:
reader = csv.reader(csvfile)
for row in reader:
if not len(row):
continue # Skip blank lines
# determine if this is a row with an office
office, party, district = self._parse_header(row)
if office:
# It's a header row
if office in self.target_offices:
# It's an office we care about. Save the office and
# party for the next row
last_office = office
last_party = party
last_district = district
else:
last_office = None
last_party = None
last_district = None
elif last_office and row[0] == '':
# Candidate name row
candidates, winner_name = self._parse_candidates(row)
elif last_office: # has to be a county result
new_results = self._parse_results(row, last_office,
last_party, last_district,
candidates, winner_name, common_kwargs)
results.extend(new_results)
RawResult.objects.insert(results)
def _parse_header(self, row):
"""
Returns a tuple of office and party and congressional district
if the row is a header.
Returns (None, None, None) for a non-header row.
        Note that the district doesn't necessarily represent the office's
        district; except for U.S. House races it only reflects how results
        are aggregated (see _parse_results).
"""
office = self._parse_office(row)
if office:
party = self._parse_party(row)
district = self._parse_district(row)
else:
party = None
district = None
return office, party, district
def _parse_office(self, row):
for o in self.office_choices:
if o in row[0]:
return o
return None
def _parse_party(self, row):
if 'Democratic' in row[0]:
return 'Democratic'
elif 'Republican' in row[0]:
return 'Republican'
else:
return None
def _parse_district(self, row):
if 'District' not in row[0]:
return None
        return re.search(r'(\d+)', row[0]).group(1)
def _parse_candidates(self, row):
        candidates = []
        winner = None
for col in row:
if col != '':
full_name = col.strip()
if 'Winner' in full_name:
# Trim winner from candidate name
full_name, remainder = full_name.split(' Winner')
winner = full_name
candidates.append(full_name)
return candidates, winner
    # TODO: QUESTION: How to handle "Uncommitted to any ..." values
def _parse_results(self, row, office, party, district, candidates,
winner_name, common_kwargs):
results = []
cols = [x.strip() for x in row if x != '']
county = cols[0].strip()
cand_results = list(zip(candidates, cols[1:]))
for cand, votes in cand_results:
result_kwargs = common_kwargs.copy()
result_kwargs.update({
'jurisdiction': county,
'ocd_id': self._get_county_ocd_id(county),
'office': office,
'party': party,
'full_name': cand,
'votes': int(votes),
})
if result_kwargs['primary_type'] == 'closed':
result_kwargs['primary_party'] = party
if office == "Representative in Congress":
# In the case of U.S. representatives, the district represents
# the office district. In all other cases, it just
# represents the level of result aggregation.
result_kwargs['district'] = district
if cand == winner_name:
result_kwargs['winner'] = 'Winner'
# Try to figure out if this is a case where results are
# provided by congressional district split by county and
# record this.
result_kwargs['reporting_level'] = self._get_reporting_level(district)
if result_kwargs['reporting_level'] == 'congressional_district_by_county':
result_kwargs['reporting_district'] = district
results.append(RawResult(**result_kwargs))
return results
def _get_reporting_level(self, district):
"""
Returns the reporting level based on the value of the results' district.
This deals with the way in which results for 2000 primaries are
returned broken down by both congressional district, split by county.
"""
if district:
return "congressional_district_by_county"
else:
return "county"
class MDLoader2008Special(CountyOCDMixin, BaseLoader):
"""
Loader for the Maryland 2008 4th Congressional District Special election results
"""
datasource = Datasource()
def load(self):
table = self._get_html_table()
rows = self._parse_html_table(table)
winner_name = self._parse_winner_name(table)
candidate_attrs = self._parse_candidates_and_parties(rows[0],
winner_name)
results = self._parse_results(rows[1:3], candidate_attrs)
RawResult.objects.insert(results)
def _get_html_table(self):
soup = BeautifulSoup(self._file_handle, 'html.parser')
return soup.find(text=re.compile("Donna Edwards")).parent.parent.parent
def _parse_html_table(self, table):
rows = []
for tr in table.find_all('tr'):
rows.append(self._parse_html_table_row(tr))
return rows
def _parse_html_table_row(self, tr):
row = []
cells = tr.find_all('th') + tr.find_all('td')
for cell in cells:
row.append(cell.text.strip())
return row
def _parse_winner_name(self, table):
cell = table.select('th > img')[0].parent
return self._parse_name(cell.text.strip())
def _parse_candidates_and_parties(self, row, winner_name):
candidate_attrs = []
for cell in row[1:]:
# Skip the first cell. It's a header, "County"
attrs = {
'full_name': self._parse_name(cell),
'party': self._parse_party(cell),
'write_in': self._parse_write_in(cell),
}
if attrs['full_name'] == winner_name:
attrs['contest_winner'] = True
candidate_attrs.append(attrs)
return candidate_attrs
def _parse_name(self, s):
if s == "Other Write-Ins":
return s
# We know that all the candidate names are just first and last names
bits = re.split(r'\s', s)
return ' '.join(bits[:2])
def _parse_party(self, s):
if s == "Other Write-Ins":
return None
bits = re.split(r'\s', s)
return bits[2]
def _parse_write_in(self, s):
if s == "Other Write-Ins":
return s
elif "Write-In" in s:
return "Write-In"
else:
return ""
def _parse_results(self, rows, candidate_attrs):
# These raw result attributes will be the same for every result.
common_kwargs = self._build_common_election_kwargs()
common_kwargs.update({
'office': "Representative in Congress",
'district': '4',
'reporting_level': "county",
})
results = []
for row in rows:
county = row[0]
for i in range(1, len(row)):
kwargs = common_kwargs.copy()
kwargs.update(candidate_attrs[i-1])
kwargs['jurisdiction'] = county
kwargs['ocd_id'] = self._get_county_ocd_id(county)
kwargs['votes'] = self._parse_votes(row[i])
results.append(RawResult(**kwargs))
return results
def _parse_votes(self, s):
return int(s.split(' ')[0].replace(',', ''))
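    # Hedged worked example (input shape illustrative): '1,494' and
    # '1,494 52.3%' both parse to 1494 - the first space-separated token is
    # taken and commas are stripped before int().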
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""This module contains Trait classes that we've pulled from the
traits source and fixed due to various bugs. File and Directory are
redefined as the release version had dependencies on TraitsUI, which
we do not want Nipype to depend on. At least not yet.
Undefined class was missing the __len__ operator, causing edit_traits
and configure_traits to fail on List objects. Even though we don't
require TraitsUI, this bug was the only thing preventing us from
popping up GUIs which users like.
These bugs have been in Traits v3.3.0 and v3.2.1. We have reported
all of these bugs and they've been fixed in enthought svn repository
(usually by Robert Kern).
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import filter, object, str, bytes
import os
# perform all external trait imports here
import traits
# NOTE: plain string comparison mis-orders versions like '10.0.0', so
# compare semantically instead
from distutils.version import LooseVersion
if LooseVersion(traits.__version__) < LooseVersion('3.7.0'):
    raise ImportError('Traits version 3.7.0 or higher must be installed')
import traits.api as traits
from traits.trait_handlers import TraitDictObject, TraitListObject
from traits.trait_errors import TraitError
from traits.trait_base import _Undefined, class_of
from traits.api import BaseUnicode
from traits.api import Unicode
DictStrStr = traits.Dict((bytes, str), (bytes, str))
Str = Unicode
class BaseFile(BaseUnicode):
""" Defines a trait whose value must be the name of a file.
"""
# A description of the type of value this trait accepts:
info_text = 'a file name'
def __init__(self, value='', filter=None, auto_set=False,
entries=0, exists=False, **metadata):
""" Creates a File trait.
Parameters
----------
value : string
The default value for the trait
filter : string
A wildcard string to filter filenames in the file dialog box used by
the attribute trait editor.
auto_set : boolean
Indicates whether the file editor updates the trait value after
every key stroke.
exists : boolean
Indicates whether the trait value must be an existing file or
not.
Default Value
-------------
*value* or ''
"""
self.filter = filter
self.auto_set = auto_set
self.entries = entries
self.exists = exists
if exists:
self.info_text = 'an existing file name'
super(BaseFile, self).__init__(value, **metadata)
def validate(self, object, name, value):
""" Validates that a specified value is valid for this trait.
Note: The 'fast validator' version performs this check in C.
"""
validated_value = super(BaseFile, self).validate(object, name, value)
if not self.exists:
return validated_value
elif os.path.isfile(value):
return validated_value
else:
            raise TraitError(
                args="The trait '{}' of {} instance is {}, but the path "
                "'{}' does not exist.".format(name, class_of(object),
                                              self.info_text, value))
class File (BaseFile):
"""
Defines a trait whose value must be the name of a file.
Disables the default C-level fast validator.
"""
def __init__(self, value='', filter=None, auto_set=False,
entries=0, exists=False, **metadata):
""" Creates a File trait.
Parameters
----------
value : string
The default value for the trait
filter : string
A wildcard string to filter filenames in the file dialog box used by
the attribute trait editor.
auto_set : boolean
Indicates whether the file editor updates the trait value after
every key stroke.
exists : boolean
Indicates whether the trait value must be an existing file or
not.
Default Value
-------------
*value* or ''
"""
# if not exists:
# # Define the C-level fast validator to use:
# fast_validate = (11, str)
super(File, self).__init__(value, filter, auto_set, entries, exists,
**metadata)
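# A minimal, hedged usage sketch (illustrative, not nipype API): with
# exists=True the File trait rejects values that are not paths to existing
# files when they are assigned.
class _FileTraitDemo(traits.HasTraits):
    in_file = File(exists=True)

def _file_trait_demo():
    demo = _FileTraitDemo()
    try:
        demo.in_file = '/path/that/does/not/exist'
    except TraitError:
        return True  # expected: the path does not exist
    return False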
# -------------------------------------------------------------------------------
# 'BaseDirectory' and 'Directory' traits:
# -------------------------------------------------------------------------------
class BaseDirectory (BaseUnicode):
"""
Defines a trait whose value must be the name of a directory.
"""
# A description of the type of value this trait accepts:
info_text = 'a directory name'
def __init__(self, value='', auto_set=False, entries=0,
exists=False, **metadata):
""" Creates a BaseDirectory trait.
Parameters
----------
value : string
The default value for the trait
auto_set : boolean
Indicates whether the directory editor updates the trait value
after every key stroke.
exists : boolean
Indicates whether the trait value must be an existing directory or
not.
Default Value
-------------
*value* or ''
"""
self.entries = entries
self.auto_set = auto_set
self.exists = exists
if exists:
self.info_text = 'an existing directory name'
super(BaseDirectory, self).__init__(value, **metadata)
def validate(self, object, name, value):
""" Validates that a specified value is valid for this trait.
Note: The 'fast validator' version performs this check in C.
"""
if isinstance(value, (str, bytes)):
if not self.exists:
return value
if os.path.isdir(value):
return value
else:
                raise TraitError(
                    args="The trait '{}' of {} instance is {}, but the path "
                    "'{}' does not exist.".format(name, class_of(object),
                                                  self.info_text, value))
self.error(object, name, value)
class Directory (BaseDirectory):
"""
Defines a trait whose value must be the name of a directory.
Disables the default C-level fast validator.
"""
def __init__(self, value='', auto_set=False, entries=0,
exists=False, **metadata):
""" Creates a Directory trait.
Parameters
----------
value : string
The default value for the trait
auto_set : boolean
Indicates whether the directory editor updates the trait value
after every key stroke.
exists : boolean
Indicates whether the trait value must be an existing directory or
not.
Default Value
-------------
*value* or ''
"""
# Define the C-level fast validator to use if the directory existence
# test is not required:
# if not exists:
# self.fast_validate = (11, str)
super(Directory, self).__init__(value, auto_set, entries, exists,
**metadata)
# lists of tuples
# each element consists of :
# - uncompressed (tuple[0]) extension
# - compressed (tuple[1]) extension
img_fmt_types = {
'nifti1': [('.nii', '.nii.gz'),
(('.hdr', '.img'), ('.hdr', '.img.gz'))],
'mgh': [('.mgh', '.mgz'), ('.mgh', '.mgh.gz')],
'nifti2': [('.nii', '.nii.gz')],
'cifti2': [('.nii', '.nii.gz')],
'gifti': [('.gii', '.gii.gz')],
'dicom': [('.dcm', '.dcm'), ('.IMA', '.IMA'), ('.tar', '.tar.gz')],
    'nrrd': [('.nrrd', '.nrrd'), ('.nhdr', '.nhdr')],
'afni': [('.HEAD', '.HEAD'), ('.BRIK', '.BRIK')]
}
class ImageFile(File):
""" Defines a trait of specific neuroimaging files """
def __init__(self, value='', filter=None, auto_set=False, entries=0,
exists=False, types=[], allow_compressed=True, **metadata):
""" Trait handles neuroimaging files.
Parameters
----------
types : list
Strings of file format types accepted
        allow_compressed : boolean
            Indicates whether compressed variants of the file formats are
            also accepted
"""
self.types = types
self.allow_compressed = allow_compressed
super(ImageFile, self).__init__(value, filter, auto_set, entries,
exists, **metadata)
def grab_exts(self):
# TODO: file type validation
exts = []
for fmt in self.types:
if fmt in img_fmt_types:
exts.extend(sum([[u for u in y[0]] if isinstance(y[0], tuple)
else [y[0]] for y in img_fmt_types[fmt]], []))
if self.allow_compressed:
exts.extend(sum([[u for u in y[-1]] if isinstance(y[-1],
tuple) else [y[-1]] for y in img_fmt_types[fmt]], []))
else:
raise AttributeError('Information has not been added for format'
' type {} yet. Supported formats include: '
'{}'.format(fmt,
', '.join(img_fmt_types.keys())))
return list(set(exts))
def validate(self, object, name, value):
""" Validates that a specified value is valid for this trait.
"""
validated_value = super(ImageFile, self).validate(object, name, value)
if validated_value and self.types:
self._exts = self.grab_exts()
if not any(validated_value.endswith(x) for x in self._exts):
raise TraitError(
args="{} is not included in allowed types: {}".format(
validated_value, ', '.join(self._exts)))
return validated_value
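# Hedged illustration (the variable name is ours): for a nifti1 ImageFile
# with compression allowed, grab_exts() resolves to the union of the plain
# and compressed extensions.
_nifti1_demo = ImageFile(types=['nifti1'], allow_compressed=True)
# sorted(_nifti1_demo.grab_exts())
# -> ['.hdr', '.img', '.img.gz', '.nii', '.nii.gz']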
"""
The functions that pop-up the Traits GUIs, edit_traits and
configure_traits, were failing because all of our inputs default to
Undefined deep and down in traits/ui/wx/list_editor.py it checks for
the len() of the elements of the list. The _Undefined class in traits
does not define the __len__ method and would error. I tried defining
our own Undefined and even sublassing Undefined, but both of those
failed with a TraitError in our initializer when we assign the
Undefined to the inputs because of an incompatible type:
TraitError: The 'vertical_gradient' trait of a BetInputSpec instance must be a float, but a value of <undefined> <class 'nipype.interfaces.traits._Undefined'> was specified.
So... in order to keep the same type but add the missing method, I
monkey patched.
"""
def length(self):
return 0
##########################################################################
# Apply monkeypatch here
_Undefined.__len__ = length
##########################################################################
Undefined = _Undefined()
def isdefined(object):
return not isinstance(object, _Undefined)
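# Hedged sanity checks for the monkeypatch and isdefined above (these lines
# are illustrative additions, not part of the original module):
assert len(Undefined) == 0
assert not isdefined(Undefined)
assert isdefined(0)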
def has_metadata(trait, metadata, value=None, recursive=True):
'''
Checks if a given trait has a metadata (and optionally if it is set to particular value)
'''
count = 0
if hasattr(trait, "_metadata") and metadata in list(trait._metadata.keys()) and (trait._metadata[metadata] == value or value is None):
count += 1
if recursive:
if hasattr(trait, 'inner_traits'):
for inner_trait in trait.inner_traits():
                count += has_metadata(inner_trait.trait_type, metadata, value, recursive)
if hasattr(trait, 'handlers') and trait.handlers is not None:
for handler in trait.handlers:
                count += has_metadata(handler, metadata, value, recursive)
return count > 0
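def _has_metadata_demo():
    # Hedged example (illustrative, not nipype API): a trait type created
    # with mandatory=True carries that flag in its _metadata dict, so
    # has_metadata finds it with or without an expected value.
    tr = Unicode(mandatory=True)
    return has_metadata(tr, 'mandatory', True) and has_metadata(tr, 'mandatory')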
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class RouteTablesOperations(object):
"""RouteTablesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-11-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-11-01"
self.config = config
def _delete_initial(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, route_table_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
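    # Hedged usage sketch ('ops' stands for an already-configured
    # RouteTablesOperations instance; the names are illustrative):
    #
    #     poller = ops.delete('my-resource-group', 'my-route-table')
    #     poller.result()   # blocks until the delete completes; returns None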
def get(
self, resource_group_name, route_table_name, expand=None, custom_headers=None, raw=False, **operation_config):
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: RouteTable or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_11_01.models.RouteTable or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def _create_or_update_initial(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'RouteTable')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, route_table_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route
table operation.
:type parameters: ~azure.mgmt.network.v2017_11_01.models.RouteTable
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns RouteTable or
ClientRawResponse<RouteTable> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.RouteTable]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.RouteTable]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
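    # Hedged usage sketch (illustrative; assumes models.RouteTable accepts a
    # location keyword, as other tracked ARM resources do):
    #
    #     params = models.RouteTable(location='westus')
    #     poller = ops.create_or_update('my-resource-group', 'rt-1', params)
    #     route_table = poller.result()   # deserialized RouteTable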
def _update_tags_initial(
self, resource_group_name, route_table_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, route_table_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns RouteTable or
ClientRawResponse<RouteTable> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_11_01.models.RouteTable]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_11_01.models.RouteTable]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('RouteTable', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2017_11_01.models.RouteTablePaged[~azure.mgmt.network.v2017_11_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'}
def list_all(
self, custom_headers=None, raw=False, **operation_config):
"""Gets all route tables in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of RouteTable
:rtype:
~azure.mgmt.network.v2017_11_01.models.RouteTablePaged[~azure.mgmt.network.v2017_11_01.models.RouteTable]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.RouteTablePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.RouteTablePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'}
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
#
# Ripped off from Nova's test_migrations.py
# The only difference between Nova and this code is usage of alembic instead
# of sqlalchemy migrations.
#
# There is ongoing work to extract similar code into the oslo incubator. Once
# it is extracted we'll be able to remove this file and use oslo.
import io
import os
from alembic import command
from alembic import config as alembic_config
from alembic import migration
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
import six
from six.moves import configparser
import six.moves.urllib.parse as urlparse
from storyboard.db import api as db_api
import storyboard.db.migration
from storyboard.tests import base
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
synchronized = lockutils.synchronized_with_prefix('storyboard-')
def _get_connect_string(backend, user, passwd, database):
"""Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
if backend == "mysql":
backend = "mysql+pymysql"
else:
raise Exception("Unrecognized backend: '%s'" % backend)
return ("%s://%s:%s@localhost/%s" % (backend, user, passwd, database))
def _is_backend_avail(backend, user, passwd, database):
try:
connect_uri = _get_connect_string(backend, user, passwd, database)
CONF.database.connection = connect_uri
engine = db_api.get_engine()
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def _have_mysql(user, passwd, database):
present = os.environ.get('STORYBOARD_MYSQL_PRESENT')
if present is None:
return _is_backend_avail('mysql', user, passwd, database)
return present.lower() in ('', 'true')
def get_mysql_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
if auth_pieces[1].strip():
password = "-p\"%s\"" % auth_pieces[1]
return (user, password, database, host)
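def _connection_info_example():
    """Hedged worked example (credentials illustrative): shows how the parsed
    URL pieces map onto (user, password flag, database, host).
    """
    pieces = urlparse.urlparse('mysql+pymysql://sb:secret@localhost/storyboard')
    return get_mysql_connection_info(pieces)
    # -> ('sb', '-p"secret"', 'storyboard', 'localhost')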
class CommonTestsMixIn(object):
"""These tests are shared between TestStoryboardMigrations and
TestBaremetalMigrations.
BaseMigrationTestCase is effectively an abstract class, meant to be derived
from and not directly tested against; that's why these `test_` methods need
to be on a Mixin, so that they won't be picked up as valid tests for
BaseMigrationTestCase.
"""
def test_walk_versions(self):
for key, engine in self.engines.items():
# We start each walk with a completely blank slate.
self._reset_database(key)
self._walk_versions(engine, self.snake_walk, self.downgrade)
def test_mysql_opportunistically(self):
self._test_mysql_opportunistically()
def test_mysql_connect_fail(self):
"""Test that we can trigger a mysql connection failure and we fail
gracefully to ensure we don't break people without mysql
"""
if _is_backend_avail('mysql', "openstack_cifail", self.PASSWD,
self.DATABASE):
self.fail("Shouldn't have connected")
class BaseMigrationTestCase(base.TestCase):
"""Base class for testing migrations and migration utils. This sets up
and configures the databases to run tests against.
"""
# NOTE(jhesketh): It is expected that tests clean up after themselves.
# This is necessary for concurrency to allow multiple tests to work on
# one database.
# The full migration walk tests however do call the old _reset_databases()
# to throw away whatever was there so they need to operate on their own
# database that we know isn't accessed concurrently.
# Hence, BaseWalkMigrationTestCase overwrites the engine list.
USER = None
PASSWD = None
DATABASE = None
TIMEOUT_SCALING_FACTOR = 2
def __init__(self, *args, **kwargs):
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
self.DEFAULT_CONFIG_FILE = os.path.join(
os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the STORYBOARD_TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
self.CONFIG_FILE_PATH = os.environ.get(
'STORYBOARD_TEST_MIGRATIONS_CONF',
self.DEFAULT_CONFIG_FILE)
self.ALEMBIC_CONFIG = alembic_config.Config(
os.path.join(os.path.dirname(storyboard.db.migration.__file__),
'alembic.ini')
)
self.ALEMBIC_CONFIG.storyboard_config = CONF
self.snake_walk = False
self.downgrade = False
self.test_databases = {}
self.migration = None
self.migration_api = None
def setUp(self):
super(BaseMigrationTestCase, self).setUp()
self._load_config()
def _load_config(self):
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = configparser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
config = cp.options('unit_tests')
for key in config:
self.test_databases[key] = cp.get('unit_tests', key)
self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
self.downgrade = cp.getboolean('walk_style', 'downgrade')
except configparser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = db_api.get_engine()
# NOTE(jhesketh): We only need to make sure the databases are created
# not necessarily clean of tables.
self._create_databases()
def execute_cmd(self, cmd=None):
out, err = processutils.trycmd(cmd, shell=True, discard_warnings=True)
output = out or err
LOG.debug(output)
self.assertEqual('', err,
"Failed to run: %s\n%s" % (cmd, output))
@synchronized('mysql', external=True)
def _reset_mysql(self, conn_pieces):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
get_mysql_connection_info(conn_pieces)
sql = ("drop database if exists %(database)s; "
"create database %(database)s;" % {'database': database})
cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
"-e \"%(sql)s\"" % {'user': user, 'password': password,
'host': host, 'sql': sql})
self.execute_cmd(cmd)
@synchronized('sqlite', external=True)
def _reset_sqlite(self, conn_pieces):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
def _create_databases(self):
"""Create all configured databases as needed."""
for key, engine in self.engines.items():
self._create_database(key)
def _create_database(self, key):
"""Create database if it doesn't exist."""
conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
if conn_string.startswith('mysql'):
(user, password, database, host) = \
get_mysql_connection_info(conn_pieces)
sql = "create database if not exists %s;" % database
cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
"-e \"%(sql)s\"" % {'user': user, 'password': password,
'host': host, 'sql': sql})
self.execute_cmd(cmd)
def _reset_databases(self):
"""Reset all configured databases."""
for key, engine in self.engines.items():
self._reset_database(key)
def _reset_database(self, key):
"""Reset specific database."""
engine = self.engines[key]
conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
self._reset_sqlite(conn_pieces)
elif conn_string.startswith('mysql'):
self._reset_mysql(conn_pieces)
class BaseWalkMigrationTestCase(BaseMigrationTestCase):
"""BaseWalkMigrationTestCase loads in an alternative set of databases for
testing against. This is necessary as the default databases can run tests
concurrently without interfering with itself. It is expected that
databases listed under [migraiton_dbs] in the configuration are only being
accessed by one test at a time. Currently only test_walk_versions accesses
the databases (and is the only method that calls _reset_database() which
is clearly problematic for concurrency).
"""
def _load_config(self):
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = configparser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
config = cp.options('migration_dbs')
for key in config:
self.test_databases[key] = cp.get('migration_dbs', key)
self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
self.downgrade = cp.getboolean('walk_style', 'downgrade')
except configparser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = db_api.get_engine()
self._create_databases()
def _configure(self, engine):
"""For each type of repository we should do some of configure steps.
For migrate_repo we should set under version control our database.
For alembic we should configure database settings. For this goal we
should use oslo_config and openstack.commom.db.sqlalchemy.session with
database functionality (reset default settings and session cleanup).
"""
CONF.set_override('connection', six.text_type(engine.url),
group='database')
db_api.cleanup()
def _test_mysql_opportunistically(self):
# Test that table creation on mysql only builds InnoDB tables
if not _have_mysql(self.USER, self.PASSWD, self.DATABASE):
self.skipTest("mysql not available")
# add this to the global lists to make reset work with it, it's removed
# automatically in tearDown so no need to clean it up here.
connect_string = _get_connect_string("mysql", self.USER, self.PASSWD,
self.DATABASE)
(user, password, database, host) = \
get_mysql_connection_info(urlparse.urlparse(connect_string))
engine = db_api.get_engine()
self.engines[database] = engine
self.test_databases[database] = connect_string
# build a fully populated mysql database with all the tables
self._reset_database(database)
self._walk_versions(engine, self.snake_walk, self.downgrade)
connection = engine.connect()
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='%(database)s'" %
{'database': database})
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
connection.close()
del(self.engines[database])
del(self.test_databases[database])
def _alembic_command(self, alembic_command, engine, *args, **kwargs):
"""Most of alembic command return data into output.
We should redefine this setting for getting info.
"""
self.ALEMBIC_CONFIG.stdout = buf = io.StringIO()
CONF.set_override('connection', six.text_type(engine.url),
group='database')
db_api.cleanup()
getattr(command, alembic_command)(*args, **kwargs)
res = buf.getvalue().strip()
LOG.debug('Alembic command `%s` returns: %s' % (alembic_command, res))
db_api.cleanup()
return res
def _get_alembic_versions(self, engine):
"""For support of full testing of migrations
we should have an opportunity to run command step by step for each
version in repo. This method returns list of alembic_versions by
historical order.
"""
full_history = self._alembic_command('history',
engine, self.ALEMBIC_CONFIG)
        # A line of output containing a version looks like:
# 'Rev: 17738166b91 (head)' or 'Rev: 43b1a023dfaa'
alembic_history = [r.split(' ')[1] for r in full_history.split("\n")
if r.startswith("Rev")]
alembic_history.reverse()
return alembic_history
def _up_and_down_versions(self, engine):
"""Since alembic version has a random algorithm of generation
(SA-migrate has an ordered autoincrement naming) we should store
a tuple of versions (version for upgrade and version for downgrade)
for successful testing of migrations in up>down>up mode.
"""
versions = self._get_alembic_versions(engine)
        return list(zip(versions, ['-1'] + versions))
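    # Worked example (illustrative revision ids): for versions
    # ['1a2b', '3c4d', '5e6f'] this pairs up as
    # [('1a2b', '-1'), ('3c4d', '1a2b'), ('5e6f', '3c4d')]: each upgrade
    # target alongside the revision to downgrade back to.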
def _walk_versions(self, engine=None, snake_walk=False,
downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
self._configure(engine)
up_and_down_versions = self._up_and_down_versions(engine)
for ver_up, ver_down in up_and_down_versions:
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, ver_up, with_data=True)
if snake_walk:
downgraded = self._migrate_down(engine,
ver_down,
with_data=True,
next_version=ver_up)
if downgraded:
self._migrate_up(engine, ver_up)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
up_and_down_versions.reverse()
for ver_up, ver_down in up_and_down_versions:
# downgrade -> upgrade -> downgrade
downgraded = self._migrate_down(engine,
ver_down, next_version=ver_up)
if snake_walk and downgraded:
self._migrate_up(engine, ver_up)
self._migrate_down(engine, ver_down, next_version=ver_up)
def _get_version_from_db(self, engine):
"""For each type of migrate repo latest version from db
will be returned.
"""
conn = engine.connect()
try:
context = migration.MigrationContext.configure(conn)
version = context.get_current_revision() or '-1'
finally:
conn.close()
return version
def _migrate(self, engine, version, cmd):
"""Base method for manipulation with migrate repo.
It will upgrade or downgrade the actual database.
"""
self._alembic_command(cmd, engine, self.ALEMBIC_CONFIG, version)
def _migrate_down(self, engine, version, with_data=False,
next_version=None):
try:
self._migrate(engine, version, 'downgrade')
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(version, self._get_version_from_db(engine))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%s" % next_version, None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
check_version = version
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%s" % check_version, None)
if pre_upgrade:
data = pre_upgrade(engine)
self._migrate(engine, version, 'upgrade')
self.assertEqual(version, self._get_version_from_db(engine))
if with_data:
check = getattr(self, "_check_%s" % check_version, None)
if check:
check(engine, data)
except Exception:
LOG.error("Failed to migrate to version %s on engine %s" %
(version, engine))
raise
import struct
from .. import base58
from .basemonitor import BaseMonitor
from ..keys import PrivateKey, PublicKey
from ..script import *
from ..serialize import Serialize
from ..transaction import TransactionInput, TransactionOutput, TransactionPrevOut
from ..util import *
from ..wallet import InvalidAddress, Spend
class MultisigScriptHashSpendInputCreator:
'''Input creators need to define the following class properties:
self.prevout : a TransactionPrevOut
self.script : a byte sequence containing scriptPubKey
self.sequence: the sequence number of the final TransactionInput
self.hash_flags: the flags used for hashing and signing
Everything else can be class-specific, but the above are used for serialization and signing
'''
def __init__(self, spv, prevout, script, sequence, address_info, hash_flags):
self.spv = spv
self.prevout = prevout
self.sequence = sequence
self.address_info = address_info
self.hash_flags = hash_flags
# P2SH signs the redemption script, not the scriptPubKey
self.script_p2sh = script
self.script = hexstring_to_bytes(address_info['redemption_script'], reverse=False)
def create_tx_input(self, hash_for_signature, flags):
script = Script()
script.push_op(OP_0)
n = 0
for public_key_bytes in self.address_info['public_keys']:
if n >= self.address_info['nreq']:
break
public_key_metadata = self.spv.wallet.get_temp('public_key', PublicKey(public_key_bytes))
if public_key_metadata is not None and 'private_key' in public_key_metadata:
private_key = public_key_metadata['private_key']
signature = private_key.sign(hash_for_signature)
script.push_bytes(signature + bytes([flags]))
n += 1
if n < self.address_info['nreq']:
raise Exception("signature error: not enough signatures for Multisignature Spend")
script.push_bytes(self.script)
return TransactionInput(prevout=self.prevout, script=script)
def estimated_script_size(self):
# signatures are at most 73 bytes, there are nreq of them
# plus 1 byte for the signature hash type on each sig
        # plus probably 2 bytes for the redemption script size, plus the redemption script itself
return (73 + 1) * self.address_info['nreq'] + 2 + len(self.script)
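# Worked example of the estimate above (illustrative sizes): a 2-of-3 spend
# with a 105-byte redemption script comes to (73 + 1) * 2 + 2 + 105 = 255
# bytes of scriptSig.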
class MultisigScriptHashSpend(Spend):
def __init__(self, coin, category, amount, address, prevout, script, address_info, spent_in=None):
Spend.__init__(self, coin, category, amount)
self.prevout = prevout
self.script = script
self.address = address
self.address_info = address_info
self.spent_in = set([] if spent_in is None else spent_in)
def hash(self):
'''one spend is equal to another only based on the prevout value'''
return self.coin.hash(self.prevout.serialize())
def is_spent(self, spv):
return any(not spv.txdb.is_conflicted(tx_hash) for tx_hash in self.spent_in)
def is_spendable(self, spv):
return not self.is_spent(spv) and self.get_confirmations(spv) >= self.coin.TRANSACTION_CONFIRMATION_DEPTH and self.has_signing_keys(spv)
def has_signing_keys(self, spv):
n = 0
for public_key_bytes in self.address_info['public_keys']:
public_key = PublicKey(public_key_bytes)
public_key_metadata = spv.wallet.get_temp('public_key', public_key)
if public_key_metadata is not None and 'private_key' in public_key_metadata:
n += 1
return n >= self.address_info['nreq']
def get_confirmations(self, spv):
return spv.txdb.get_tx_depth(self.prevout.tx_hash)
def create_input_creators(self, spv, hash_flags):
pksic = MultisigScriptHashSpendInputCreator(spv, self.prevout, self.script, 0xffffffff, self.address_info, hash_flags)
return [pksic]
def serialize(self):
return Serialize.serialize_string(self.category) + Serialize.serialize_variable_int(self.amount) + \
self.prevout.serialize() + Serialize.serialize_string(self.address) + \
struct.pack('<L', len(self.script)) + self.script + \
Serialize.serialize_dict(self.address_info) + \
Serialize.serialize_list(list(self.spent_in))
@staticmethod
def unserialize(data, coin):
category, data = Serialize.unserialize_string(data)
amount, data = Serialize.unserialize_variable_int(data)
prevout, data = TransactionPrevOut.unserialize(data)
address, data = Serialize.unserialize_string(data)
script_length = struct.unpack("<L", data[:4])[0]
script = data[4:4+script_length]
address_info, data = Serialize.unserialize_dict(data[4+script_length:])
spent_in, data = Serialize.unserialize_list(data)
spends = MultisigScriptHashSpend(coin, category, amount, address, prevout, script, address_info, spent_in=spent_in)
return spends, data
def __str__(self):
return '<MultisigScriptHashSpend {} BTC prevout={} address={}{}>'.format(self.coin.format_money(self.amount), str(self.prevout), self.address, ' SPENT' if len(self.spent_in) else '')
class MultisigScriptHashPaymentMonitor(BaseMonitor):
spend_classes = [MultisigScriptHashSpend]
def __init__(self, spv):
BaseMonitor.__init__(self, spv)
self.spend_by_prevout = {}
self.script_addresses = {}
def on_new_spend(self, spend):
# We only care about multisig spends
if not isinstance(spend, MultisigScriptHashSpend):
return
# Save spend to check if it gets spent
self.spend_by_prevout[spend.prevout] = spend
def on_new_redemption_script(self, redemption_script, metadata):
# parse redemption_script to verify it's a multisig redemption script
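# A conforming m-of-n redemption script is laid out as (hypothetical
# 2-of-3 example):
#   OP_2 <33/65-byte pubkey> <pubkey> <pubkey> OP_3 OP_CHECKMULTISIG
# i.e. the first opcode encodes nreq and the next-to-last opcode encodes
# the public key count, which is what the checks below verify.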
if len(redemption_script) < 3 or redemption_script[-1] != OP_CHECKMULTISIG:
return
if redemption_script[0] == OP_0:
nreq = 0
else:
nreq = redemption_script[0] - OP_1 + 1
if nreq < 0 or nreq > 9:
return
# Get the pubkeys out of the script
index = 1
public_keys = []
while index < len(redemption_script) - 2:
size = redemption_script[index]
if size not in (33, 65): # not a public key, too bad
return
if len(redemption_script) - (index + 1) < size:
return
public_keys.append(redemption_script[index+1:index+1+size])
if (size == 33 and (public_keys[-1][0] not in (0x02, 0x03))) or (size == 65 and public_keys[-1][0] != 0x04):
return
index += size + 1
if (redemption_script[-2] - OP_1 + 1) != len(public_keys):
return
address = base58_check(self.spv.coin, self.spv.coin.hash160(redemption_script), version_bytes=self.spv.coin.P2SH_ADDRESS_VERSION_BYTES)
# TODO on_public_key could check if any of our redemption_scripts reference that pubkey and if a private key is available for signing, etc
self.script_addresses[address] = {
'address' : address,
'redemption_script': bytes_to_hexstring(redemption_script, reverse=False),
'nreq' : nreq,
'public_keys' : public_keys,
}
self.spv.wallet.add_temp('address', address, {'redemption_script': redemption_script})
if self.spv.logging_level <= DEBUG:
print('[MULTISIGSCRIPTHASHPAYMENTMONITOR] watching for multi-signature payment to {}'.format(address))
print('[MULTISIGSCRIPTHASHPAYMENTMONITOR] {} of {} public_keys: {}'.format(nreq, len(public_keys), ', '.join(bytes_to_hexstring(public_key, reverse=False) for public_key in public_keys)))
def on_tx(self, tx):
tx_hash = tx.hash()
# check inputs, they might spend coins from the wallet
for i, input in enumerate(tx.inputs):
spend = self.spend_by_prevout.get(input.prevout, None)
if spend is not None:
# Have we seen this spend before?
if tx_hash in spend.spent_in:
continue
# Update this Spend with a new spend tx
spend.spent_in.add(tx_hash)
self.spv.wallet.update_spend(spend)
if self.spv.logging_level <= INFO:
print('[MULTISIGSCRIPTHASHPAYMENTMONITOR] tx {} spends {} amount={}'.format(bytes_to_hexstring(tx_hash), input.prevout, self.spv.coin.format_money(spend.amount)))
continue
# Check whether this input is a multisig p2sh spend (OP_0 <sig> .. <sig> <redemption_script>)
# and, if so, whether the redemption script is in our wallet. If it is, remember this spend for later.
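# For example (hypothetical 2-of-3 spend), the scriptSig looks like:
#   OP_0 PUSH(72) <sig1> PUSH(72) <sig2> PUSH(105) <redemption_script>
# where each PUSH is either a direct length byte (1-75) or an
# OP_PUSHDATA1/2/4 opcode followed by a little-endian length.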
if len(input.script.program) == 0 or input.script.program[0] != OP_0:
continue
# Break the program into data pushes... TODO: move this to script.py
index = 1
pushes = []
while index < len(input.script.program):
    size = input.script.program[index]
    if size == OP_PUSHDATA1 and (index+1) < len(input.script.program):
        size = input.script.program[index+1]
        index += 2
    elif size == OP_PUSHDATA2 and (index+2) < len(input.script.program):
        size = input.script.program[index+1] | (input.script.program[index+2] << 8)
        index += 3
    elif size == OP_PUSHDATA4 and (index+4) < len(input.script.program):
        size = input.script.program[index+1] | (input.script.program[index+2] << 8) | (input.script.program[index+3] << 16) | (input.script.program[index+4] << 24)
        index += 5
    else:
        # direct push: the opcode itself (1-75) is the data length, so
        # step past the length byte before slicing out the data
        index += 1
    pushes.append(input.script.program[index:index+size])
    index += size
# The last data push has to be our redemption script
if len(pushes) == 0:
continue
redemption_script = pushes[-1]
address = base58_check(self.spv.coin, self.spv.coin.hash160(redemption_script), version_bytes=self.spv.coin.P2SH_ADDRESS_VERSION_BYTES)
address_info = self.script_addresses.get(address, None)
if address_info is None:
continue
# Yes, be sure to save the tx
self.spv.txdb.save_tx(tx)
# Add this spending transaction to the list of spent_in transaction ids for use whenever the payment is received
unknown_redemption_script_spend_key = (input.prevout.tx_hash, input.prevout.n)
unknown_redemption_script_spend_metadata = self.spv.wallet.get('unknown_redemption_script_spends', unknown_redemption_script_spend_key)
if unknown_redemption_script_spend_metadata is not None:
unknown_redemption_script_spend_metadata['spent_in'].append(tx_hash)
self.spv.wallet.update('unknown_redemption_script_spends', unknown_redemption_script_spend_key, unknown_redemption_script_spend_metadata)
else:
unknown_redemption_script_spend_metadata = {'spent_in': [tx_hash]}
self.spv.wallet.add('unknown_redemption_script_spends', unknown_redemption_script_spend_key, unknown_redemption_script_spend_metadata)
if self.spv.logging_level <= DEBUG:
print("[MULTISIGSCRIPTHASHPAYMENTMONITOR] tx {} spends {} from our wallet but we don't know the spend yet!".format(bytes_to_hexstring(tx_hash), input.prevout))
for i, output in enumerate(tx.outputs):
# Analyze the script for P2SH
script = output.script.program
if len(script) == 23 and script[0] == OP_HASH160 and script[1] == 20 and script[-1] == OP_EQUAL:
redemption_script_hash = script[2:22]
else:
continue
# Check to see if we care about this scripthash
address = base58_check(self.spv.coin, redemption_script_hash, version_bytes=self.spv.coin.P2SH_ADDRESS_VERSION_BYTES)
address_info = self.script_addresses.get(address, None)
if address_info is None:
continue
self.spv.txdb.save_tx(tx)
# Build a multisig payment
# TODO - distinguish between the ones we can/can't spend
prevout = TransactionPrevOut(tx_hash, i)
spend = MultisigScriptHashSpend(self.spv.coin, 'default', output.amount, address, prevout, script, address_info)
unknown_redemption_script_spend_key = (tx_hash, i)
unknown_redemption_script_spend_metadata = self.spv.wallet.get('unknown_redemption_script_spends', unknown_redemption_script_spend_key)
if unknown_redemption_script_spend_metadata is not None:
# this spend is spent already
for tx_hash in unknown_redemption_script_spend_metadata['spent_in']:
spend.spent_in.add(tx_hash)
if not self.spv.wallet.add_spend(spend):
if self.spv.logging_level <= DEBUG:
print('[MULTISIGSCRIPTHASHPAYMENTMONITOR] payment of {} to {} already seen'.format(output.amount, address))
continue
if self.spv.logging_level <= INFO:
print('[MULTISIGSCRIPTHASHPAYMENTMONITOR] processed payment of {} to {}'.format(output.amount, address))
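# A minimal sketch (not part of the original module) of assembling a
# redemption script that on_new_redemption_script() above would accept.
# The public keys are assumed to be 33-byte compressed or 65-byte
# uncompressed keys; the opcode arithmetic mirrors the parser's
# nreq = script[0] - OP_1 + 1 relation.
def _example_build_redemption_script(public_keys, nreq):
    script = bytes([OP_1 + nreq - 1])
    for public_key_bytes in public_keys:
        # a direct push: one length byte followed by the key bytes
        script += bytes([len(public_key_bytes)]) + public_key_bytes
    script += bytes([OP_1 + len(public_keys) - 1, OP_CHECKMULTISIG])
    return script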
| |
from datetime import (
datetime,
timedelta,
)
from io import StringIO
import warnings
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
MultiIndex,
NaT,
PeriodIndex,
Series,
Timestamp,
date_range,
option_context,
period_range,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
class TestDataFrameReprInfoEtc:
def test_repr_bytes_61_lines(self, using_array_manager):
# GH#12857
lets = list("ACDEFGHIJKLMNOP")
slen = 50
nseqs = 1000
words = [[np.random.choice(lets) for x in range(slen)] for _ in range(nseqs)]
df = DataFrame(words).astype("U1")
# TODO(ArrayManager) astype("U1") actually gives this dtype instead of object
if not using_array_manager:
assert (df.dtypes == object).all()
# smoke tests; at one point this raised with 61 but not 60
repr(df)
repr(df.iloc[:60, :])
repr(df.iloc[:61, :])
def test_repr_unicode_level_names(self, frame_or_series):
index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=["\u0394", "i1"])
obj = DataFrame(np.random.randn(2, 4), index=index)
if frame_or_series is Series:
obj = obj[0]
repr(obj)
def test_assign_index_sequences(self):
# GH#2200
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}).set_index(
["a", "b"]
)
index = list(df.index)
index[0] = ("faz", "boo")
df.index = index
repr(df)
# this travels an improper code path
index[0] = ["faz", "boo"]
df.index = index
repr(df)
def test_repr_with_mi_nat(self, float_string_frame):
df = DataFrame({"X": [1, 2]}, index=[[NaT, Timestamp("20130101")], ["a", "b"]])
result = repr(df)
expected = " X\nNaT a 1\n2013-01-01 b 2"
assert result == expected
def test_multiindex_na_repr(self):
# only an issue with long columns
df3 = DataFrame(
{
"A" * 30: {("A", "A0006000", "nuit"): "A0006000"},
"B" * 30: {("A", "A0006000", "nuit"): np.nan},
"C" * 30: {("A", "A0006000", "nuit"): np.nan},
"D" * 30: {("A", "A0006000", "nuit"): np.nan},
"E" * 30: {("A", "A0006000", "nuit"): "A"},
"F" * 30: {("A", "A0006000", "nuit"): np.nan},
}
)
idf = df3.set_index(["A" * 30, "C" * 30])
repr(idf)
def test_repr_name_coincide(self):
index = MultiIndex.from_tuples(
[("a", 0, "foo"), ("b", 1, "bar")], names=["a", "b", "c"]
)
df = DataFrame({"value": [0, 1]}, index=index)
lines = repr(df).split("\n")
assert lines[2].startswith("a 0 foo")
def test_repr_to_string(
self,
multiindex_year_month_day_dataframe_random_data,
multiindex_dataframe_random_data,
):
ymd = multiindex_year_month_day_dataframe_random_data
frame = multiindex_dataframe_random_data
repr(frame)
repr(ymd)
repr(frame.T)
repr(ymd.T)
buf = StringIO()
frame.to_string(buf=buf)
ymd.to_string(buf=buf)
frame.T.to_string(buf=buf)
ymd.T.to_string(buf=buf)
def test_repr_empty(self):
# empty
repr(DataFrame())
# empty with index
frame = DataFrame(index=np.arange(1000))
repr(frame)
def test_repr_mixed(self, float_string_frame):
buf = StringIO()
# mixed
repr(float_string_frame)
float_string_frame.info(verbose=False, buf=buf)
@pytest.mark.slow
def test_repr_mixed_big(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)}, index=range(200)
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
repr(biggie)
def test_repr(self, float_frame):
buf = StringIO()
# small one
repr(float_frame)
float_frame.info(verbose=False, buf=buf)
# even smaller
float_frame.reindex(columns=["A"]).info(verbose=False, buf=buf)
float_frame.reindex(columns=["A", "B"]).info(verbose=False, buf=buf)
# exhausting cases in DataFrame.info
# columns but no index
no_index = DataFrame(columns=[0, 1, 3])
repr(no_index)
# no columns or index
DataFrame().info(buf=buf)
df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"])
assert "\t" not in repr(df)
assert "\r" not in repr(df)
assert "a\n" not in repr(df)
def test_repr_dimensions(self):
df = DataFrame([[1, 2], [3, 4]])
with option_context("display.show_dimensions", True):
assert "2 rows x 2 columns" in repr(df)
with option_context("display.show_dimensions", False):
assert "2 rows x 2 columns" not in repr(df)
with option_context("display.show_dimensions", "truncate"):
assert "2 rows x 2 columns" not in repr(df)
@pytest.mark.slow
def test_repr_big(self):
# big one
biggie = DataFrame(np.zeros((200, 4)), columns=range(4), index=range(200))
repr(biggie)
def test_repr_unsortable(self, float_frame):
# columns are not sortable
warn_filters = warnings.filters
warnings.filterwarnings("ignore", category=FutureWarning, module=".*format")
unsortable = DataFrame(
{
"foo": [1] * 50,
datetime.today(): [1] * 50,
"bar": ["bar"] * 50,
datetime.today() + timedelta(1): ["bar"] * 50,
},
index=np.arange(50),
)
repr(unsortable)
fmt.set_option("display.precision", 3, "display.column_space", 10)
repr(float_frame)
fmt.set_option("display.max_rows", 10, "display.max_columns", 2)
repr(float_frame)
fmt.set_option("display.max_rows", 1000, "display.max_columns", 1000)
repr(float_frame)
tm.reset_display_options()
warnings.filters = warn_filters
def test_repr_unicode(self):
uval = "\u03c3\u03c3\u03c3\u03c3"
df = DataFrame({"A": [uval, uval]})
result = repr(df)
ex_top = " A"
assert result.split("\n")[0].rstrip() == ex_top
df = DataFrame({"A": [uval, uval]})
result = repr(df)
assert result.split("\n")[0].rstrip() == ex_top
def test_unicode_string_with_unicode(self):
df = DataFrame({"A": ["\u05d0"]})
str(df)
def test_repr_unicode_columns(self):
df = DataFrame({"\u05d0": [1, 2, 3], "\u05d1": [4, 5, 6], "c": [7, 8, 9]})
repr(df.columns) # should not raise UnicodeDecodeError
def test_str_to_bytes_raises(self):
# GH 26447
df = DataFrame({"A": ["abc"]})
msg = "^'str' object cannot be interpreted as an integer$"
with pytest.raises(TypeError, match=msg):
bytes(df)
def test_very_wide_info_repr(self):
df = DataFrame(np.random.randn(10, 20), columns=tm.rands_array(10, 20))
repr(df)
def test_repr_column_name_unicode_truncation_bug(self):
# #1906
df = DataFrame(
{
"Id": [7117434],
"StringCol": (
"Is it possible to modify drop plot code"
"so that the output graph is displayed "
"in iphone simulator, Is it possible to "
"modify drop plot code so that the "
"output graph is \xe2\x80\xa8displayed "
"in iphone simulator.Now we are adding "
"the CSV file externally. I want to Call "
"the File through the code.."
),
}
)
with option_context("display.max_columns", 20):
assert "StringCol" in repr(df)
def test_latex_repr(self):
result = r"""\begin{tabular}{llll}
\toprule
{} & 0 & 1 & 2 \\
\midrule
0 & $\alpha$ & b & c \\
1 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
"""
with option_context("display.latex.escape", False, "display.latex.repr", True):
df = DataFrame([[r"$\alpha$", "b", "c"], [1, 2, 3]])
assert result == df._repr_latex_()
# GH 12182
assert df._repr_latex_() is None
def test_repr_categorical_dates_periods(self):
# normal DataFrame
dt = date_range("2011-01-01 09:00", freq="H", periods=5, tz="US/Eastern")
p = period_range("2011-01", freq="M", periods=5)
df = DataFrame({"dt": dt, "p": p})
exp = """ dt p
0 2011-01-01 09:00:00-05:00 2011-01
1 2011-01-01 10:00:00-05:00 2011-02
2 2011-01-01 11:00:00-05:00 2011-03
3 2011-01-01 12:00:00-05:00 2011-04
4 2011-01-01 13:00:00-05:00 2011-05"""
assert repr(df) == exp
df2 = DataFrame({"dt": Categorical(dt), "p": Categorical(p)})
assert repr(df2) == exp
@pytest.mark.parametrize("arg", [np.datetime64, np.timedelta64])
@pytest.mark.parametrize(
"box, expected",
[[Series, "0 NaT\ndtype: object"], [DataFrame, " 0\n0 NaT"]],
)
def test_repr_np_nat_with_object(self, arg, box, expected):
# GH 25445
result = repr(box([arg("NaT")], dtype=object))
assert result == expected
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
def test_frame_to_string_with_periodindex(self):
index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M")
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
def test_datetime64tz_slice_non_truncate(self):
# GH 30263
df = DataFrame({"x": date_range("2019", periods=10, tz="UTC")})
expected = repr(df)
df = df.iloc[:, :5]
result = repr(df)
assert result == expected
| |
"""Manage database connections"""
import warnings
from pymongo import uri_parser
try:
# pymongo 2.4+
from pymongo import MongoClient, MongoReplicaSetClient
except ImportError:
from pymongo import (Connection as MongoClient,
ReplicaSetConnection as MongoReplicaSetClient,
version)
pymongo_supports_mongoclient = False
message = ('Support for PyMongo {0} has been deprecated. Please upgrade to'
' 2.4 or newer.')
warnings.warn(message.format(version), DeprecationWarning)
else:
pymongo_supports_mongoclient = True
from .exceptions import ConnectionError
__all__ = ('connect', 'get_database', 'ConnectionError')
_connections = None
_databases = None
def connect(host='localhost', name=None, username=None, password=None,
port=None, alias=None, **kwargs):
"""Connect to a database.
:param host: Hostname, IP address, or MongoDB URI of the host.
:type host: str.
:param name: (optional) The name of the MongoDB database.
:type name: str.
:param username: (optional) The username to use for authentication.
:type username: str.
:param password: (optional) The password to use for authentication.
:type password: str.
:param port: (optional) The port of the MongoDB host.
:type port: int.
:param alias: (optional) An alias to use for accessing the database.
If no value is provided, ``name`` will be used.
:type alias: str.
:param \*\*kwargs: All other keyword arguments accepted by
:class:`pymongo.connection.Connection`.
:type \*\*kwargs: \*\*kwargs.
:returns: :class:`pymongo.database.Database` -- the database.
:raises: :class:`ConnectionError`
.. versionchanged:: 0.2.0
``connect()`` now accepts ``replica_set`` as a kwarg, it is
preferred over ``replicaSet``
"""
# The default settings, based on the arguments passed in
settings = {
'host': host,
'name': name,
'port': port,
'username': username,
'password': password,
}
# Extend the settings with all other keyword arguments
settings.update(kwargs)
# Get replicaSet out of **kwargs because it can be passed in as its
# own parameter
#
# NOTE
# Version 0.1.0 wanted a kwarg named replicaSet. 0.2.0 changed it
# to replica_set (replicaSet was meant for parity with PyMongo).
# This is to maintain backwards compatibility.
replica_set = kwargs.pop('replica_set', None)
throw_away = kwargs.pop('replicaSet', None)
if replica_set is None:
replica_set = throw_away
connection, parsed_settings = _get_connection(
host=host, port=port, replica_set=replica_set, **kwargs)
if parsed_settings:
settings.update(parsed_settings)
name = settings.get('name', None)
if name is None:
raise ConnectionError('No database name was provided. '
'Make sure to append it to the host URI.')
if alias is None:
alias = name
# If a username and password have been provided, try to authenticate
# against the database as well. Make sure to save a reference to
# the database so it can be referenced later on.
# Make sure that _databases is a dict before using it
global _databases
if not isinstance(_databases, dict):
_databases = {}
# Capture the database and store it in _databases under its alias
_databases[alias] = db = connection[name]
if 'default' not in _databases:
_databases['default'] = db
if username and password:
db.authenticate(username, password)
return connection
def _get_connection(host, port, replica_set=None, **kwargs):
"""Return a connection to the database.
This will create a connection to a new MongoDB server and store it
internally for later use. If a server is requested that
``_get_connection()`` has seen before, the stored connection will be
used.
If the ``host`` is actually a MongoDB URI, the username, password,
and database name will be parsed from the URI and returned as the
second part of the ``tuple`` returned by this function.
:param host: Hostname, IP address, or MongoDB URI of the host.
:type host: str.
:param port: The port of the MongoDB host.
:type port: int.
:param replica_set: (optional) Name of the replica set when
connecting to one.
:type replica_set: str.
:param \*\*kwargs: All other keyword arguments accepted by
:class:`pymongo.connection.Connection`.
:type \*\*kwargs: \*\*kwargs.
:returns: tuple -- a pair of values containing a
:class:`pymongo.Connection` and any settings
parsed when a URI is provided.
"""
parsed_settings = {}
# If host is already a connection, get out
if hasattr(host, 'database_names'):
return host, parsed_settings
# If a URI has been given for host, parse it and get the settings
if '://' in host:
pieces = uri_parser.parse_uri(host)
name = pieces.get('database', None)
username = pieces.get('username', None)
password = pieces.get('password', None)
# Only update the settings if values were found in the URI
if name is not None:
parsed_settings['name'] = name
if username is not None:
parsed_settings['username'] = username
if password is not None:
parsed_settings['password'] = password
# Check for a replica set
if 'replicaset' in pieces['options']:
replica_set = pieces['options']['replicaset']
# Check the list of nodes in the parsed URI. If there was only
# one, get the updated host and port
if 'nodelist' in pieces and len(pieces['nodelist']) == 1:
host, port = pieces['nodelist'][0]
# For the purpose of building this key, use the default port
# instead of no port so that calls explicitly requesting the default
# port won't be treated differently from calls not requesting one.
# When a replica set is being used, however, use the host string
# with the name of the replica set
#
# NOTE: I can foresee a problem here if there are two replica sets
# running on the same host with the same name on different ports.
# I'll look into that more when I have a better way to test
# replica sets than I currently do.
connection_key = '{0}:{1}'.format(host, replica_set if replica_set else
(port or 27017))
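# For example (hypothetical values): a plain host yields a key like
# 'localhost:27017', while a replica set named 'rs0' yields 'localhost:rs0'.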
global _connections
if not isinstance(_connections, dict):
_connections = {}
if connection_key not in _connections:
# If using a replica set, prepare the settings and class name
if replica_set:
connection_class = MongoReplicaSetClient
settings = {'hosts_or_uri': host, 'replicaSet': replica_set}
else:
connection_class = MongoClient
settings = {'host': host, 'port': port}
# Open a connection to the database and try to connect to it
try:
connection = connection_class(**settings)
except Exception as e:
raise ConnectionError(
"Cannot connection to database '{0}':\n{1}".format(host, e))
# Store the connection in the dictionary for easier retrieval
# next time
_connections[connection_key] = connection
return _connections[connection_key], parsed_settings
def get_database(name):
"""Return a reference to a database.
:param name: The name of the database.
:type name: str.
:returns: ``pymongo.database.Database`` -- a database object.
"""
if not (_databases and name in _databases):
raise ConnectionError("There is no connection for database '{0}'. "
"Use `simon.connection.connect()` to connect "
"to it.".format(name))
return _databases[name]
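# A minimal usage sketch (assumed host, database, and collection names;
# not part of the original module):
#
#     from simon import connection
#
#     # Connect once at startup; the database name comes from the URI
#     connection.connect('mongodb://localhost:27017/blog', alias='blog')
#
#     # Elsewhere, retrieve the same database by its alias
#     db = connection.get_database('blog')
#     db.posts.insert({'title': 'hello'})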
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from GenomeFileUtil.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'GenomeFileUtil'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil # noqa @IgnorePep8
impl_GenomeFileUtil = GenomeFileUtil(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
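# A minimal sketch of what the custom encoder buys us (hypothetical data):
# sets and frozensets are not JSON-serializable by default, but with
# cls=JSONObjectEncoder they are emitted as lists.
#
#     json.dumps({'ids': {1, 2, 3}}, cls=JSONObjectEncoder)  # -> '{"ids": [1, 2, 3]}'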
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'GenomeFileUtil'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_GenomeFileUtil.genbank_to_genome,
name='GenomeFileUtil.genbank_to_genome',
types=[dict])
self.method_authentication['GenomeFileUtil.genbank_to_genome'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.genome_to_gff,
name='GenomeFileUtil.genome_to_gff',
types=[dict])
self.method_authentication['GenomeFileUtil.genome_to_gff'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.metagenome_to_gff,
name='GenomeFileUtil.metagenome_to_gff',
types=[dict])
self.method_authentication['GenomeFileUtil.metagenome_to_gff'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.genome_to_genbank,
name='GenomeFileUtil.genome_to_genbank',
types=[dict])
self.method_authentication['GenomeFileUtil.genome_to_genbank'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.genome_features_to_fasta,
name='GenomeFileUtil.genome_features_to_fasta',
types=[dict])
self.method_authentication['GenomeFileUtil.genome_features_to_fasta'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.genome_proteins_to_fasta,
name='GenomeFileUtil.genome_proteins_to_fasta',
types=[dict])
self.method_authentication['GenomeFileUtil.genome_proteins_to_fasta'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.export_genome_as_genbank,
name='GenomeFileUtil.export_genome_as_genbank',
types=[dict])
self.method_authentication['GenomeFileUtil.export_genome_as_genbank'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.export_genome_as_gff,
name='GenomeFileUtil.export_genome_as_gff',
types=[dict])
self.method_authentication['GenomeFileUtil.export_genome_as_gff'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.export_genome_features_protein_to_fasta,
name='GenomeFileUtil.export_genome_features_protein_to_fasta',
types=[dict])
self.method_authentication['GenomeFileUtil.export_genome_features_protein_to_fasta'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.export_metagenome_as_gff,
name='GenomeFileUtil.export_metagenome_as_gff',
types=[dict])
self.method_authentication['GenomeFileUtil.export_metagenome_as_gff'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.fasta_gff_to_genome,
name='GenomeFileUtil.fasta_gff_to_genome',
types=[dict])
self.method_authentication['GenomeFileUtil.fasta_gff_to_genome'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.fasta_gff_to_genome_json,
name='GenomeFileUtil.fasta_gff_to_genome_json',
types=[dict])
self.method_authentication['GenomeFileUtil.fasta_gff_to_genome_json'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.fasta_gff_to_metagenome,
name='GenomeFileUtil.fasta_gff_to_metagenome',
types=[dict])
self.method_authentication['GenomeFileUtil.fasta_gff_to_metagenome'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.save_one_genome,
name='GenomeFileUtil.save_one_genome',
types=[dict])
self.method_authentication['GenomeFileUtil.save_one_genome'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.ws_obj_gff_to_genome,
name='GenomeFileUtil.ws_obj_gff_to_genome',
types=[dict])
self.method_authentication['GenomeFileUtil.ws_obj_gff_to_genome'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.ws_obj_gff_to_metagenome,
name='GenomeFileUtil.ws_obj_gff_to_metagenome',
types=[dict])
self.method_authentication['GenomeFileUtil.ws_obj_gff_to_metagenome'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.update_taxon_assignments,
name='GenomeFileUtil.update_taxon_assignments',
types=[dict])
self.method_authentication['GenomeFileUtil.update_taxon_assignments'] = 'required' # noqa
self.rpc_service.add(impl_GenomeFileUtil.status,
name='GenomeFileUtil.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'GenomeFileUtil ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
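# Worked example (hypothetical offset): for a local timezone of UTC+5:30,
# delta is about 5.5 hours; (delta_seconds + 30) // 60 rounds to the
# nearest whole minute, and divmod(330, 60) gives hh=5, mm=30.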
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening
# on port 9999 by default, execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
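# A minimal sketch of driving the server programmatically (assumed flow,
# not part of the original module): start it in a child process so that
# stop_server() can be called later, then issue a JSON-RPC call to the
# unauthenticated status method.
#
#     port = start_server(newprocess=True)
#     body = json.dumps({'version': '1.1', 'id': '1',
#                        'method': 'GenomeFileUtil.status', 'params': []})
#     print(_requests.post('http://localhost:%d' % port, data=body).text)
#     stop_server()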
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
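# A hypothetical input file for process_async_cli() (assumed shape; the
# 'version' and 'id' fields are filled in with defaults when absent):
#
#     {"method": "GenomeFileUtil.status", "params": [], "id": "1"}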
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
| |
import contextlib
import datetime
import os
import tempfile
from alembic import command as alembic_command
from alembic import config as alembic_config
import mock
import netaddr
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy import pool
from sqlalchemy.sql import column
from sqlalchemy.sql import select
from sqlalchemy.sql import table
from quark.db.custom_types import INET
import quark.db.migration
from quark.tests import test_base
class BaseMigrationTest(test_base.TestBase):
def setUp(self):
self.config = alembic_config.Config(
os.path.join(quark.db.migration.__path__[0], 'alembic.ini'))
self.config.set_main_option('script_location',
'quark.db.migration:alembic')
self.config.set_main_option("quiet_mode", "True")
self.fileno, self.filepath = tempfile.mkstemp()
secret_cfg = mock.MagicMock()
secret_cfg.database.connection = "sqlite:///" + self.filepath
self.config.neutron_config = secret_cfg
self.engine = create_engine(
self.config.neutron_config.database.connection,
poolclass=pool.NullPool)
self.connection = self.engine.connect()
def tearDown(self):
    self.connection.close()
    # close the file descriptor returned by mkstemp() before unlinking,
    # otherwise each test leaks an open fd
    os.close(self.fileno)
    os.unlink(self.filepath)
class Test2748e48cee3a(BaseMigrationTest):
def setUp(self):
super(Test2748e48cee3a, self).setUp()
alembic_command.upgrade(self.config, '1284c81cf727')
self.ip_policy_cidrs = table(
'quark_ip_policy_cidrs',
column('id', sa.String(length=36)),
column('created_at', sa.DateTime()),
column('ip_policy_id', sa.String(length=36)),
column('cidr', sa.String(length=64)))
self.subnets = table(
'quark_subnets',
column('id', sa.String(length=36)),
column('_cidr', sa.String(length=64)),
column('ip_policy_id', sa.String(length=36)))
def test_upgrade_no_ip_policy_cidr(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id=None))
alembic_command.upgrade(self.config, '2748e48cee3a')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 0)
def test_upgrade_ip_policy_cidr_inside(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id="111"))
dt = datetime.datetime(1970, 1, 1)
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="222", created_at=dt,
ip_policy_id="111", cidr="192.168.10.0/32"))
alembic_command.upgrade(self.config, '2748e48cee3a')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], "222")
self.assertEqual(result["created_at"], dt)
self.assertEqual(result["ip_policy_id"], "111")
self.assertEqual(result["cidr"], "192.168.10.0/32")
def test_upgrade_ip_policy_cidr_overlaps(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id="111"))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="222", created_at=datetime.date(1970, 1, 1),
ip_policy_id="111", cidr="192.168.10.0/16"))
with contextlib.nested(
mock.patch("neutron.openstack.common.uuidutils"),
mock.patch("oslo.utils.timeutils")
) as (uuid, tu):
tu.utcnow.return_value = datetime.datetime(2004, 2, 14)
uuid.generate_uuid.return_value = "foo"
alembic_command.upgrade(self.config, '2748e48cee3a')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], uuid.generate_uuid.return_value)
self.assertEqual(result["created_at"], tu.utcnow.return_value)
self.assertEqual(result["ip_policy_id"], "111")
self.assertEqual(result["cidr"], "192.168.10.0/24")
def test_upgrade_ip_policy_cidr_overlaps_v6(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="fd00::/8", ip_policy_id="111"))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="222", created_at=datetime.date(1970, 1, 1),
ip_policy_id="111", cidr="fd00::/7"))
with contextlib.nested(
mock.patch("neutron.openstack.common.uuidutils"),
mock.patch("oslo.utils.timeutils")
) as (uuid, tu):
tu.utcnow.return_value = datetime.datetime(2004, 2, 14)
uuid.generate_uuid.return_value = "foo"
alembic_command.upgrade(self.config, '2748e48cee3a')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], uuid.generate_uuid.return_value)
self.assertEqual(result["created_at"], tu.utcnow.return_value)
self.assertEqual(result["ip_policy_id"], "111")
self.assertEqual(result["cidr"], "fd00::/8")
def test_upgrade_ip_policy_cidr_outside(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id="111"))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="222", created_at=datetime.date(1970, 1, 1),
ip_policy_id="111", cidr="0.0.0.0/24"))
alembic_command.upgrade(self.config, '2748e48cee3a')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 0)
def test_upgrade_bulk(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id=None),
dict(id="001", _cidr="192.168.10.0/24", ip_policy_id="111"),
dict(id="002", _cidr="192.168.10.0/24", ip_policy_id="112"),
dict(id="003", _cidr="192.168.10.0/24", ip_policy_id="113"))
dt = datetime.datetime(1970, 1, 1)
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="221", created_at=dt, ip_policy_id="111",
cidr="192.168.10.0/32"),
dict(id="222", created_at=dt, ip_policy_id="112",
cidr="192.168.10.0/16"),
dict(id="223", created_at=dt, ip_policy_id="113",
cidr="0.0.0.0/24"))
with contextlib.nested(
mock.patch("neutron.openstack.common.uuidutils"),
mock.patch("oslo.utils.timeutils")
) as (uuid, tu):
tu.utcnow.return_value = datetime.datetime(2004, 2, 14)
uuid.generate_uuid.return_value = "foo"
alembic_command.upgrade(self.config, '2748e48cee3a')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 2)
result = results[0] if results[0]["id"] == "foo" else results[1]
self.assertEqual(result["id"], uuid.generate_uuid.return_value)
self.assertEqual(result["created_at"], tu.utcnow.return_value)
self.assertEqual(result["ip_policy_id"], "112")
self.assertEqual(result["cidr"], "192.168.10.0/24")
result = results[0] if results[0]["id"] != "foo" else results[1]
self.assertEqual(result["id"], "221")
self.assertEqual(result["created_at"], dt)
self.assertEqual(result["ip_policy_id"], "111")
self.assertEqual(result["cidr"], "192.168.10.0/32")
def test_upgrade_multiple_ip_policy_cidrs(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id="111"))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="221", created_at=datetime.date(1970, 1, 1),
ip_policy_id="111", cidr="0.0.0.0/24"),
dict(id="222", created_at=datetime.date(1970, 1, 1),
ip_policy_id="111", cidr="192.168.10.255/32"),
dict(id="223", created_at=datetime.date(1970, 1, 1),
ip_policy_id="111", cidr="192.168.10.0/23"))
with contextlib.nested(
mock.patch("neutron.openstack.common.uuidutils"),
mock.patch("oslo.utils.timeutils")
) as (uuid, tu):
tu.utcnow.return_value = datetime.datetime(2004, 2, 14)
uuid.generate_uuid.return_value = "foo"
alembic_command.upgrade(self.config, '2748e48cee3a')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], uuid.generate_uuid.return_value)
self.assertEqual(result["created_at"], tu.utcnow.return_value)
self.assertEqual(result["ip_policy_id"], "111")
self.assertEqual(result["cidr"], "192.168.10.0/24")
def test_downgrade(self):
alembic_command.upgrade(self.config, '2748e48cee3a')
with self.assertRaises(NotImplementedError):
alembic_command.downgrade(self.config, '1284c81cf727')
class Test45a07fac3d38(BaseMigrationTest):
def setUp(self):
super(Test45a07fac3d38, self).setUp()
alembic_command.upgrade(self.config, '2748e48cee3a')
self.ip_policy_cidrs = table(
'quark_ip_policy_cidrs',
column('id', sa.String(length=36)),
column('created_at', sa.DateTime()),
column('ip_policy_id', sa.String(length=36)),
column('cidr', sa.String(length=64)))
self.subnets = table(
'quark_subnets',
column('id', sa.String(length=36)),
column('_cidr', sa.String(length=64)),
column('ip_policy_id', sa.String(length=36)))
def test_upgrade_no_subnets_no_ip_policy_cidrs(self):
alembic_command.upgrade(self.config, '45a07fac3d38')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 0)
def test_upgrade_with_subnets_no_ip_policy(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id=None))
alembic_command.upgrade(self.config, '45a07fac3d38')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 0)
def test_upgrade_with_subnets_no_ip_policy_cidrs(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id="111"))
alembic_command.upgrade(self.config, '45a07fac3d38')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 2)
default_cidrs = ["192.168.10.0/32", "192.168.10.255/32"]
self.assertIn(results[0]["cidr"], default_cidrs)
self.assertIn(results[1]["cidr"], default_cidrs)
self.assertNotEqual(results[0]["cidr"], results[1]["cidr"])
def test_upgrade_with_subnets_non_default_ip_policy_cidrs(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id="111"))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="222", created_at=datetime.date(1970, 1, 1),
ip_policy_id="111", cidr="192.168.10.13/32"))
with contextlib.nested(
mock.patch("neutron.openstack.common.uuidutils"),
mock.patch("oslo.utils.timeutils")
) as (uuid, tu):
uuid.generate_uuid.side_effect = (1, 2, 3)
tu.utcnow.return_value = datetime.datetime(1970, 1, 1)
alembic_command.upgrade(self.config, '45a07fac3d38')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 3)
default_cidrs = ["192.168.10.0/32", "192.168.10.255/32",
"192.168.10.13/32"]
for result in results:
self.assertIn(result["cidr"], default_cidrs)
self.assertGreaterEqual(int(result["id"]), 1)
self.assertLessEqual(int(result["id"]), 3)
self.assertEqual(result["created_at"], tu.utcnow.return_value)
self.assertNotEqual(results[0]["cidr"], results[1]["cidr"])
self.assertNotEqual(results[0]["cidr"], results[2]["cidr"])
self.assertNotEqual(results[1]["cidr"], results[2]["cidr"])
def test_upgrade_with_subnets_non_default_ip_policy_cidrs_v6(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="fd00::/64", ip_policy_id="111"))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="222", created_at=datetime.date(1970, 1, 1),
ip_policy_id="111", cidr="fd00::3/128"))
with contextlib.nested(
mock.patch("neutron.openstack.common.uuidutils"),
mock.patch("oslo.utils.timeutils")
) as (uuid, tu):
uuid.generate_uuid.side_effect = (1, 2, 3)
tu.utcnow.return_value = datetime.datetime(1970, 1, 1)
alembic_command.upgrade(self.config, '45a07fac3d38')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 3)
default_cidrs = ["fd00::/128", "fd00::3/128",
"fd00::ffff:ffff:ffff:ffff/128"]
for result in results:
self.assertIn(result["cidr"], default_cidrs)
self.assertGreaterEqual(int(result["id"]), 1)
self.assertLessEqual(int(result["id"]), 3)
self.assertEqual(result["created_at"], tu.utcnow.return_value)
self.assertNotEqual(results[0]["cidr"], results[1]["cidr"])
self.assertNotEqual(results[0]["cidr"], results[2]["cidr"])
self.assertNotEqual(results[1]["cidr"], results[2]["cidr"])
def test_upgrade_with_subnets_default_ip_policy_cidrs(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id="111"))
dt = datetime.datetime(1970, 1, 1)
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="222", created_at=dt,
ip_policy_id="111", cidr="192.168.10.0/32"),
dict(id="223", created_at=dt,
ip_policy_id="111", cidr="192.168.10.255/32"))
alembic_command.upgrade(self.config, '45a07fac3d38')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 2)
default_cidrs = ["192.168.10.0/32", "192.168.10.255/32"]
self.assertIn(results[0]["cidr"], default_cidrs)
self.assertIn(results[1]["cidr"], default_cidrs)
self.assertTrue(results[0]["id"] == "222" or results[0]["id"] == "223")
self.assertTrue(results[1]["id"] == "222" or results[1]["id"] == "223")
self.assertEqual(results[0]["created_at"], dt)
self.assertEqual(results[1]["created_at"], dt)
def test_upgrade_bulk(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", _cidr="192.168.10.0/24", ip_policy_id=None),
dict(id="001", _cidr="192.168.10.0/24", ip_policy_id="111"),
dict(id="002", _cidr="192.168.10.0/24", ip_policy_id="112"),
dict(id="003", _cidr="192.168.10.0/24", ip_policy_id="113"))
dt = datetime.datetime(1970, 1, 1)
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="221", created_at=dt,
ip_policy_id="112", cidr="192.168.10.13/32"),
dict(id="222", created_at=dt,
ip_policy_id="113", cidr="192.168.10.0/32"),
dict(id="223", created_at=dt,
ip_policy_id="113", cidr="192.168.10.255/32"))
alembic_command.upgrade(self.config, '45a07fac3d38')
results = self.connection.execute(
select([self.ip_policy_cidrs]).where(
self.ip_policy_cidrs.c.ip_policy_id == None)).fetchall() # noqa
self.assertEqual(len(results), 0)
results = self.connection.execute(
select([self.ip_policy_cidrs]).where(
self.ip_policy_cidrs.c.ip_policy_id == "111")).fetchall()
self.assertEqual(len(results), 2)
default_cidrs = ["192.168.10.0/32", "192.168.10.255/32"]
self.assertIn(results[0]["cidr"], default_cidrs)
self.assertIn(results[1]["cidr"], default_cidrs)
self.assertNotEqual(results[0]["cidr"], results[1]["cidr"])
results = self.connection.execute(
select([self.ip_policy_cidrs]).where(
self.ip_policy_cidrs.c.ip_policy_id == "112")).fetchall()
self.assertEqual(len(results), 3)
default_cidrs = ["192.168.10.0/32", "192.168.10.255/32",
"192.168.10.13/32"]
for result in results:
self.assertIn(result["cidr"], default_cidrs)
self.assertNotEqual(results[0]["cidr"], results[1]["cidr"])
self.assertNotEqual(results[0]["cidr"], results[2]["cidr"])
self.assertNotEqual(results[1]["cidr"], results[2]["cidr"])
results = self.connection.execute(
select([self.ip_policy_cidrs]).where(
self.ip_policy_cidrs.c.ip_policy_id == "113")).fetchall()
self.assertEqual(len(results), 2)
default_cidrs = ["192.168.10.0/32", "192.168.10.255/32"]
self.assertIn(results[0]["cidr"], default_cidrs)
self.assertIn(results[1]["cidr"], default_cidrs)
self.assertTrue(results[0]["id"] == "222" or results[0]["id"] == "223")
self.assertTrue(results[1]["id"] == "222" or results[1]["id"] == "223")
self.assertEqual(results[0]["created_at"], dt)
self.assertEqual(results[1]["created_at"], dt)
def test_downgrade(self):
alembic_command.upgrade(self.config, '45a07fac3d38')
with self.assertRaises(NotImplementedError):
alembic_command.downgrade(self.config, '2748e48cee3a')
class Test552b213c2b8c(BaseMigrationTest):
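# NOTE: judging by the assertions below, migration 552b213c2b8c creates a
# default quark_ip_policy (with network/broadcast cidrs for IPv4, or the
# first/last address for IPv6) for every subnet that has no ip_policy_id,
# and leaves subnets that already reference a policy untouched.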
def setUp(self):
super(Test552b213c2b8c, self).setUp()
alembic_command.upgrade(self.config, '45a07fac3d38')
self.ip_policy = table(
'quark_ip_policy',
column('id', sa.String(length=36)),
column('tenant_id', sa.String(length=255)),
column('created_at', sa.DateTime()))
self.ip_policy_cidrs = table(
'quark_ip_policy_cidrs',
column('id', sa.String(length=36)),
column('created_at', sa.DateTime()),
column('ip_policy_id', sa.String(length=36)),
column('cidr', sa.String(length=64)))
self.subnets = table(
'quark_subnets',
column('id', sa.String(length=36)),
column('tenant_id', sa.String(length=255)),
column('_cidr', sa.String(length=64)),
column('ip_policy_id', sa.String(length=36)))
def test_upgrade_no_subnets(self):
alembic_command.upgrade(self.config, '552b213c2b8c')
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 0)
def test_upgrade_subnets_with_ip_policy(self):
dt = datetime.datetime(1970, 1, 1)
self.connection.execute(
self.subnets.insert(),
dict(id="000", tenant_id="foo", _cidr="192.168.10.0/24",
ip_policy_id="111"))
self.connection.execute(
self.ip_policy.insert(),
dict(id="111", tenant_id="foo", created_at=dt))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="221", created_at=dt,
ip_policy_id="111", cidr="192.168.10.13/32"))
alembic_command.upgrade(self.config, '552b213c2b8c')
results = self.connection.execute(
select([self.ip_policy])).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], "111")
self.assertEqual(result["tenant_id"], "foo")
self.assertEqual(result["created_at"], dt)
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], "221")
self.assertEqual(result["created_at"], dt)
self.assertEqual(result["ip_policy_id"], "111")
self.assertEqual(result["cidr"], "192.168.10.13/32")
results = self.connection.execute(
select([self.subnets])).fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["ip_policy_id"], "111")
def test_upgrade_subnets_no_ip_policy(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", tenant_id="foo", _cidr="192.168.10.0/24",
ip_policy_id=None))
with contextlib.nested(
mock.patch("neutron.openstack.common.uuidutils"),
mock.patch("oslo.utils.timeutils")
) as (uuid, tu):
dt = datetime.datetime(1970, 1, 1)
tu.utcnow.return_value = dt
uuid.generate_uuid.side_effect = ("666", "667", "668")
alembic_command.upgrade(self.config, '552b213c2b8c')
results = self.connection.execute(
select([self.ip_policy])).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], "666")
self.assertEqual(result["tenant_id"], "foo")
self.assertEqual(result["created_at"], dt)
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 2)
for result in results:
self.assertIn(result["id"], ("667", "668"))
self.assertEqual(result["created_at"], dt)
self.assertEqual(result["ip_policy_id"], "666")
self.assertIn(result["cidr"],
("192.168.10.0/32", "192.168.10.255/32"))
self.assertNotEqual(results[0]["cidr"], results[1]["cidr"])
results = self.connection.execute(
select([self.subnets])).fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["ip_policy_id"], "666")
def test_upgrade_subnets_no_ip_policy_v6(self):
self.connection.execute(
self.subnets.insert(),
dict(id="000", tenant_id="foo", _cidr="fd00::/64",
ip_policy_id=None))
with contextlib.nested(
mock.patch("neutron.openstack.common.uuidutils"),
mock.patch("oslo.utils.timeutils")
) as (uuid, tu):
dt = datetime.datetime(1970, 1, 1)
tu.utcnow.return_value = dt
uuid.generate_uuid.side_effect = ("666", "667", "668")
alembic_command.upgrade(self.config, '552b213c2b8c')
results = self.connection.execute(
select([self.ip_policy])).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], "666")
self.assertEqual(result["tenant_id"], "foo")
self.assertEqual(result["created_at"], dt)
results = self.connection.execute(
select([self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 2)
for result in results:
self.assertIn(result["id"], ("667", "668"))
self.assertEqual(result["created_at"], dt)
self.assertEqual(result["ip_policy_id"], "666")
self.assertIn(result["cidr"],
("fd00::/128",
"fd00::ffff:ffff:ffff:ffff/128"))
self.assertNotEqual(results[0]["cidr"], results[1]["cidr"])
results = self.connection.execute(
select([self.subnets])).fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["ip_policy_id"], "666")
def test_upgrade_bulk(self):
dt = datetime.datetime(1970, 1, 1)
self.connection.execute(
self.subnets.insert(),
dict(id="000", tenant_id="foo", _cidr="192.168.10.0/24",
ip_policy_id="111"),
dict(id="001", tenant_id="foo", _cidr="192.168.10.0/24",
ip_policy_id=None),
dict(id="002", tenant_id="foo", _cidr="fd00::/64",
ip_policy_id=None))
self.connection.execute(
self.ip_policy.insert(),
dict(id="111", tenant_id="foo", created_at=dt))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="221", created_at=dt,
ip_policy_id="111", cidr="192.168.10.13/32"))
with contextlib.nested(
mock.patch("neutron.openstack.common.uuidutils"),
mock.patch("oslo.utils.timeutils")
) as (uuid, tu):
tu.utcnow.return_value = dt
uuid.generate_uuid.side_effect = ("5", "6", "7", "8", "9", "10")
alembic_command.upgrade(self.config, '552b213c2b8c')
results = self.connection.execute(
select([self.ip_policy]).where(
self.ip_policy.c.id == "111")).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], "111")
self.assertEqual(result["tenant_id"], "foo")
self.assertEqual(result["created_at"], dt)
results = self.connection.execute(
select([self.ip_policy_cidrs]).where(
self.ip_policy_cidrs.c.ip_policy_id == "111")).fetchall()
self.assertEqual(len(results), 1)
result = results[0]
self.assertEqual(result["id"], "221")
self.assertEqual(result["created_at"], dt)
self.assertEqual(result["ip_policy_id"], "111")
self.assertEqual(result["cidr"], "192.168.10.13/32")
results = self.connection.execute(
select([self.subnets]).where(
self.subnets.c.ip_policy_id == "111")).fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["ip_policy_id"], "111")
results = self.connection.execute(
select([self.ip_policy]).where(
self.ip_policy.c.id != "111")).fetchall()
self.assertEqual(len(results), 2)
for result in results:
self.assertIn(int(result["id"]), range(5, 11))
self.assertEqual(result["tenant_id"], "foo")
self.assertEqual(result["created_at"], dt)
results = self.connection.execute(
select([self.ip_policy_cidrs]).where(
self.ip_policy_cidrs.c.ip_policy_id != "111")).fetchall()
self.assertEqual(len(results), 4)
for result in results:
self.assertIn(int(result["id"]), range(5, 11))
self.assertEqual(result["created_at"], dt)
self.assertIn(int(result["ip_policy_id"]), range(5, 11))
self.assertIn(result["cidr"], (
"192.168.10.0/32", "192.168.10.255/32",
"fd00::/128", "fd00::ffff:ffff:ffff:ffff/128"))
results = self.connection.execute(
select([self.subnets]).where(
self.subnets.c.ip_policy_id != "111")).fetchall()
self.assertEqual(len(results), 2)
for subnet in results:
self.assertIn(int(subnet["ip_policy_id"]), range(5, 11))
def test_downgrade(self):
alembic_command.upgrade(self.config, '552b213c2b8c')
with self.assertRaises(NotImplementedError):
alembic_command.downgrade(self.config, '45a07fac3d38')
class Test28e55acaf366(BaseMigrationTest):
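# NOTE: based on the assertions below, migration 28e55acaf366 populates
# quark_ip_policy.size with the total number of addresses covered by the
# policy's cidrs (e.g. 3 for a /32 plus a /31, 2 ** 64 for an IPv6 /64).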
def setUp(self):
super(Test28e55acaf366, self).setUp()
alembic_command.upgrade(self.config, '3d22de205729')
self.ip_policy = table('quark_ip_policy',
column('id', sa.String(length=36)),
column('size', INET()))
self.ip_policy_cidrs = table(
'quark_ip_policy_cidrs',
column('id', sa.String(length=36)),
column('ip_policy_id', sa.String(length=36)),
column('cidr', sa.String(length=64)))
def test_upgrade_none(self):
alembic_command.upgrade(self.config, '28e55acaf366')
results = self.connection.execute(select([
self.ip_policy])).fetchall()
self.assertEqual(len(results), 0)
results = self.connection.execute(select([
self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 0)
def test_upgrade_v4(self):
self.connection.execute(
self.ip_policy.insert(), dict(id="1", size=None))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="2", ip_policy_id="1", cidr="192.168.10.13/32"),
dict(id="3", ip_policy_id="1", cidr="192.168.10.16/31"))
alembic_command.upgrade(self.config, '28e55acaf366')
results = self.connection.execute(select([
self.ip_policy])).fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["id"], "1")
self.assertEqual(results[0]["size"], 3)
def test_upgrade_v6(self):
self.connection.execute(
self.ip_policy.insert(), dict(id="1", size=None))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="2", ip_policy_id="1", cidr="fd00::/64"))
alembic_command.upgrade(self.config, '28e55acaf366')
results = self.connection.execute(select([
self.ip_policy])).fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["id"], "1")
self.assertEqual(results[0]["size"], 2 ** 64)
def test_upgrade_bulk(self):
self.connection.execute(
self.ip_policy.insert(),
dict(id="1", size=None),
dict(id="2", size=None))
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="2", ip_policy_id="1", cidr="192.168.10.13/32"),
dict(id="3", ip_policy_id="1", cidr="192.168.10.16/31"),
dict(id="4", ip_policy_id="2", cidr="fd00::/64"))
alembic_command.upgrade(self.config, '28e55acaf366')
results = self.connection.execute(select([
self.ip_policy])).fetchall()
self.assertEqual(len(results), 2)
for result in results:
self.assertIn(result["id"], ("1", "2"))
if result["id"] == "1":
self.assertEqual(result["size"], 3)
elif result["id"] == "2":
self.assertEqual(result["size"], 2 ** 64)
def test_downgrade(self):
alembic_command.upgrade(self.config, '28e55acaf366')
with self.assertRaises(NotImplementedError):
alembic_command.downgrade(self.config, '3d22de205729')
class Test1664300cb03a(BaseMigrationTest):
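# NOTE: based on the assertions below, migration 1664300cb03a backfills
# first_ip/last_ip on quark_ip_policy_cidrs from the cidr column; IPv4 cidrs
# are stored as their IPv6-mapped integer values, IPv6 cidrs as-is.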
def setUp(self):
super(Test1664300cb03a, self).setUp()
alembic_command.upgrade(self.config, '1acd075bd7e1')
self.ip_policy_cidrs = table(
'quark_ip_policy_cidrs',
column('id', sa.String(length=36)),
column('ip_policy_id', sa.String(length=36)),
column('cidr', sa.String(length=64)),
column('first_ip', INET()),
column('last_ip', INET()))
def test_upgrade_empty(self):
alembic_command.upgrade(self.config, '1664300cb03a')
results = self.connection.execute(select([
self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 0)
def test_upgrade_ipv4(self):
net = netaddr.IPNetwork("192.168.10.13/31")
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="1", ip_policy_id="1", cidr=str(net)))
alembic_command.upgrade(self.config, '1664300cb03a')
results = self.connection.execute(select([
self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["id"], "1")
self.assertEqual(results[0]["ip_policy_id"], "1")
self.assertEqual(results[0]["cidr"], str(net))
self.assertEqual(results[0]["first_ip"], net.ipv6().first)
self.assertEqual(results[0]["last_ip"], net.ipv6().last)
def test_upgrade_ipv6(self):
net = netaddr.IPNetwork("fd00::/64")
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="1", ip_policy_id="1", cidr=str(net)))
alembic_command.upgrade(self.config, '1664300cb03a')
results = self.connection.execute(select([
self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 1)
self.assertEqual(results[0]["id"], "1")
self.assertEqual(results[0]["ip_policy_id"], "1")
self.assertEqual(results[0]["cidr"], "fd00::/64")
self.assertEqual(results[0]["first_ip"], net.first)
self.assertEqual(results[0]["last_ip"], net.last)
def test_upgrade_bulk(self):
netv4 = netaddr.IPNetwork("192.168.10.13/31")
netv6 = netaddr.IPNetwork("fd00::/64")
self.connection.execute(
self.ip_policy_cidrs.insert(),
dict(id="1", ip_policy_id="1", cidr=str(netv4)),
dict(id="2", ip_policy_id="2", cidr=str(netv6)))
alembic_command.upgrade(self.config, '1664300cb03a')
results = self.connection.execute(select([
self.ip_policy_cidrs])).fetchall()
self.assertEqual(len(results), 2)
for result in results:
self.assertIn(result["cidr"], (str(netv4), str(netv6)))
if result["cidr"] == "192.168.10.13/31":
self.assertEqual(result["first_ip"], netv4.ipv6().first)
self.assertEqual(result["last_ip"], netv4.ipv6().last)
else:
self.assertEqual(result["first_ip"], netv6.first)
self.assertEqual(result["last_ip"], netv6.last)
def test_downgrade(self):
alembic_command.upgrade(self.config, '1664300cb03a')
with self.assertRaises(NotImplementedError):
alembic_command.downgrade(self.config, '1acd075bd7e1')
class Test4fc07b41d45c(BaseMigrationTest):
def _mock_inserts(self):
mock_rows = [dict(id="1", _deallocated=False, address_type=None),
dict(id="2", _deallocated=True, address_type=None),
dict(id="3", _deallocated=None, address_type=None),
dict(id="4", _deallocated=True, address_type=None),
dict(id="5", _deallocated=False, address_type=None)]
for row in mock_rows:
self.ip_addresses_table.insert().values(**row).execute()
def setUp(self):
super(Test4fc07b41d45c, self).setUp()
self.previous_revision = "42a3c8c0db75"
self.current_revision = "4fc07b41d45c"
self.metadata = sa.MetaData(bind=self.engine)
# NOTE(thomasem): Create a quark_ip_addresses table that has an
# identical schema to the revision before it for the columns this data
# migration is concerned with.
self.ip_addresses_table = sa.Table(
'quark_ip_addresses', self.metadata,
sa.Column('id', sa.String(length=36), primary_key=True),
sa.Column('_deallocated', sa.Boolean()),
sa.Column('address_type', sa.Enum('fixed', 'shared', 'floating'))
)
self.metadata.create_all()
alembic_command.stamp(self.config, self.previous_revision)
def test_upgrade(self):
self._mock_inserts()
alembic_command.upgrade(self.config, self.current_revision)
results = self.connection.execute(
select([self.ip_addresses_table]).order_by(
self.ip_addresses_table.c.id)).fetchall()
expected_results = [
(u'1', False, u'fixed'),
(u'2', True, None),
(u'3', None, None),
(u'4', True, None),
(u'5', False, u'fixed')
]
self.assertEqual(results, expected_results)
def test_downgrade(self):
self._mock_inserts()
alembic_command.upgrade(self.config, self.current_revision)
alembic_command.downgrade(self.config, self.previous_revision)
results = self.connection.execute(
select([self.ip_addresses_table]).order_by(
self.ip_addresses_table.c.id)).fetchall()
expected_results = [
(u'1', False, None),
(u'2', True, None),
(u'3', None, None),
(u'4', True, None),
(u'5', False, None)
]
self.assertEqual(results, expected_results)
def test_upgrade_empty(self):
alembic_command.upgrade(self.config, self.current_revision)
results = self.connection.execute(
select([self.ip_addresses_table]).order_by(
self.ip_addresses_table.c.id)).fetchall()
expected_results = []
self.assertEqual(results, expected_results)
| |
from __future__ import absolute_import
from typing import Any, Iterable, Dict, Tuple, Callable, Text, Mapping, Optional
import requests
import json
import sys
import inspect
import logging
import re
from six.moves import urllib
from functools import reduce
from requests import Response
from django.utils.translation import ugettext as _
from zerver.models import Realm, UserProfile, get_realm_by_email_domain, get_user_profile_by_id, get_client, \
GENERIC_INTERFACE, Service, SLACK_INTERFACE, email_to_domain, get_service_profile
from zerver.lib.actions import check_send_message
from zerver.lib.queue import queue_json_publish
from zerver.lib.validator import check_dict, check_string
from zerver.decorator import JsonableError
MAX_REQUEST_RETRIES = 3
class OutgoingWebhookServiceInterface(object):
def __init__(self, base_url, token, user_profile, service_name):
# type: (Text, Text, UserProfile, Text) -> None
self.base_url = base_url # type: Text
self.token = token # type: Text
self.user_profile = user_profile # type: UserProfile
self.service_name = service_name # type: Text
# Given an event that triggers an outgoing webhook operation, returns:
# - The REST operation that should be performed
# - The body of the request
#
# The REST operation is a dictionary with the following keys:
# - method
# - base_url
# - relative_url_path
# - request_kwargs
def process_event(self, event):
# type: (Dict[Text, Any]) -> Tuple[Dict[str, Any], Any]
raise NotImplementedError()
# Given a successful outgoing webhook REST operation, returns the message
# to send back to the user (or None if no message should be sent).
def process_success(self, response, event):
# type: (Response, Dict[Text, Any]) -> Optional[str]
raise NotImplementedError()
class GenericOutgoingWebhookService(OutgoingWebhookServiceInterface):
def process_event(self, event):
# type: (Dict[Text, Any]) -> Tuple[Dict[str, Any], Any]
rest_operation = {'method': 'POST',
'relative_url_path': '',
'base_url': self.base_url,
'request_kwargs': {}}
request_data = {"data": event['command'],
"message": event['message'],
"token": self.token}
return rest_operation, json.dumps(request_data)
def process_success(self, response, event):
# type: (Response, Dict[Text, Any]) -> Optional[str]
response_json = json.loads(response.text)
if "response_not_required" in response_json and response_json['response_not_required']:
return None
if "response_string" in response_json:
return str(response_json['response_string'])
else:
return None
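# Illustrative response bodies that process_success() above understands:
#   {"response_not_required": true}            -> no reply is sent
#   {"response_string": "All checks passed."}  -> that text is sent back
#   anything else                               -> no reply is sent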
class SlackOutgoingWebhookService(OutgoingWebhookServiceInterface):
def process_event(self, event):
# type: (Dict[Text, Any]) -> Tuple[Dict[str, Any], Any]
rest_operation = {'method': 'POST',
'relative_url_path': '',
'base_url': self.base_url,
'request_kwargs': {}}
if event['message']['type'] == 'private':
raise NotImplementedError("Private messaging service not supported.")
service = get_service_profile(event['user_profile_id'], str(self.service_name))
request_data = [("token", self.token),
("team_id", event['message']['sender_realm_str']),
("team_domain", email_to_domain(event['message']['sender_email'])),
("channel_id", event['message']['stream_id']),
("channel_name", event['message']['display_recipient']),
("timestamp", event['message']['timestamp']),
("user_id", event['message']['sender_id']),
("user_name", event['message']['sender_full_name']),
("text", event['command']),
("trigger_word", event['trigger']),
("service_id", service.id),
]
return rest_operation, request_data
def process_success(self, response, event):
# type: (Response, Dict[Text, Any]) -> Optional[str]
response_json = json.loads(response.text)
if "text" in response_json:
return response_json["text"]
else:
return None
AVAILABLE_OUTGOING_WEBHOOK_INTERFACES = {
GENERIC_INTERFACE: GenericOutgoingWebhookService,
SLACK_INTERFACE: SlackOutgoingWebhookService,
} # type: Dict[Text, Any]
def get_service_interface_class(interface):
# type: (Text) -> Any
if interface is None or interface not in AVAILABLE_OUTGOING_WEBHOOK_INTERFACES:
return AVAILABLE_OUTGOING_WEBHOOK_INTERFACES[GENERIC_INTERFACE]
else:
return AVAILABLE_OUTGOING_WEBHOOK_INTERFACES[interface]
def get_outgoing_webhook_service_handler(service):
# type: (Service) -> Any
service_interface_class = get_service_interface_class(service.interface_name())
service_interface = service_interface_class(base_url=service.base_url,
token=service.token,
user_profile=service.user_profile,
service_name=service.name)
return service_interface
def send_response_message(bot_id, message, response_message_content):
# type: (str, Dict[str, Any], Text) -> None
recipient_type_name = message['type']
bot_user = get_user_profile_by_id(bot_id)
realm = get_realm_by_email_domain(message['sender_email'])
if recipient_type_name == 'stream':
recipients = [message['display_recipient']]
check_send_message(bot_user, get_client("OutgoingWebhookResponse"), recipient_type_name, recipients,
message['subject'], response_message_content, realm, forwarder_user_profile=bot_user)
else:
# Private message; only send if the bot is among the recipients
recipients = [recipient['email'] for recipient in message['display_recipient']]
if bot_user.email in recipients:
check_send_message(bot_user, get_client("OutgoingWebhookResponse"), recipient_type_name, recipients,
message['subject'], response_message_content, realm, forwarder_user_profile=bot_user)
def succeed_with_message(event, success_message):
# type: (Dict[str, Any], Text) -> None
success_message = "Success! " + success_message
send_response_message(event['user_profile_id'], event['message'], success_message)
def fail_with_message(event, failure_message):
# type: (Dict[str, Any], Text) -> None
failure_message = "Failure! " + failure_message
send_response_message(event['user_profile_id'], event['message'], failure_message)
def request_retry(event, failure_message):
# type: (Dict[str, Any], Text) -> None
event['failed_tries'] += 1
if event['failed_tries'] > MAX_REQUEST_RETRIES:
bot_user = get_user_profile_by_id(event['user_profile_id'])
failure_message = "Maximum retries exceeded! " + failure_message
fail_with_message(event, failure_message)
logging.warning("Maximum retries exceeded for trigger:%s event:%s" % (bot_user.email, event['command']))
else:
queue_json_publish("outgoing_webhooks", event, lambda x: None)
def do_rest_call(rest_operation, request_data, event, service_handler, timeout=None):
# type: (Dict[str, Any], Any, Dict[str, Any], Any, Any) -> None
rest_operation_validator = check_dict([
('method', check_string),
('relative_url_path', check_string),
('request_kwargs', check_dict([])),
('base_url', check_string),
])
error = rest_operation_validator('rest_operation', rest_operation)
if error:
raise JsonableError(error)
http_method = rest_operation['method']
final_url = urllib.parse.urljoin(rest_operation['base_url'], rest_operation['relative_url_path'])
request_kwargs = rest_operation['request_kwargs']
request_kwargs['timeout'] = timeout
try:
response = requests.request(http_method, final_url, data=request_data, **request_kwargs)
if str(response.status_code).startswith('2'):
response_message = service_handler.process_success(response, event)
if response_message is not None:
succeed_with_message(event, response_message)
# On 5xx errors, schedule a retry
elif str(response.status_code).startswith('5'):
request_retry(event, "Internal Server error at third party.")
else:
failure_message = "Third party responded with %d" % (response.status_code)
fail_with_message(event, failure_message)
except requests.exceptions.Timeout:
logging.info("Trigger event %s on %s timed out. Retrying" % (event["command"], event['service_name']))
request_retry(event, 'Unable to connect with the third party.')
except requests.exceptions.RequestException as e:
response_message = "An exception occured for message `%s`! See the logs for more information." % (event["command"],)
logging.exception("Outhook trigger failed:\n %s" % (e,))
fail_with_message(event, response_message)
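# A minimal sketch of how the pieces above fit together. The event dict is
# hypothetical and only lists the keys this module reads; `service` stands for
# an existing zerver.models.Service row and `message_dict` for the triggering
# message's dictionary.
#
#     service_handler = get_outgoing_webhook_service_handler(service)
#     event = {'command': '@**mybot** hello',
#              'message': message_dict,
#              'user_profile_id': service.user_profile.id,
#              'service_name': service.name,
#              'trigger': 'mention',
#              'failed_tries': 0}
#     rest_operation, request_data = service_handler.process_event(event)
#     do_rest_call(rest_operation, request_data, event, service_handler, timeout=10)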
| |
"""Views fo the node settings page."""
# -*- coding: utf-8 -*-
import os
import httplib as http
from flask import request
from box.client import BoxClientException
from urllib3.exceptions import MaxRetryError
from framework.exceptions import HTTPError
from website.util import web_url_for
from website.util import permissions
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
)
from website.addons.box.client import get_node_client
from website.addons.box.client import get_client_from_user_settings
@must_have_addon('box', 'node')
@must_have_permission(permissions.WRITE)
def box_config_get(node_addon, auth, **kwargs):
"""API that returns the serialized node settings."""
return {
'result': serialize_settings(node_addon, auth.user),
}
def serialize_folder(metadata):
"""Serializes metadata to a dict with the display name and path
of the folder.
"""
# if path is root
if metadata['path'] == '' or metadata['path'] == '/':
name = '/ (Full Box)'
else:
name = 'Box' + metadata['path']
return {
'name': name,
'path': metadata['path'],
}
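# For example (illustrative): {'path': '/Photos', 'is_dir': True} serializes to
# {'name': 'Box/Photos', 'path': '/Photos'}, while the root path '' or '/'
# keeps its path and is displayed as '/ (Full Box)'.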
def get_folders(client):
"""Gets a list of folders in a user's Box, including the root.
Each folder is represented as a dict with its display name and path.
"""
metadata = client.metadata('/', list=True)
# List each folder, including the root
root = {
'name': '/ (Full Box)',
'path': '',
}
folders = [root] + [
serialize_folder(each)
for each in metadata['contents'] if each['is_dir']
]
return folders
def serialize_urls(node_settings):
node = node_settings.owner
urls = {
'settings': web_url_for('user_addons'),
'auth': node.api_url_for('box_oauth_start'),
'config': node.api_url_for('box_config_put'),
'files': node.web_url_for('collect_file_trees'),
'deauthorize': node.api_url_for('box_deauthorize'),
'importAuth': node.api_url_for('box_import_user_auth'),
# Endpoint for fetching only folders (including root)
'folders': node.api_url_for('box_list_folders'),
}
return urls
def serialize_settings(node_settings, current_user, client=None):
"""View helper that returns a dictionary representation of a
BoxNodeSettings record. Provides the return value for the
box config endpoints.
"""
valid_credentials = True
user_settings = node_settings.user_settings
current_user_settings = current_user.get_addon('box')
user_is_owner = user_settings is not None and user_settings.owner == current_user
if user_settings:
try:
client = client or get_client_from_user_settings(user_settings)
client.get_user_info()
except BoxClientException:
valid_credentials = False
result = {
'userIsOwner': user_is_owner,
'nodeHasAuth': node_settings.has_auth,
'urls': serialize_urls(node_settings),
'validCredentials': valid_credentials,
'userHasAuth': current_user_settings is not None and current_user_settings.has_auth,
}
if node_settings.has_auth:
# Add owner's profile URL
result['urls']['owner'] = web_url_for(
'profile_view_id',
uid=user_settings.owner._id
)
result['ownerName'] = user_settings.owner.fullname
# Show available folders
# path = node_settings.folder
if node_settings.folder_id is None:
result['folder'] = {'name': None, 'path': None}
elif valid_credentials:
path = node_settings.fetch_full_folder_path()
result['folder'] = {
'path': path,
'name': path.replace('All Files', '', 1) if path != 'All Files' else '/ (Full Box)'
}
return result
@must_not_be_registration
@must_have_addon('box', 'user')
@must_have_addon('box', 'node')
@must_be_addon_authorizer('box')
@must_have_permission(permissions.WRITE)
def box_config_put(node_addon, user_addon, auth, **kwargs):
"""View for changing a node's linked box folder."""
folder = request.json.get('selected')
uid = folder['id']
path = folder['path']
node_addon.set_folder(uid, auth=auth)
return {
'result': {
'folder': {
'name': path.replace('All Files', '') if path != 'All Files' else '/ (Full Box)',
'path': path,
},
'urls': serialize_urls(node_addon),
},
'message': 'Successfully updated settings.',
}
@must_have_addon('box', 'user')
@must_have_addon('box', 'node')
@must_have_permission(permissions.WRITE)
def box_import_user_auth(auth, node_addon, user_addon, **kwargs):
"""Import box credentials from the currently logged-in user to a node.
"""
node_addon.set_user_auth(user_addon)
node_addon.save()
return {
'result': serialize_settings(node_addon, auth.user),
'message': 'Successfully imported access token from profile.',
}
@must_not_be_registration
@must_have_addon('box', 'node')
@must_have_permission(permissions.WRITE)
def box_deauthorize(auth, node_addon, **kwargs):
node_addon.deauthorize(auth=auth)
node_addon.save()
@must_have_addon('box', 'user')
@must_have_addon('box', 'node')
@must_have_permission(permissions.WRITE)
def box_get_share_emails(auth, user_addon, node_addon, **kwargs):
"""Return a list of emails of the contributors on a project.
The current user MUST be the user who authenticated Box for the node.
"""
if not node_addon.user_settings:
raise HTTPError(http.BAD_REQUEST)
# Current user must be the user who authorized the addon
if node_addon.user_settings.owner != auth.user:
raise HTTPError(http.FORBIDDEN)
return {
'result': {
'emails': [
contrib.username
for contrib in node_addon.owner.contributors
if contrib != auth.user
],
}
}
@must_have_addon('box', 'node')
@must_be_addon_authorizer('box')
def box_list_folders(node_addon, **kwargs):
"""Returns a list of folders in Box"""
if not node_addon.has_auth:
raise HTTPError(http.FORBIDDEN)
node = node_addon.owner
folder_id = request.args.get('folderId')
if folder_id is None:
return [{
'id': '0',
'path': 'All Files',
'addon': 'box',
'kind': 'folder',
'name': '/ (Full Box)',
'urls': {
'folders': node.api_url_for('box_list_folders', folderId=0),
}
}]
try:
client = get_node_client(node)
except BoxClientException:
raise HTTPError(http.FORBIDDEN)
try:
metadata = client.get_folder(folder_id)
except BoxClientException:
raise HTTPError(http.NOT_FOUND)
except MaxRetryError:
raise HTTPError(http.BAD_REQUEST)
# Raise error if folder was deleted
if metadata.get('is_deleted'):
raise HTTPError(http.NOT_FOUND)
folder_path = '/'.join(
[
x['name']
for x in metadata['path_collection']['entries']
] + [metadata['name']]
)
return [
{
'addon': 'box',
'kind': 'folder',
'id': item['id'],
'name': item['name'],
'path': os.path.join(folder_path, item['name']),
'urls': {
'folders': node.api_url_for('box_list_folders', folderId=item['id']),
}
}
for item in metadata['item_collection']['entries']
if item['type'] == 'folder'
]
| |
import gtk, gobject
debug = lambda x: 0
class ColumnMapper(object):
def __init__(self, mapping_dictionary):
"""
takes a dictionary of the form
{"Text": (row_id, "attribute_name")}
"""
self._col_map = mapping_dictionary
rev_map = {}
for k, v in self._col_map.iteritems():
rev_map[v[0]] = k
self.rev_map = rev_map
def __getitem__(self, key):
return self._col_map[key]
def iteritems(self):
return self._col_map.iteritems()
def sort_iteritems(self):
for i in xrange(len(self.rev_map)):
yield self.rev_map[i], self._col_map[self.rev_map[i]]
def name_by_column(self, column):
return self._col_map[self.rev_map[column]][1]
def __len__(self):
return len(self._col_map)
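# A minimal usage sketch with a hypothetical mapping (not part of the original
# code); the first tuple element is the model column index, the second the
# object attribute name:
#
#     mapper = ColumnMapper({"Name": (0, "name"), "Value": (1, "value")})
#     mapper.name_by_column(1)        # -> "value"
#     list(mapper.sort_iteritems())   # -> [("Name", (0, "name")), ("Value", (1, "value"))]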
class TreeModel(gtk.GenericTreeModel):
offset = 0 # number of elements to be cutoff from treeiter paths
def __init__(self, col_mapper):
self.col_mapper = col_mapper
gtk.GenericTreeModel.__init__(self)
def get_object(self, gtk_tree_iter):
"""
gtk has its own special tree_iter type that doesn't carry our extra data;
this method converts the gtk tree_iter to our own tree_iter version and
returns the object it holds
"""
tree_iter = self.get_user_data(gtk_tree_iter)
return tree_iter._obj
def highlight(self, obj, value, column=0):
"""
highlights value depending on whether obj is merged
or a terminology default etc.
"""
color = None
italics = False
merged = obj.get_merged_equivalent()
if merged is not None:
if column == 0: color = "darkgrey"
if merged == obj: color = "grey"
merged = obj.get_terminology_equivalent()
if column == 0 and merged is not None:
italics = True
if italics:
value = "<i>%s</i>" % value
# check for validation errors
if column == 0:
warning = -1
doc = obj.document
if doc is not None and hasattr(doc, "validation_result"):
for err in doc.validation_result.errors:
if err.obj is obj:
warning = max(warning, 1 if err.is_error else 0)
if warning >= 0:
colors = ['orange', 'red']
value = value + u" <span foreground='%s'>\u26A0</span>" % colors[warning]
if color is None: return value
return "<span foreground='%s'>%s</span>" % (color, value)
def on_get_flags(self):
return 0
def on_get_n_columns(self):
return len(self.col_mapper)
def on_get_column_type(self, index):
return gobject.TYPE_STRING
def on_get_path(self, tree_iter):
return self.odml_path_to_model_path(tree_iter.to_path()[self.offset:])
def on_get_value(self, tree_iter, column):
attr = self.col_mapper.name_by_column(column)
debug(":on_get_value [%d:%s]: %s" % (column, attr, tree_iter))
return tree_iter.get_value(attr)
def on_iter_next(self, tree_iter):
next = tree_iter.get_next()
debug(":on_iter_next [%s]: %s" % (tree_iter, next))
return next
def on_iter_children(self, tree_iter):
debug(":on_iter_children [%s]" % tree_iter)
return tree_iter.get_children()
def on_iter_has_child(self, tree_iter):
debug(":on_iter_has_child [%s,%s]" % (tree_iter, tree_iter.has_child))
return tree_iter.has_child
def on_iter_n_children(self, tree_iter):
return tree_iter.n_children
def on_iter_nth_child(self, tree_iter, n):
debug(":on_iter_nth_child [%d]: %s " % (n, tree_iter))
if tree_iter is None:
return None
return tree_iter.get_nth_child(n)
def on_iter_parent(self, tree_iter):
debug(":on_iter_parent [%s]" % tree_iter)
return tree_iter.parent
def _get_node_iter(self, node):
raise NotImplementedError
def get_node_iter(self, node):
"""
returns the corresponding iter to a node
"""
# Ugly fix: to get a GtkTreeIter from our custom Iter instance,
# we first convert our custom Iter to a path and then create an iter from it
# (apparently the two iter types are different)
custom_iter = self._get_node_iter(node)
if custom_iter is not None:
return self.create_tree_iter(custom_iter)
def get_node_path(self, node):
"""
returns the path of a node
"""
custom_iter = self._get_node_iter(node)
if custom_iter is not None:
return self.on_get_path(custom_iter)
def post_insert(self, node):
"""
called to notify the treemodel that *node* is a newly inserted row
and the parent may have a child toggled
"""
iter = self.get_node_iter(node)
self.row_inserted(self.get_path(iter), iter)
if self.iter_has_child(iter):
self.row_has_child_toggled(self.get_path(iter), iter)
# todo recurse to children!
iter = self.iter_parent(iter)
if iter is not None:
self.row_has_child_toggled(self.get_path(iter), iter)
def post_delete(self, parent, old_path):
"""
called to notify the treemodel that the path *old_path* is no
longer valid and parent might have its child toggled
TODO figure out how to handle recursive removals
"""
self.row_deleted(old_path)
iter = self.get_node_iter(parent)
if iter is not None:
path = self.get_path(iter)
if path:
self.row_has_child_toggled(path, iter)
def event_remove(self, context):
"""
handles action="remove" events and notifies the model about
occured changes. Be sure to call this method for both preChange
and postChange events.
"""
if not hasattr(context, "path"):
context.path = {}
context.parent = {}
if context.preChange:
context.path[self] = self.get_node_path(context.val)
context.parent[self] = context.val.parent
if context.postChange:
path = context.path[self]
self.post_delete(context.parent[self], path)
def event_insert(self, context):
"""
handles action="append" and action="insert" events and notifies the
model about the changes that occurred.
"""
if context.postChange:
self.post_insert(context.val)
def event_reorder(self, context):
"""
handles action="reorder" and notifies the model accordingly issuing
a rows_reordered call
"""
if context.preChange and not hasattr(context, "neworder"):
(childlist, new_index) = context.val
old_index = childlist.index(context.obj)
res = list(range(len(childlist)))
res.insert(new_index if new_index < old_index else new_index+1, old_index)
del res[old_index if new_index > old_index else (old_index+1)]
context.neworder = res
if context.postChange:
iter = self.get_node_iter(context.obj.parent)
path = self.get_path(iter)
if not path and context.obj.parent is not self._section:
return # not our deal
self.rows_reordered(path, iter, context.neworder)
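# Worked example for the permutation built above (illustrative): with a child
# list of length 5 and the object moving from old_index 3 to new_index 1,
# res starts as [0, 1, 2, 3, 4], the insert gives [0, 3, 1, 2, 3, 4] and the
# delete yields neworder == [0, 3, 1, 2, 4]; each entry passed to
# rows_reordered() is the old position of the row that now sits at that index.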
| |
#
# File : iar.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
import os
import sys
import string
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
fs_encoding = sys.getfilesystemencoding()
iar_workspace = '''<?xml version="1.0" encoding="iso-8859-1"?>
<workspace>
<project>
<path>$WS_DIR$\%s</path>
</project>
<batchBuild/>
</workspace>
'''
def IARAddGroup(parent, name, files, project_path):
group = SubElement(parent, 'group')
group_name = SubElement(group, 'name')
group_name.text = name
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
file = SubElement(group, 'file')
file_name = SubElement(file, 'name')
if os.path.isabs(path):
file_name.text = path.decode(fs_encoding)
else:
file_name.text = ('$PROJ_DIR$\\' + path).decode(fs_encoding)
def IARWorkspace(target):
# make a workspace
workspace = target.replace('.ewp', '.eww')
out = file(workspace, 'wb')
xml = iar_workspace % target
out.write(xml)
out.close()
def IARProject(target, script):
project_path = os.path.dirname(os.path.abspath(target))
tree = etree.parse('template.ewp')
root = tree.getroot()
out = file(target, 'wb')
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CCFLAGS = ''
Libs = []
lib_prefix = ['lib', '']
lib_suffix = ['.a', '.o', '']
# NOTE: `item` is the library name taken from the enclosing
# `for item in group['LIBS']` loop further down in IARProject().
def searchLib(group):
for path_item in group['LIBPATH']:
for prefix_item in lib_prefix:
for suffix_item in lib_suffix:
lib_full_path = os.path.join(path_item, prefix_item + item + suffix_item)
if os.path.isfile(lib_full_path):
return lib_full_path
# report failure only after every path/prefix/suffix combination was tried
return ''
# add group
for group in script:
IARAddGroup(root, group['name'], group['src'], project_path)
# get each include path
if group.has_key('CPPPATH') and group['CPPPATH']:
CPPPATH += group['CPPPATH']
# get each group's definitions
if group.has_key('CPPDEFINES') and group['CPPDEFINES']:
CPPDEFINES += group['CPPDEFINES']
# get each group's link flags
if group.has_key('LINKFLAGS') and group['LINKFLAGS']:
LINKFLAGS += group['LINKFLAGS']
if group.has_key('LIBS') and group['LIBS']:
for item in group['LIBS']:
lib_path = searchLib(group)
if lib_path != '':
lib_path = _make_path_relative(project_path, lib_path)
Libs += [lib_path]
# print('found lib isfile: ' + lib_path)
else:
print('not found LIB: ' + item)
# make relative path
paths = set()
for path in CPPPATH:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
# setting options
options = tree.findall('configuration/settings/data/option')
for option in options:
# print option.text
name = option.find('name')
if name.text == 'CCIncludePath2' or name.text == 'newCCIncludePaths':
for path in paths:
state = SubElement(option, 'state')
if os.path.isabs(path) or path.startswith('$'):
state.text = path
else:
state.text = '$PROJ_DIR$\\' + path
if name.text == 'CCDefines':
for define in CPPDEFINES:
state = SubElement(option, 'state')
state.text = define
if name.text == 'IlinkAdditionalLibs':
for path in Libs:
state = SubElement(option, 'state')
if os.path.isabs(path) or path.startswith('$'):
path = path.decode(fs_encoding)
else:
path = ('$PROJ_DIR$\\' + path).decode(fs_encoding)
state.text = path
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
IARWorkspace(target)
def IARVersion():
import subprocess
import re
def IARPath():
import rtconfig
# backup environ
old_environ = os.environ
os.environ['RTT_CC'] = 'iar'
reload(rtconfig)
# get iar path
path = rtconfig.EXEC_PATH
# restore environ
os.environ = old_environ
reload(rtconfig)
return path
path = IARPath()
if os.path.exists(path):
cmd = os.path.join(path, 'iccarm.exe')
else:
print('Error: get IAR version failed. Please update the IAR installation path in rtconfig.py!')
return "0.0"
child = subprocess.Popen([cmd, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = child.communicate()
# example stdout: IAR ANSI C/C++ Compiler V8.20.1.14183/W32 for ARM
return re.search('[\d\.]+', stdout).group(0)
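# A hedged sketch of the expected inputs to IARProject() (hypothetical values).
# `script` is the list of group dicts produced by the SCons build scripts; each
# entry in 'src' must be an SCons file node (providing rfile(), name and
# abspath), and a template.ewp must exist in the working directory.
#
#     groups = [{'name': 'Kernel',
#                'src': kernel_file_nodes,
#                'CPPPATH': ['include'],
#                'CPPDEFINES': ['RT_USING_COMPONENTS_INIT'],
#                'LINKFLAGS': '',
#                'LIBS': [],
#                'LIBPATH': []}]
#     IARProject('project.ewp', groups)   # also writes project.eww via IARWorkspace()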
| |
import uuid
from datetime import timedelta
from django.contrib.auth.models import User
from django.contrib.postgres.fields import JSONField
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.timezone import now
from contentstore.models import Message, MessageSet, Schedule
@python_2_unicode_compatible
class Subscription(models.Model):
""" Identity subscriptions and their status
"""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
identity = models.CharField(max_length=36, null=False, blank=False, db_index=True)
version = models.IntegerField(default=1)
messageset = models.ForeignKey(
MessageSet, related_name="subscriptions", null=False, on_delete=models.PROTECT
)
initial_sequence_number = models.IntegerField(default=1, null=False, blank=False)
next_sequence_number = models.IntegerField(default=1, null=False, blank=False)
lang = models.CharField(max_length=6, null=False, blank=False)
active = models.BooleanField(default=True)
completed = models.BooleanField(default=False)
schedule = models.ForeignKey(
Schedule, related_name="subscriptions", null=False, on_delete=models.PROTECT
)
process_status = models.IntegerField(default=0, null=False, blank=False)
metadata = JSONField(null=True, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(
User, related_name="subscriptions_created", null=True, on_delete=models.SET_NULL
)
updated_by = models.ForeignKey(
User, related_name="subscriptions_updated", null=True, on_delete=models.SET_NULL
)
user = property(lambda self: self.created_by)
def get_scheduler_id(self):
return self.metadata.get("scheduler_schedule_id")
def __str__(self):
return str(self.id)
def messages_behind(self, end_date=None, running_total=0):
"""
Determines how many messages behind the subscription is, taking into
account the MessageSet `next_set` chain.
"""
expected, complete = self.get_expected_next_sequence_number(end_date)
behind = expected - self.next_sequence_number
behind = max(0, behind) + running_total
# We should count the last message if we've completed the messageset
if complete and expected != 0:
behind += 1
if complete and self.messageset.next_set is not None:
try:
last_run = self.messageset.get_all_run_dates(
self.created_at,
self.lang,
self.schedule,
self.initial_sequence_number,
).pop()
except IndexError:
last_run = self.created_at
next_sub = Subscription(
lang=self.lang,
messageset=self.messageset.next_set,
schedule=self.messageset.next_set.default_schedule,
created_at=last_run,
)
return next_sub.messages_behind(end_date, behind)
return behind
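# Worked example (illustrative): expected == 12 with next_sequence_number == 10
# gives behind == 2; when the set is complete the final message is also
# counted, and the tally continues into messageset.next_set via a synthetic
# Subscription starting at the last run date.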
def get_expected_next_sequence_number(self, end_date=None):
"""Determines the expected next sequence number this subscription
should be at based on the configured schedule, message set and
creation date. It also checks if the subscription should be completed.
Returns a tuple of next_sequence_number, completed.
"""
if end_date is None:
end_date = now()
set_max = self.messageset.get_messageset_max(self.lang)
runs = self.schedule.get_run_times_between(self.created_at, end_date)
count = len(runs) + (self.initial_sequence_number - 1)
if count >= set_max:
return set_max, True
else:
expected = count + 1
return expected, False
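# Worked example (illustrative): with initial_sequence_number == 1, a set_max
# of 30 for this language and 10 schedule runs between created_at and
# end_date, count == 10 < 30 and the method returns (11, False); once count
# reaches 30 or more it returns (30, True).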
@property
def has_next_sequence_number(self):
"""Returns True if this Subscription has not yet reached the
configured MessageSet's maximum sequence number, returns False
otherwise.
"""
return self.next_sequence_number < self.messageset.get_messageset_max(self.lang)
def mark_as_complete(self, save=True):
self.completed = True
self.active = False
self.process_status = 2 # Completed
if save:
self.save()
def fast_forward(self, end_date=None, save=True):
"""Moves a subscription forward to where it should be based on the
configured MessageSet and schedule and the given end_date (defaults
to utcnow if not specified).
Returns True if the subscription was completed due to this action,
False otherwise.
"""
number, complete = self.get_expected_next_sequence_number(end_date)
if complete:
self.mark_as_complete(save=save)
self.next_sequence_number = number
if save:
self.save()
return complete
@classmethod
def fast_forward_lifecycle(cls, subscription, end_date=None, save=True):
"""Takes an existing Subscription object and fast forwards it through
the entire lifecycle based on the given end_date. If no end_date is
specified, now() will be used.
This method will create all subsequent Subscription objects as required
by the configured MessageSet object's next_set value.
Returns a list of all Subscription objects operated on.
"""
if end_date is None:
end_date = now()
subscriptions = [subscription]
done = False
sub = subscription
while not done:
completed = sub.fast_forward(end_date, save=save)
if completed:
if sub.messageset.next_set:
# If sub.lang is None or empty there is a problem with the data
# that we can't resolve directly, so we guard against it
# breaking things here.
if not sub.lang:
# TODO: what do we do here?
break
run_dates = sub.messageset.get_all_run_dates(
sub.created_at,
sub.lang,
sub.schedule,
sub.initial_sequence_number,
)
if run_dates:
last_date = run_dates.pop()
newsub = Subscription(
identity=sub.identity,
lang=sub.lang,
messageset=sub.messageset.next_set,
schedule=sub.messageset.next_set.default_schedule,
)
if save:
newsub.save()
# Because created_at uses auto_now_add we have to set the
# created date manually after creation. Add a minute to
# the expected last run date because in the normal flow
# new subscriptions are processed after the day's send
# has been completed.
newsub.created_at = last_date + timedelta(minutes=1)
completed = newsub.fast_forward(end_date, save=save)
subscriptions.append(newsub)
sub = newsub
else:
# This subscription is new and hasn't had any runs yet
done = True
else:
# There are no more subscriptions in this lifecycle.
done = True
else:
# The subscription isn't completed yet.
done = True
return subscriptions
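# A minimal usage sketch (illustrative; assumes a saved Subscription instance):
#
#     sub = Subscription.objects.get(pk=subscription_id)
#     touched = Subscription.fast_forward_lifecycle(sub)
#     # `touched` holds the original subscription plus any follow-on
#     # subscriptions created for messageset.next_set, each fast-forwarded
#     # to now().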
@property
def is_ready_for_processing(self):
return (
self.process_status == 0
and self.completed is not True
and self.active is True
)
@python_2_unicode_compatible
class SubscriptionSendFailure(models.Model):
subscription = models.ForeignKey(Subscription, on_delete=models.CASCADE)
task_id = models.UUIDField()
initiated_at = models.DateTimeField()
reason = models.TextField()
def __str__(self): # __unicode__ on Python 2
return str(self.id)
@python_2_unicode_compatible
class EstimatedSend(models.Model):
""" Estimated number of messages to be sent per message set per day
"""
send_date = models.DateField()
messageset = models.ForeignKey(
MessageSet, related_name="estimates", null=False, on_delete=models.CASCADE
)
estimate_subscriptions = models.IntegerField(null=False, blank=False)
estimate_identities = models.IntegerField(null=False, blank=False)
class Meta:
unique_together = (("send_date", "messageset"),)
def __str__(self):
return "{},{}:{}/{}".format(
self.send_date,
self.messageset.short_name,
self.estimate_subscriptions,
self.estimate_identities,
)
@python_2_unicode_compatible
class ResendRequest(models.Model):
""" Resend Request from user, used to trigger a resend.
"""
received_at = models.DateTimeField(auto_now_add=True)
subscription = models.ForeignKey(Subscription, on_delete=models.CASCADE)
outbound = models.UUIDField(null=True)
message = models.ForeignKey(
Message, related_name="resend_requests", null=True, on_delete=models.SET_NULL
)
def __str__(self):
return "{}: {}".format(self.id, self.received_at)
class BehindSubscription(models.Model):
"""
Subscriptions that are behind where they should be. Filled out by
subscriptions.tasks.find_behind_subscriptions
"""
subscription = models.ForeignKey(
to=Subscription,
on_delete=models.CASCADE,
help_text="The subscription that is behind",
)
messages_behind = models.IntegerField(
help_text="The number of messages the subscription is behind by"
)
current_messageset = models.ForeignKey(
to=MessageSet,
on_delete=models.CASCADE,
related_name="+",
help_text="The message set the the subscription is on",
)
current_sequence_number = models.IntegerField(
help_text="Which sequence in the messageset we are at"
)
expected_messageset = models.ForeignKey(
to=MessageSet,
on_delete=models.CASCADE,
related_name="+",
help_text="The messageset that the subscription should be on",
)
expected_sequence_number = models.IntegerField(
help_text="Which sequence in the messageset we expect to be"
)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
permissions = (
("can_find_behind_subscriptions", "Can find behind subscriptions"),
)
| |
#!/usr/bin/env python
#
# ROS node to provide access to the camera by wrapping NaoQI access (may not
# be the most efficient way...)
#
# Copyright 2012 Daniel Maier, University of Freiburg
# Copyright 2014 Aldebaran Robotics
# http://www.ros.org/wiki/nao
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the University of Freiburg nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from collections import defaultdict
from distutils.version import LooseVersion
import rospy
from sensor_msgs.msg import Image, CameraInfo
from naoqi_driver.naoqi_node import NaoqiNode
import camera_info_manager
from dynamic_reconfigure.server import Server
from naoqi_sensors.cfg import NaoqiCameraConfig
# import resolutions
from naoqi_sensors.vision_definitions import k960p, k4VGA, kVGA, kQVGA, kQQVGA
# import color spaces
from naoqi_sensors.vision_definitions import kYUV422ColorSpace, kYUVColorSpace, \
kRGBColorSpace, kBGRColorSpace, kDepthColorSpace
# import extra parameters
from naoqi_sensors.vision_definitions import kCameraSelectID, kCameraAutoExpositionID, kCameraAecAlgorithmID, \
kCameraContrastID, kCameraSaturationID, kCameraHueID, kCameraSharpnessID, kCameraAutoWhiteBalanceID, \
kCameraExposureID, kCameraAutoGainID, kCameraGainID, kCameraBrightnessID, kCameraWhiteBalanceID
# those should appear in vision_definitions.py at some point
kTopCamera = 0
kBottomCamera = 1
kDepthCamera = 2
class NaoqiCam (NaoqiNode):
def __init__(self, node_name='naoqi_camera'):
NaoqiNode.__init__(self, node_name)
self.camProxy = self.get_proxy("ALVideoDevice")
if self.camProxy is None:
exit(1)
self.nameId = None
self.camera_infos = {}
def returnNone():
return None
self.config = defaultdict(returnNone)
# ROS publishers
self.pub_img_ = rospy.Publisher('~image_raw', Image, queue_size=5)
self.pub_info_ = rospy.Publisher('~camera_info', CameraInfo, queue_size=5)
# initialize the parameter server
self.srv = Server(NaoqiCameraConfig, self.reconfigure)
# initial load from param server
self.init_config()
# initially load configurations
self.reconfigure(self.config, 0)
def init_config( self ):
# mandatory configurations to be set
self.config['frame_rate'] = rospy.get_param('~frame_rate')
self.config['source'] = rospy.get_param('~source')
self.config['resolution'] = rospy.get_param('~resolution')
self.config['color_space'] = rospy.get_param('~color_space')
# optional for camera frames
# top camera with default
if rospy.has_param('~camera_top_frame'):
self.config['camera_top_frame'] = rospy.get_param('~camera_top_frame')
else:
self.config['camera_top_frame'] = "/CameraTop_frame"
# bottom camera with default
if rospy.has_param('~camera_bottom_frame'):
self.config['camera_bottom_frame'] = rospy.get_param('~camera_bottom_frame')
else:
self.config['camera_bottom_frame'] = "/CameraBottom_frame"
# depth camera with default
if rospy.has_param('~camera_depth_frame'):
self.config['camera_depth_frame'] = rospy.get_param('~camera_depth_frame')
else:
self.config['camera_depth_frame'] = "/DepthCamera_frame"
#load calibration files
if rospy.has_param('~calibration_file_top'):
self.config['calibration_file_top'] = rospy.get_param('~calibration_file_top')
if rospy.has_param('~calibration_file_bottom'):
self.config['calibration_file_bottom'] = rospy.get_param('~calibration_file_bottom')
if rospy.has_param('~use_ros_time'):
self.config['use_ros_time'] = rospy.get_param('~use_ros_time')
else:
self.config['use_ros_time'] = False
def reconfigure( self, new_config, level ):
"""
Reconfigure the camera
"""
rospy.loginfo('reconfigure changed')
if self.pub_img_.get_num_connections() == 0:
rospy.loginfo('Changes recorded but not applied as nobody is subscribed to the ROS topics.')
self.config.update(new_config)
return self.config
# check if we are even subscribed to a camera
is_camera_new = self.nameId is None
if is_camera_new:
rospy.loginfo('subscribed to camera proxy, since this is the first camera')
if self.get_version() < LooseVersion('2.0'):
self.nameId = self.camProxy.subscribe("rospy_gvm", new_config['source'],
new_config['resolution'], new_config['color_space'],
new_config['frame_rate'])
else:
self.nameId = self.camProxy.subscribeCamera("rospy_gvm", new_config['source'],
new_config['resolution'], new_config['color_space'],
new_config['frame_rate'])
if self.config['source'] != new_config['source'] or is_camera_new:
rospy.loginfo('updating camera source information')
if new_config['source'] == kTopCamera:
self.frame_id = self.config['camera_top_frame']
elif new_config['source'] == kBottomCamera:
self.frame_id = self.config['camera_bottom_frame']
elif new_config['source'] == kDepthCamera:
self.frame_id = self.config['camera_depth_frame']
else:
rospy.logerr('Invalid source. Must be 0, 1 or 2')
exit(1)
# check if the camera changed
if self.config['camera_info_url'] != new_config['camera_info_url'] and \
new_config['camera_info_url'] and new_config['camera_info_url'] not in self.camera_infos:
if 'cim' not in self.__dict__:
self.cim = camera_info_manager.CameraInfoManager(cname='nao_camera')
if not self.cim.setURL( new_config['camera_info_url'] ):
rospy.logerr('malformed URL for calibration file')
else:
try:
self.cim.loadCameraInfo()
except IOError:
rospy.logerr('Could not read from existing calibration file')
if self.cim.isCalibrated():
rospy.loginfo('Successfully loaded camera info')
self.camera_infos[new_config['camera_info_url']] = self.cim.getCameraInfo()
else:
rospy.logerr('There was a problem loading the calibration file. Check the URL!')
# set params
camParams = self.extractParams(new_config)
self.setParams(camParams)
key_methods = [ ('resolution', 'setResolution'), ('color_space', 'setColorSpace'), ('frame_rate', 'setFrameRate')]
if self.get_version() >= LooseVersion('2.0'):
key_methods.append(('source', 'setActiveCamera'))
for key, method in key_methods:
if self.config[key] != new_config[key] or is_camera_new:
self.camProxy.__getattribute__(method)(self.nameId, new_config[key])
self.config.update(new_config)
return self.config
def extractParams(self, new_config):
camParams = []
camParams.append( (kCameraAecAlgorithmID, new_config['auto_exposure_algo']) )
camParams.append( (kCameraContrastID, new_config['contrast']) )
camParams.append( (kCameraSaturationID, new_config['saturation']) )
camParams.append( (kCameraHueID, new_config['hue']) ) # Might be deprecated
camParams.append( (kCameraSharpnessID, new_config['sharpness']) )
camParams.append( (kCameraAutoWhiteBalanceID, new_config['auto_white_balance']) )
if ( new_config['auto_white_balance']==0):
camParams.append( (kCameraWhiteBalanceID, new_config['white_balance']) )
camParams.append( (kCameraAutoExpositionID, new_config['auto_exposition']) )
if ( new_config['auto_exposition']==0):
camParams.append( (kCameraGainID, new_config['gain']) )
camParams.append( (kCameraExposureID, new_config['exposure']) )
else:
camParams.append( (kCameraBrightnessID, new_config['brightness']) )
return camParams
def setParams(self, key_list):
for key, value in key_list:
if self.get_version() < LooseVersion('2.0'):
self.camProxy.setParam(key, value)
else:
self.camProxy.setCameraParameter(self.nameId, key, value)
def run(self):
img = Image()
r = rospy.Rate(self.config['frame_rate'])
while self.is_looping():
if self.pub_img_.get_num_connections() == 0:
if self.nameId:
rospy.loginfo('Unsubscribing from camera as nobody listens to the topics.')
self.camProxy.unsubscribe(self.nameId)
self.nameId = None
r.sleep()
continue
if self.nameId is None:
self.reconfigure(self.config, 0)
r.sleep()
continue
image = self.camProxy.getImageRemote(self.nameId)
if image is None:
continue
# Deal with the image
if self.config['use_ros_time']:
img.header.stamp = rospy.Time.now()
else:
img.header.stamp = rospy.Time(image[4] + image[5]*1e-6)
img.header.frame_id = self.frame_id
img.height = image[1]
img.width = image[0]
nbLayers = image[2]
if image[3] == kYUVColorSpace:
encoding = "mono8"
elif image[3] == kRGBColorSpace:
encoding = "rgb8"
elif image[3] == kBGRColorSpace:
encoding = "bgr8"
elif image[3] == kYUV422ColorSpace:
encoding = "yuv422" # this works only in ROS groovy and later
elif image[3] == kDepthColorSpace:
encoding = "mono16"
else:
rospy.logerr("Received unknown encoding: {0}".format(image[3]))
continue  # skip this frame instead of publishing with an undefined encoding
img.encoding = encoding
img.step = img.width * nbLayers
img.data = image[6]
self.pub_img_.publish(img)
# deal with the camera info
if self.config['source'] == kDepthCamera and image[3] == kDepthColorSpace:
infomsg = CameraInfo()
# yes, this is only for an XTion / Kinect but that's the only thing supported by NAO
ratio_x = float(640)/float(img.width)
ratio_y = float(480)/float(img.height)
infomsg.width = img.width
infomsg.height = img.height
# [ 525., 0., 3.1950000000000000e+02, 0., 525., 2.3950000000000000e+02, 0., 0., 1. ]
infomsg.K = [ 525, 0, 3.1950000000000000e+02,
0, 525, 2.3950000000000000e+02,
0, 0, 1 ]
infomsg.P = [ 525, 0, 3.1950000000000000e+02, 0,
0, 525, 2.3950000000000000e+02, 0,
0, 0, 1, 0 ]
for i in range(3):
infomsg.K[i] = infomsg.K[i] / ratio_x
infomsg.K[3+i] = infomsg.K[3+i] / ratio_y
infomsg.P[i] = infomsg.P[i] / ratio_x
infomsg.P[4+i] = infomsg.P[4+i] / ratio_y
infomsg.D = []
infomsg.binning_x = 0
infomsg.binning_y = 0
infomsg.distortion_model = ""
infomsg.header = img.header
self.pub_info_.publish(infomsg)
elif self.config['camera_info_url'] in self.camera_infos:
infomsg = self.camera_infos[self.config['camera_info_url']]
infomsg.header = img.header
self.pub_info_.publish(infomsg)
r.sleep()
if (self.nameId):
rospy.loginfo("unsubscribing from camera ")
self.camProxy.unsubscribe(self.nameId)
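# A minimal launch sketch (not part of the original file). It assumes NaoqiNode
# initialises the ROS node and, being Thread-based, exposes start() to drive
# run(); the package's real entry point may differ.
if __name__ == '__main__':
    naoqicam = NaoqiCam('naoqi_camera')
    naoqicam.start()
    rospy.spin()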
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.phishingprotection.v1beta1 PhishingProtectionServiceV1Beta1 API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.cloud.phishingprotection_v1beta1.gapic import (
phishing_protection_service_client_config,
)
from google.cloud.phishingprotection_v1beta1.gapic.transports import (
phishing_protection_service_grpc_transport,
)
from google.cloud.phishingprotection_v1beta1.proto import phishingprotection_pb2
from google.cloud.phishingprotection_v1beta1.proto import phishingprotection_pb2_grpc
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-phishing-protection"
).version
class PhishingProtectionServiceClient(object):
"""Service to report phishing URIs."""
SERVICE_ADDRESS = "phishingprotection.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = (
"google.cloud.phishingprotection.v1beta1.PhishingProtectionServiceV1Beta1"
)
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
PhishingProtectionServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
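# Hypothetical usage of the convenience constructor (the key path below is a
# placeholder):
#   client = PhishingProtectionServiceClient.from_service_account_file(
#       '/path/to/service-account.json')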
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
"projects/{project}", project=project
)
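# For example, PhishingProtectionServiceClient.project_path('my-project')
# expands to 'projects/my-project'.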
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.PhishingProtectionServiceGrpcTransport,
Callable[[~.Credentials, type], ~.PhishingProtectionServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = phishing_protection_service_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=phishing_protection_service_grpc_transport.PhishingProtectionServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = phishing_protection_service_grpc_transport.PhishingProtectionServiceGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def report_phishing(
self,
parent,
uri,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Reports a URI suspected of containing phishing content to be reviewed.
Once the report review is complete, its result can be found in the Cloud
Security Command Center findings dashboard for Phishing Protection. If
the result verifies the existence of malicious phishing content, the
site will be added to `Google's Social Engineering
lists <https://support.google.com/webmasters/answer/6350487/>`__ in
order to protect users that could get exposed to this threat in the
future.
Example:
>>> from google.cloud import phishingprotection_v1beta1
>>>
>>> client = phishingprotection_v1beta1.PhishingProtectionServiceClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `uri`:
>>> uri = ''
>>>
>>> response = client.report_phishing(parent, uri)
Args:
parent (str): Required. The name of the project for which the report will be created,
in the format "projects/{project\_number}".
uri (str): Required. The URI that is being reported for phishing content to be analyzed.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.phishingprotection_v1beta1.types.ReportPhishingResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "report_phishing" not in self._inner_api_calls:
self._inner_api_calls[
"report_phishing"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.report_phishing,
default_retry=self._method_configs["ReportPhishing"].retry,
default_timeout=self._method_configs["ReportPhishing"].timeout,
client_info=self._client_info,
)
request = phishingprotection_pb2.ReportPhishingRequest(parent=parent, uri=uri)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["report_phishing"](
request, retry=retry, timeout=timeout, metadata=metadata
)
import pytest
import vcr
import gocd
import gocd.api
@pytest.fixture
def server():
return gocd.Server('http://localhost:8153', user='ba', password='secret')
@pytest.fixture
def pipeline(server):
return server.pipeline('Simple')
@pytest.fixture
def locked_pipeline(server):
return server.pipeline('Simple-with-lock')
@pytest.fixture
def pipeline_multiple_stages(server):
return server.pipeline('Multiple-Stages-And-Jobs')
@pytest.fixture
def pipeline_multiple_stages_manual(server):
return server.pipeline('Multiple-Stages-And-Jobs-Manual')
@pytest.mark.parametrize('cassette_name,offset,counter', [
('tests/fixtures/cassettes/api/pipeline/history-offset-0.yml', 0, 11),
('tests/fixtures/cassettes/api/pipeline/history-offset-10.yml', 10, 1)
])
def test_history(pipeline, cassette_name, offset, counter):
with vcr.use_cassette(cassette_name):
response = pipeline.history(offset=offset)
assert response.is_ok
assert response.content_type == 'application/json'
assert 'pipelines' in response
run = response['pipelines'][0]
assert run['name'] == 'Simple'
assert run['counter'] == counter
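# Re-recording a cassette against a live Go server could look roughly like this
# (record_mode and the server details are assumptions, not project settings):
#   with vcr.use_cassette(
#           'tests/fixtures/cassettes/api/pipeline/history-offset-0.yml',
#           record_mode='all'):
#       gocd.Server('http://localhost:8153', user='ba', password='secret') \
#           .pipeline('Simple').history()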
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/release-successful.yml')
def test_release(locked_pipeline):
response = locked_pipeline.release()
assert response.is_ok
assert response.content_type == 'text/html'
assert response.payload.decode('utf-8') == 'pipeline lock released for {0}\n'.format(
locked_pipeline.name
)
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/release-unsuccessful.yml')
def test_release_when_pipeline_is_unlocked(locked_pipeline):
response = locked_pipeline.release()
assert not response
assert not response.is_ok
assert response.content_type == 'text/html'
assert response.payload.decode('utf-8') == (
'lock exists within the pipeline configuration but no pipeline '
'instance is currently in progress\n'
)
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/pause-successful.yml')
def test_pause(pipeline):
response = pipeline.pause('Time to sleep')
assert response.is_ok
assert response.content_type == 'text/html'
assert response.payload.decode('utf-8') == ' '
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/unpause-successful.yml')
def test_unpause(pipeline):
response = pipeline.unpause()
assert response.is_ok
assert response.content_type == 'text/html'
assert response.payload.decode('utf-8') == ' '
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/status.yml')
def test_status(pipeline):
response = pipeline.status()
assert response.is_ok
assert response.content_type == 'application/json'
assert not response['locked']
assert not response['paused']
assert response['schedulable']
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/instance.yml')
def test_instance(pipeline):
response = pipeline.instance(1)
assert response.is_ok
assert response.content_type == 'application/json'
assert response['name'] == pipeline.name
assert response['counter'] == 1
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/instance-return-latest.yml')
def test_instance_without_argument_returns_latest(pipeline):
history_instance = pipeline.history()['pipelines'][0]
response = pipeline.instance()
assert response.is_ok
assert response['counter'] == history_instance['counter']
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/schedule-successful-no-args.yml')
def test_schedule(pipeline):
response = pipeline.schedule()
assert response.status_code == 202
assert response.is_ok
assert response.content_type == 'text/html'
assert response.payload.decode('utf-8') == (
u'Request to schedule pipeline {0} accepted\n'.format(pipeline.name)
)
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/schedule-successful-with-material.yml')
def test_schedule_with_git_arg(pipeline):
git_revision = (
'29f5d8ec63b7200d06a25f0b1df0e321bd95f1ec823d3ef8bac7c5295affa488'
)
# This feels bananas.
# TODO: Check with Go devs what the format for all these material
# revs are, and how to figure it out
# If this is it then I need to find a better way for users of this
# library to interact with it, duplicating the revision isn't cool.
response = pipeline.schedule(materials={git_revision: git_revision})
assert response.status_code == 202
assert response.is_ok
assert response.content_type == 'text/html'
assert response.payload.decode('utf-8') == (
'Request to schedule pipeline {0} accepted\n'.format(pipeline.name)
)
@vcr.use_cassette('tests/fixtures/cassettes/api/pipeline/schedule-successful-with-env-var.yml')
def test_schedule_with_environment_variable_passed(pipeline):
response = pipeline.schedule(variables=dict(UPSTREAM_REVISION='42'))
assert response.status_code == 202
assert response.is_ok
assert response.content_type == 'text/html'
assert response.payload.decode('utf-8') == (
'Request to schedule pipeline {0} accepted\n'.format(pipeline.name)
)
@vcr.use_cassette(
'tests/fixtures/cassettes/api/pipeline/schedule-successful-with-secure-env-var.yml'
)
def test_schedule_with_secure_environment_variable_passed(pipeline):
response = pipeline.schedule(secure_variables=dict(UPLOAD_PASSWORD='ssh, not so loud'))
assert response.status_code == 202
assert response.is_ok
assert response.content_type == 'text/html'
assert response.payload.decode('utf-8') == (
'Request to schedule pipeline {0} accepted\n'.format(pipeline.name)
)
@vcr.use_cassette(
'tests/fixtures/cassettes/api/pipeline/schedule-unsuccessful-when-already-running.yml'
)
def test_schedule_when_pipeline_is_already_running(pipeline):
response = pipeline.schedule()
assert response.status_code == 409
assert not response.is_ok
assert response.content_type == 'text/html'
assert response.payload.decode('utf-8') == (
'Failed to trigger pipeline [{pipeline}] {{ Stage [Hello] in '
'pipeline [{pipeline}] is still in progress }}\n'
).format(pipeline=pipeline.name)
@vcr.use_cassette(
'tests/fixtures/cassettes/api/pipeline/schedule-successful-and-return-new-instance.yml'
)
def test_schedule_pipeline_and_return_new_instance(pipeline):
before_run = pipeline.history()['pipelines'][0]
# By setting the backoff to 0 the test runs faster, since it's all mocked out anyway.
response = pipeline.schedule(return_new_instance=True, backoff_time=0)
assert response.status_code == 200
assert response.is_ok
assert response.content_type == 'application/json'
assert response['counter'] != before_run['counter']
assert (before_run['counter'] + 1) == response['counter']
@vcr.use_cassette(
'tests/fixtures/cassettes/api/pipeline/console-output.yml'
)
def test_console_output_single_stage(pipeline):
instance = pipeline.instance()
metadata, output = next(pipeline.console_output(instance))
assert r'[go] Job completed' in output.decode('utf8')
assert {'pipeline': 'Simple',
'pipeline_counter': instance['counter'],
'stage': 'defaultStage',
'stage_counter': '1',
'job': 'defaultJob',
'job_result': 'Passed',
} == metadata
@vcr.use_cassette(
'tests/fixtures/cassettes/api/pipeline/console-output-multiple-stages.yml'
)
def test_console_output_multiple_stages(pipeline_multiple_stages):
pipeline = pipeline_multiple_stages
valid_args = ['Good Bye', 'Hello', 'ehlo test.somewhere.tld']
valid = 0
for metadata, output in pipeline.console_output():
output = output.decode('utf8')
assert r'[go] Job completed' in output
assert True in (
'<arg>{0}</arg>'.format(job) in output for job in valid_args
), 'No match for {0}'.format(metadata)
valid += 1
assert valid == 3
@vcr.use_cassette(
'tests/fixtures/cassettes/api/pipeline/console-output-job-not-finished.yml'
)
def test_console_output_only_where_stage_has_finished(pipeline_multiple_stages_manual):
# The second stage has been scheduled but has no agent to run on, so the only output in
# the console.log is that there's no console.log To avoid showing that message it'll only
# output if the pipeline has gotten into a finalized state.
pipeline = pipeline_multiple_stages_manual
jobs_with_output = set()
for metadata, output in pipeline.console_output():
if output:
jobs_with_output.add(metadata['job'])
assert 'Ehlo' not in jobs_with_output
assert 'Hello' in jobs_with_output
assert 'Bye' in jobs_with_output
@vcr.use_cassette(
'tests/fixtures/cassettes/api/pipeline/stage.yml'
)
def test_get_stage_for_a_pipeline(pipeline):
stage = pipeline.stage('Hello')
assert isinstance(stage, gocd.api.Stage)
assert stage.pipeline_name == pipeline.name
assert stage.stage_name == 'Hello'
import json
import discord
import sys
import logging
from os import listdir, remove
log = logging.getLogger("bot")
# TODO: Standardize how I do function documentation since it's all over the place
# TODO: Add the new option to the server config
class Bot:
def __init__(self, client):
try:
with open('config.json', 'r+') as json_config_info:
config = json.load(json_config_info)
except IOError:
log.warning("config.json was not found, exiting.")
sys.exit("config.json not found in running directory.")
self.owner_id = config["owner_id"]
self.info_message = config["info_message"]
self.servers = {} # List of server objects
self.initializing = False
self.client = client
self.debug = True # Just used for testing.
def sudo(self, message):
# Allows for owner-only commands.
if message.author.id == self.owner_id:
return True
else:
return False
def get_server(self, server=None, svr_id=None):
# Get server from server list.
if server is not None:
return self.servers[server.id]
elif svr_id is not None:
return self.servers[svr_id]
def init(self, client):
datafiles = listdir("server_data/")
for file in datafiles:
server = Server(client)
# Pull the file ID from the name, so that we can be sure
try:
server.init_from_file("server_data/" + file, client) # Probably a neater way of doing this
except KeyError: # If there's a discrepancy, update the file otg
server.update_data_files(client, "server_data/" + file, file)
self.servers[server.id] = server
def add_new_server(self, client, server):
# Create a new server datafile and object on the fly
server_obj = Server(client)
server_obj.init_from_join(server)
self.servers[server.id] = server_obj
return
def remove_server(self, server):
# Remove the server datafile and object when leaving a server.
print("Removing datafile for {0.name}".format(server))
remove("server_data/{0.id}.json".format(server))
self.servers.pop(server.id)
@staticmethod
def update_datafiles(client):
# Loop through all server objects and update their datafiles to reflect new changes.
datafiles = listdir("server_data/")
for file in datafiles:
server = Server(client)
# Pull the file ID from the name, so that we can be sure
server.update_data_files(client, "server_data/" + file, file) # Probably a neater way of doing this
async def get_server_from_pm(self, message):
# TODO: Work this out
"""
Handle the logic for determining how many servers are shared between the bot and the message author.
Implemented to be used in PMs so we can get the right server object to be able to assign roles through PMs.
:param message:
:return: Server object
"""
# Get the servers shared between the author and the bot
servers_shared = []
for server in self.client.servers:
for member in server.members:
if member.id == message.author.id:
servers_shared.append(member.server)
if len(servers_shared) == 0: # This shouldn't normally appear
await self.client.send_message(message.channel, "Something is wrong. We don't appear to share any servers.")
return None
elif len(servers_shared) == 1: # This makes it really easy, since there's only one server, just use that one
return servers_shared[0]
else: # Things get complicated. From here, it's mostly just message management.
base_message = "Oops, looks like I share more than one server with you. Which server would you like to set your role in? Reply with the digit of the server.\n"
i = 1
for svr in servers_shared:
base_message += "{0}: {1.name}\n".format(i, svr)
i += 1
await self.client.send_message(message.channel, base_message)
# Wait for the message that the user sends back.
server_selection = await get_message(self.client, message, i, base_message)
# For some reason, the filter on wait_for_message isn't consistent, so we have to have more checks outside
if server_selection > i:
await self.client.send_message(message.channel, "That number was too large, try %team again.")
return None
else:
try:
server = servers_shared[(int(server_selection) - 1)]
except IndexError:
await self.client.send_message(message.channel, "That number was too large, try %team again.")
return None
# TODO: Fix possibility of IndexError
return server
class Server:
"""
Allowing servers to have different roles, and to be able to use different role names.
"""
base_roles = ["Instinct", "Valor", "Mystic"]
def __init__(self, client):
self.id = ""
self.name = ""
self.roles = []
self.channel_whitelist = []
self.pm_config = ""
self.exclusive = ""
self.user_ctrl = "0"
self.obj = discord.utils.get(client.servers, id=self.id) # This actually seems to end up as None
# Init on launch, if file has been stored.
def init_from_file(self, datafile_path, client):
# Note: if we do it when the server object is initialized, then we run a risk of it not existing then.
with open(datafile_path, "r", encoding="utf-8") as tmp:
data = json.load(tmp)
self.channel_whitelist = data["team_ch_wl"]
self.pm_config = data["pm"]
self.id = data["server_id"]
self.name = data["server_name"] # Server name was blank in old code, let's fix that
self.roles = data["custom_roles"]
self.exclusive = data["exclusive"]
self.user_ctrl = data["user_ctrl"]
self.obj = discord.utils.get(client.servers, id=self.id) # Needs to be called here and not __init__() for some reason
def update_data_files(self, client, datafile_path, datafile_filename):
# Utility to update all existing datafiles in case I add new stuff to dicts.
# Mostly hardcoded, and requires that the client connect in the first place.
# To use, add the new keys to test_var.
server_id = datafile_filename[:-5]
with open(datafile_path, "r", encoding="utf-8") as tmp:
data = json.load(tmp)
try:
# Add the new keys here
test_var = data["custom_roles"], data["server_id"], data["exclusive"], data["user_ctrl"]
except KeyError:
with open(datafile_path, "r", encoding="utf-8") as tmp:
data = json.load(tmp)
server = discord.utils.get(client.servers, id=server_id)
if server is None:
# This means that we don't belong to the server for some reason. Throws AttributeErrors.
# Just reformat the file so that we don't get errors.
self.name = ""
self.id = server_id
self.roles = []
self.exclusive = "0"
self.user_ctrl = "0"
self.export_to_file()
log.info("Datafile {0}.json blanked due to not belonging to the server anymore.".format(server_id))
return
else:
self.name = server.name
self.id = server.id # omit .json
# These we can assume already exist in the datafiles, so let's not try to re-create them.
self.pm_config = data["pm"]
self.channel_whitelist = data["team_ch_wl"]
# This one already exists, but is empty. We really don't need to worry ourselves with it too much.
# These ones don't exist in the new json files, so we need to add them.
# We have to check to see if the server contains the default roles already, and if not, add them.
self.exclusive = "0"
self.user_ctrl = "0"
for role in server.roles:
if role.name in ["Valor", "Mystic", "Instinct"]:
self.roles.append(role.name)
print("Updated server datafiles for {0.name}".format(server))
self.export_to_file()
def init_from_join(self, server):
# Creation of object on joining server through oauth.
# Note that the first time through, it creates an empty object that should be populated on next boot.
# Kinda roundabout, probably a nicer way to do this
self.id = server.id
self.name = server.name
self.roles = []
self.channel_whitelist = []
self.pm_config = "0"
self.exclusive = "0"
self.obj = server
self.user_ctrl = "0"
output = dict(init_server_datafile)  # copy the template so the shared default dict is not mutated
output["server_id"] = server.id
output["server_name"] = server.name
with open("server_data/{0}.json".format(self.id), "w", encoding="utf-8") as tmp:
json.dump(output, tmp)
def export_to_file(self):
# Take the current server object as it is and push it to a .json file.
with open("server_data/{0}.json".format(self.id), "r", encoding="utf-8") as tmp:
data = json.load(tmp)
data["server_id"] = self.id
data["name"] = self.name
data["custom_roles"] = self.roles # List of names
data["team_ch_wl"] = self.channel_whitelist
data["pm"] = self.pm_config
data["exclusive"] = self.exclusive
data["user_ctrl"] = self.user_ctrl
with open("server_data/{0}.json".format(self.id), "w", encoding="utf-8") as tmp:
json.dump(data, tmp)
def add_custom_role(self, message, external_role=None):
# TODO: Occasionally omits the role from the role list. Probably from failing to write to the file for some reason.
# initialize a custom role that can be added through %team. Server dependent.
# If external_role is not None, then it's being called by something that's not the command, and we're just passing in the names we know.
if external_role is not None:
role_name = external_role
else:
role_name = message.content[13:]
role = discord.utils.get(message.server.roles, name=role_name)
if role is None:
return None
else:
self.roles.append(role.name)
self.export_to_file()
return role
def remove_custom_role(self, message, external_role=None):
# Remove a role from the server object's role list.
# Returns True if it succeeds, or False if not.
if external_role is not None:
role_name = external_role
else:
role_name = message.content[14:]
try:
self.roles.remove(role_name)
self.export_to_file()
return True
except ValueError:  # list.remove raises ValueError when the role is missing
return False
def add_to_whitelist(self, message):
"""Add a channel to the server whitelist and write it out."""
self.channel_whitelist.append(message.channel.id)
self.export_to_file()
def remove_from_whitelist(self, message):
"""Remove the channel from the whitelist and write it out."""
self.channel_whitelist.remove(message.channel.id)
self.export_to_file()
async def check_whitelist(self, message):
"""
Check against the channel whitelist to see if the command should be allowed in this channel.
:param message: message object from context
:return: None if the command goes through, otherwise a message detailing why it didn't, and where to direct the messages.
"""
if self.channel_whitelist is None or message.channel.id in self.channel_whitelist:
# Command does go through
return None
elif message.channel.id not in self.channel_whitelist:
# Command doesn't go through, return a message on where that should be allowed.
if len(self.channel_whitelist) == 1:
return "Please put team requests in <#{0}>.".format(self.channel_whitelist[0])
elif len(self.channel_whitelist) > 1: # Grammar for grammar's sake, any more are ignored.
return "Please put team requests in <#{0}> or <#{1}>.".format(self.channel_whitelist[0], self.channel_whitelist[1])
# TODO: Set this up to basically handle most of the code in the %rank command
def init_default_roles(self, message):
# To be called when someone calls %create_roles, after the roles have been created successfully.
self.add_custom_role(message, "Valor")
self.add_custom_role(message, "Mystic")
self.add_custom_role(message, "Instinct")
return
def check_default_roles(self):
# Check to see if the only roles that exist are the default roles.
for role in self.roles:
if role in ["Mystic", "Valor", "Instinct"]:
continue
else:
return False
else:
return True
def exists_default_roles(self):
# Check to see if default roles exist or not.
for role in self.obj.roles:
if role.name in self.base_roles: # Just assume that if one role exists, then they all do.
return True
else:
return False
def check_role(self, user_input):
"""
A simple check to see if the user's message was in the role list.
:param user_input: Message content.
:return: True if role is in the team list, otherwise False.
"""
for role_name in self.roles:
if role_name == user_input:
return True
return False
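# Equivalent shorthand: return user_input in self.roles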
def list_roles(self):
# Compile roles into a fancy, readable format.
role_list = self.roles
base_message = "roles that can be added with %team are "
if len(role_list) == 1: # If there's only one role addable, make it mostly clean.
base_message += "`{}`."
else:
for role_name in role_list[:-1]:
base_message += "`{}`, ".format(role_name)
else: # For the last object
base_message += "and `{}`.".format(role_list[-1])
return base_message
def generate_config_msg(self):
# Create a message used for %server_config.
if self.roles != []: # pycharm doesn't like this, but self.roles is not None doesn't work right
addable_roles = pp_list(self.roles)
else:
addable_roles = "None"
chan_name_list = []
if self.channel_whitelist != []:
for chan in self.channel_whitelist:
chan_obj = discord.utils.get(self.obj.channels, id=chan)
if chan_obj is not None:
chan_name_list.append(chan_obj.name)
else:
continue
else:
whitelist = pp_list(chan_name_list)
else: # Should trigger if there are no channels in the whitelist.
whitelist = "None"
pm = "optional" if self.pm_config == "0" else "required" # Python is so nice
role_cfg = "exclusive" if self.exclusive == "0" else "multiple"
user_ctrl = "disabled" if self.user_ctrl == "0" else "enabled"
return server_config_message.format(addable_roles, whitelist, pm, role_cfg, user_ctrl)
# Function used to check the message received for an int.
async def get_message(client, message, i, base_message):
msg = await client.wait_for_message(author=message.author, channel=message.channel)
try:
if 0 < int(msg.content) <= i:
return int(msg.content)
else:
await client.send_message(message.channel, base_message)
return await get_message(client, message, i, base_message)  # propagate the selection back up
except ValueError: # In case what they return isn't convertible to an int
await client.send_message(message.channel, base_message)
return await get_message(client, message, i, base_message)
def check_perms(message):
# Only allows execution of command if author has Manage Server permission, or is owner.
if message.server.owner.id == message.author.id:
return True
else:
for role in message.author.roles:
if role.permissions.manage_server: # May throw errors.
return True
else:
return False
def pp_list(ls):
# Take a list of strings and format it nicely
output = ""
for item in ls[:-1]:
output += "{}, ".format(item)
else:
output += ls[-1]
return output
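# e.g. pp_list(["Valor", "Mystic", "Instinct"]) -> "Valor, Mystic, Instinct"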
def get_percentage(amount, total):
if amount > 0 and total > 0:
percentage = (amount / total) * 100
return percentage
else:
return amount # Avoiding div by zero when total (or amount) is 0
init_server_datafile = {
"server_name": "", # Server name, ofc subject to change.
"server_id": "", # Server ID
"team_ch_wl": [], # channel ids where %team is allowed.
"pm": "0", # Whether or not users can set roles in messages or not.
# Here's where I run into a bit of a fix. Setting custom_roles to just a role name means that if the role is
# re-named, it needs to be re-added. However, just using names means that it's made substantially easier to keep
# track of roles. There seems to be a bit of a trade-off in utility here, but I'm going to make it a list for now.
# This does mean possibly having to remake every single json file if I change my mind, though.
"custom_roles": [], # Just role name.
"exclusive": "0", # Whether or not the user can have more than one role. 0 is no, 1 is yes.
"user_ctrl": "0" # Whether or not users can remove their roles that goPC can assign
}
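# A hypothetical populated datafile built from this template (all values are
# illustrative):
# {
#     "server_name": "Example Server",
#     "server_id": "123456789012345678",
#     "team_ch_wl": ["234567890123456789"],
#     "pm": "0",
#     "custom_roles": ["Valor", "Mystic", "Instinct"],
#     "exclusive": "0",
#     "user_ctrl": "0"
# }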
# Required perms for bot operation. Used for sending an oauth link.
server_config_message = '''
Current server settings:
```
Addable roles: {0}
Channels whitelisted: {1}
PM settings: {2}
Role settings:
Role exclusivity: {3}
User control (whether or not %leaveteam is usable): {4}
```'''
# Flags used for config commands
flags = {
"pm": {
"optional": "0",
"required": "1",
},
"role": {
"exclusive": "0",
"multiple": "1"
},
"ctrl": {
"disabled": "0",
"enabled": "1"
}
}
required_perms = discord.Permissions.none()
required_perms.read_messages = True
required_perms.send_messages = True
required_perms.manage_roles = True
# Default perms for the team role.
team_perms = discord.Permissions.none()
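# Hypothetical use of required_perms when generating the invite link (the
# client id is a placeholder; the original bot may build the URL elsewhere):
#   invite = discord.utils.oauth_url('<client_id>', permissions=required_perms)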
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for protocol buffer enum types."""
import enum
class Likelihood(enum.IntEnum):
"""
Categorization of results based on how likely they are to represent a match,
based on the number of elements they contain which imply a match.
Attributes:
LIKELIHOOD_UNSPECIFIED (int): Default value; same as POSSIBLE.
VERY_UNLIKELY (int): Few matching elements.
UNLIKELY (int)
POSSIBLE (int): Some matching elements.
LIKELY (int)
VERY_LIKELY (int): Many matching elements.
"""
LIKELIHOOD_UNSPECIFIED = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
POSSIBLE = 3
LIKELY = 4
VERY_LIKELY = 5
class FileType(enum.IntEnum):
"""
Definitions of file type groups to scan.
Attributes:
FILE_TYPE_UNSPECIFIED (int): Includes all files.
BINARY_FILE (int): Includes all file extensions not covered by text file types.
TEXT_FILE (int): Included file extensions:
asc, brf, c, cc, cpp, csv, cxx, c++, cs, css, dart, eml, go, h, hh, hpp,
hxx, h++, hs, html, htm, shtml, shtm, xhtml, lhs, ini, java, js, json,
ocaml, md, mkd, markdown, m, ml, mli, pl, pm, php, phtml, pht, py, pyw,
rb, rbw, rs, rc, scala, sh, sql, tex, txt, text, tsv, vcard, vcs, wml,
xml, xsl, xsd, yml, yaml.
"""
FILE_TYPE_UNSPECIFIED = 0
BINARY_FILE = 1
TEXT_FILE = 2
class DayOfWeek(enum.IntEnum):
"""
Represents a day of week.
Attributes:
DAY_OF_WEEK_UNSPECIFIED (int): The unspecified day-of-week.
MONDAY (int): The day-of-week of Monday.
TUESDAY (int): The day-of-week of Tuesday.
WEDNESDAY (int): The day-of-week of Wednesday.
THURSDAY (int): The day-of-week of Thursday.
FRIDAY (int): The day-of-week of Friday.
SATURDAY (int): The day-of-week of Saturday.
SUNDAY (int): The day-of-week of Sunday.
"""
DAY_OF_WEEK_UNSPECIFIED = 0
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
SUNDAY = 7
class ContentOption(enum.IntEnum):
"""
Options describing which parts of the provided content should be scanned.
Attributes:
CONTENT_UNSPECIFIED (int): Includes entire content of a file or a data stream.
CONTENT_TEXT (int): Text content within the data, excluding any metadata.
CONTENT_IMAGE (int): Images found in the data.
"""
CONTENT_UNSPECIFIED = 0
CONTENT_TEXT = 1
CONTENT_IMAGE = 2
class MatchingType(enum.IntEnum):
"""
Type of the match which can be applied to different ways of matching, like
Dictionary, regular expression and intersecting with findings of another
info type.
Attributes:
MATCHING_TYPE_UNSPECIFIED (int): Invalid.
MATCHING_TYPE_FULL_MATCH (int): Full match.
- Dictionary: join of Dictionary results matched complete finding quote
- Regex: all regex matches fill a finding quote start to end
- Exclude info type: completely inside affecting info types findings
MATCHING_TYPE_PARTIAL_MATCH (int): Partial match.
- Dictionary: at least one of the tokens in the finding matches
- Regex: substring of the finding matches
- Exclude info type: intersects with affecting info types findings
MATCHING_TYPE_INVERSE_MATCH (int): Inverse match.
- Dictionary: no tokens in the finding match the dictionary
- Regex: finding doesn't match the regex
- Exclude info type: no intersection with affecting info types findings
"""
MATCHING_TYPE_UNSPECIFIED = 0
MATCHING_TYPE_FULL_MATCH = 1
MATCHING_TYPE_PARTIAL_MATCH = 2
MATCHING_TYPE_INVERSE_MATCH = 3
class InfoTypeSupportedBy(enum.IntEnum):
"""
Parts of the APIs which use certain infoTypes.
Attributes:
ENUM_TYPE_UNSPECIFIED (int)
INSPECT (int): Supported by the inspect operations.
RISK_ANALYSIS (int): Supported by the risk analysis operations.
"""
ENUM_TYPE_UNSPECIFIED = 0
INSPECT = 1
RISK_ANALYSIS = 2
class RelationalOperator(enum.IntEnum):
"""
Operators available for comparing the value of fields.
Attributes:
RELATIONAL_OPERATOR_UNSPECIFIED (int)
EQUAL_TO (int): Equal.
NOT_EQUAL_TO (int): Not equal to.
GREATER_THAN (int): Greater than.
LESS_THAN (int): Less than.
GREATER_THAN_OR_EQUALS (int): Greater than or equals.
LESS_THAN_OR_EQUALS (int): Less than or equals.
EXISTS (int): Exists
"""
RELATIONAL_OPERATOR_UNSPECIFIED = 0
EQUAL_TO = 1
NOT_EQUAL_TO = 2
GREATER_THAN = 3
LESS_THAN = 4
GREATER_THAN_OR_EQUALS = 5
LESS_THAN_OR_EQUALS = 6
EXISTS = 7
class DlpJobType(enum.IntEnum):
"""
An enum to represent the various type of DLP jobs.
Attributes:
DLP_JOB_TYPE_UNSPECIFIED (int)
INSPECT_JOB (int): The job inspected Google Cloud for sensitive data.
RISK_ANALYSIS_JOB (int): The job executed a Risk Analysis computation.
"""
DLP_JOB_TYPE_UNSPECIFIED = 0
INSPECT_JOB = 1
RISK_ANALYSIS_JOB = 2
class StoredInfoTypeState(enum.IntEnum):
"""
State of a StoredInfoType version.
Attributes:
STORED_INFO_TYPE_STATE_UNSPECIFIED (int)
PENDING (int): StoredInfoType version is being created.
READY (int): StoredInfoType version is ready for use.
FAILED (int): StoredInfoType creation failed. All relevant error messages are returned
in the ``StoredInfoTypeVersion`` message.
INVALID (int): StoredInfoType is no longer valid because artifacts stored in
user-controlled storage were modified. To fix an invalid StoredInfoType,
use the ``UpdateStoredInfoType`` method to create a new version.
"""
STORED_INFO_TYPE_STATE_UNSPECIFIED = 0
PENDING = 1
READY = 2
FAILED = 3
INVALID = 4
class CustomInfoType(object):
class ExclusionType(enum.IntEnum):
"""
Attributes:
EXCLUSION_TYPE_UNSPECIFIED (int): A finding of this custom info type will not be excluded from results.
EXCLUSION_TYPE_EXCLUDE (int): A finding of this custom info type will be excluded from final results,
but can still affect rule execution.
"""
EXCLUSION_TYPE_UNSPECIFIED = 0
EXCLUSION_TYPE_EXCLUDE = 1
class CloudStorageOptions(object):
class SampleMethod(enum.IntEnum):
"""
How to sample bytes if not all bytes are scanned. Meaningful only when
used in conjunction with bytes\_limit\_per\_file. If not specified,
scanning would start from the top.
Attributes:
SAMPLE_METHOD_UNSPECIFIED (int)
TOP (int): Scan from the top (default).
RANDOM_START (int): For each file larger than bytes\_limit\_per\_file, randomly pick the
offset to start scanning. The scanned bytes are contiguous.
"""
SAMPLE_METHOD_UNSPECIFIED = 0
TOP = 1
RANDOM_START = 2
class BigQueryOptions(object):
class SampleMethod(enum.IntEnum):
"""
How to sample rows if not all rows are scanned. Meaningful only when
used in conjunction with rows\_limit. If not specified, scanning would
start from the top.
Attributes:
SAMPLE_METHOD_UNSPECIFIED (int)
TOP (int): Scan from the top (default).
RANDOM_START (int): Randomly pick the row to start scanning. The scanned rows are contiguous.
"""
SAMPLE_METHOD_UNSPECIFIED = 0
TOP = 1
RANDOM_START = 2
class ByteContentItem(object):
class BytesType(enum.IntEnum):
"""
Attributes:
BYTES_TYPE_UNSPECIFIED (int)
IMAGE (int)
IMAGE_JPEG (int)
IMAGE_BMP (int)
IMAGE_PNG (int)
IMAGE_SVG (int)
TEXT_UTF8 (int)
"""
BYTES_TYPE_UNSPECIFIED = 0
IMAGE = 6
IMAGE_JPEG = 1
IMAGE_BMP = 2
IMAGE_PNG = 3
IMAGE_SVG = 4
TEXT_UTF8 = 5
class OutputStorageConfig(object):
class OutputSchema(enum.IntEnum):
"""
Predefined schemas for storing findings.
Attributes:
OUTPUT_SCHEMA_UNSPECIFIED (int)
BASIC_COLUMNS (int): Basic schema including only ``info_type``, ``quote``, ``certainty``, and
``timestamp``.
GCS_COLUMNS (int): Schema tailored to findings from scanning Google Cloud Storage.
DATASTORE_COLUMNS (int): Schema tailored to findings from scanning Google Datastore.
BIG_QUERY_COLUMNS (int): Schema tailored to findings from scanning Google BigQuery.
ALL_COLUMNS (int): Schema containing all columns.
"""
OUTPUT_SCHEMA_UNSPECIFIED = 0
BASIC_COLUMNS = 1
GCS_COLUMNS = 2
DATASTORE_COLUMNS = 3
BIG_QUERY_COLUMNS = 4
ALL_COLUMNS = 5
class TimePartConfig(object):
class TimePart(enum.IntEnum):
"""
Attributes:
TIME_PART_UNSPECIFIED (int)
YEAR (int): [0-9999]
MONTH (int): [1-12]
DAY_OF_MONTH (int): [1-31]
DAY_OF_WEEK (int): [1-7]
WEEK_OF_YEAR (int): [1-52]
HOUR_OF_DAY (int): [0-23]
"""
TIME_PART_UNSPECIFIED = 0
YEAR = 1
MONTH = 2
DAY_OF_MONTH = 3
DAY_OF_WEEK = 4
WEEK_OF_YEAR = 5
HOUR_OF_DAY = 6
class CharsToIgnore(object):
class CommonCharsToIgnore(enum.IntEnum):
"""
Attributes:
COMMON_CHARS_TO_IGNORE_UNSPECIFIED (int)
NUMERIC (int): 0-9
ALPHA_UPPER_CASE (int): A-Z
ALPHA_LOWER_CASE (int): a-z
PUNCTUATION (int): US Punctuation, one of !"#$%&'()\*+,-./:;<=>?@[]^\_\`{\|}~
WHITESPACE (int): Whitespace character
"""
COMMON_CHARS_TO_IGNORE_UNSPECIFIED = 0
NUMERIC = 1
ALPHA_UPPER_CASE = 2
ALPHA_LOWER_CASE = 3
PUNCTUATION = 4
WHITESPACE = 5
class CryptoReplaceFfxFpeConfig(object):
class FfxCommonNativeAlphabet(enum.IntEnum):
"""
These are commonly used subsets of the alphabet that the FFX mode
natively supports. In the algorithm, the alphabet is selected using
the "radix". Therefore each corresponds to particular radix.
Attributes:
FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED (int)
NUMERIC (int): [0-9] (radix of 10)
HEXADECIMAL (int): [0-9A-F] (radix of 16)
UPPER_CASE_ALPHA_NUMERIC (int): [0-9A-Z] (radix of 36)
ALPHA_NUMERIC (int): [0-9A-Za-z] (radix of 62)
"""
FFX_COMMON_NATIVE_ALPHABET_UNSPECIFIED = 0
NUMERIC = 1
HEXADECIMAL = 2
UPPER_CASE_ALPHA_NUMERIC = 3
ALPHA_NUMERIC = 4
class RecordCondition(object):
class Expressions(object):
class LogicalOperator(enum.IntEnum):
"""
Attributes:
LOGICAL_OPERATOR_UNSPECIFIED (int)
AND (int)
"""
LOGICAL_OPERATOR_UNSPECIFIED = 0
AND = 1
class TransformationSummary(object):
class TransformationResultCode(enum.IntEnum):
"""
Possible outcomes of transformations.
Attributes:
TRANSFORMATION_RESULT_CODE_UNSPECIFIED (int)
SUCCESS (int)
ERROR (int)
"""
TRANSFORMATION_RESULT_CODE_UNSPECIFIED = 0
SUCCESS = 1
ERROR = 2
class JobTrigger(object):
class Status(enum.IntEnum):
"""
Whether the trigger is currently active. If PAUSED or CANCELLED, no jobs
will be created with this configuration. The service may automatically
pause triggers experiencing frequent errors. To restart a job, set the
status to HEALTHY after correcting user errors.
Attributes:
STATUS_UNSPECIFIED (int)
HEALTHY (int): Trigger is healthy.
PAUSED (int): Trigger is temporarily paused.
CANCELLED (int): Trigger is cancelled and can not be resumed.
"""
STATUS_UNSPECIFIED = 0
HEALTHY = 1
PAUSED = 2
CANCELLED = 3
class DlpJob(object):
class JobState(enum.IntEnum):
"""
Attributes:
JOB_STATE_UNSPECIFIED (int)
PENDING (int): The job has not yet started.
RUNNING (int): The job is currently running.
DONE (int): The job is no longer running.
CANCELED (int): The job was canceled before it could complete.
FAILED (int): The job had an error and did not complete.
"""
JOB_STATE_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
CANCELED = 4
FAILED = 5
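# Quick reference, derived from the definitions above:
#   Likelihood.POSSIBLE.value        # -> 3
#   DlpJob.JobState(2).name          # -> 'RUNNING'
#   ByteContentItem.BytesType.IMAGE  # -> <BytesType.IMAGE: 6> (note: 6, not 1)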
from __future__ import with_statement
from sqlalchemy import (
testing, exc as sa_exc, event, String, Column, Table, select, func)
from sqlalchemy.testing import (
fixtures, engines, eq_, assert_raises, assert_raises_message,
assert_warnings, mock, expect_warnings)
from sqlalchemy.orm import (
exc as orm_exc, Session, mapper, sessionmaker, create_session,
relationship, attributes)
from sqlalchemy.testing.util import gc_collect
from test.orm._fixtures import FixtureTest
class SessionTransactionTest(FixtureTest):
run_inserts = None
__backend__ = True
def test_no_close_transaction_on_flush(self):
User, users = self.classes.User, self.tables.users
c = testing.db.connect()
try:
mapper(User, users)
s = create_session(bind=c)
s.begin()
tran = s.transaction
s.add(User(name='first'))
s.flush()
c.execute("select * from users")
u = User(name='two')
s.add(u)
s.flush()
u = User(name='third')
s.add(u)
s.flush()
assert s.transaction is tran
tran.close()
finally:
c.close()
@engines.close_open_connections
def test_subtransaction_on_external(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
conn = testing.db.connect()
trans = conn.begin()
sess = create_session(bind=conn, autocommit=False, autoflush=True)
sess.begin(subtransactions=True)
u = User(name='ed')
sess.add(u)
sess.flush()
sess.commit() # commit does nothing
trans.rollback() # rolls back
assert len(sess.query(User).all()) == 0
sess.close()
@testing.requires.savepoints
@engines.close_open_connections
def test_external_nested_transaction(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
try:
conn = testing.db.connect()
trans = conn.begin()
sess = create_session(bind=conn, autocommit=False,
autoflush=True)
u1 = User(name='u1')
sess.add(u1)
sess.flush()
sess.begin_nested()
u2 = User(name='u2')
sess.add(u2)
sess.flush()
sess.rollback()
trans.commit()
assert len(sess.query(User).all()) == 1
except:
conn.close()
raise
@testing.requires.savepoints
def test_nested_accounting_new_items_removed(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = create_session(bind=testing.db)
session.begin()
session.begin_nested()
u1 = User(name='u1')
session.add(u1)
session.commit()
assert u1 in session
session.rollback()
assert u1 not in session
@testing.requires.savepoints
def test_nested_accounting_deleted_items_restored(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = create_session(bind=testing.db)
session.begin()
u1 = User(name='u1')
session.add(u1)
session.commit()
session.begin()
u1 = session.query(User).first()
session.begin_nested()
session.delete(u1)
session.commit()
assert u1 not in session
session.rollback()
assert u1 in session
@testing.requires.savepoints
def test_heavy_nesting(self):
users = self.tables.users
session = create_session(bind=testing.db)
session.begin()
session.connection().execute(users.insert().values(
name='user1'))
session.begin(subtransactions=True)
session.begin_nested()
session.connection().execute(users.insert().values(
name='user2'))
assert session.connection().execute(
'select count(1) from users').scalar() == 2
session.rollback()
assert session.connection().execute(
'select count(1) from users').scalar() == 1
session.connection().execute(users.insert().values(
name='user3'))
session.commit()
assert session.connection().execute(
'select count(1) from users').scalar() == 2
@testing.requires.independent_connections
def test_transactions_isolated(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
s1 = create_session(bind=testing.db, autocommit=False)
s2 = create_session(bind=testing.db, autocommit=False)
u1 = User(name='u1')
s1.add(u1)
s1.flush()
assert s2.query(User).all() == []
@testing.requires.two_phase_transactions
def test_twophase(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
# TODO: mock up a failure condition here
# to ensure a rollback succeeds
mapper(User, users)
mapper(Address, addresses)
engine2 = engines.testing_engine()
sess = create_session(autocommit=True, autoflush=False,
twophase=True)
sess.bind_mapper(User, testing.db)
sess.bind_mapper(Address, engine2)
sess.begin()
u1 = User(name='u1')
a1 = Address(email_address='u1@e')
sess.add_all((u1, a1))
sess.commit()
sess.close()
engine2.dispose()
assert users.count().scalar() == 1
assert addresses.count().scalar() == 1
@testing.requires.independent_connections
def test_invalidate(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = Session()
u = User(name='u1')
sess.add(u)
sess.flush()
c1 = sess.connection(User)
sess.invalidate()
assert c1.invalidated
eq_(sess.query(User).all(), [])
c2 = sess.connection(User)
assert not c2.invalidated
def test_subtransaction_on_noautocommit(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session(autocommit=False, autoflush=True)
sess.begin(subtransactions=True)
u = User(name='u1')
sess.add(u)
sess.flush()
sess.commit() # commit does nothing
sess.rollback() # rolls back
assert len(sess.query(User).all()) == 0
sess.close()
@testing.requires.savepoints
def test_nested_transaction(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
sess.begin()
u = User(name='u1')
sess.add(u)
sess.flush()
sess.begin_nested() # nested transaction
u2 = User(name='u2')
sess.add(u2)
sess.flush()
sess.rollback()
sess.commit()
assert len(sess.query(User).all()) == 1
sess.close()
@testing.requires.savepoints
def test_nested_autotrans(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session(autocommit=False)
u = User(name='u1')
sess.add(u)
sess.flush()
sess.begin_nested() # nested transaction
u2 = User(name='u2')
sess.add(u2)
sess.flush()
sess.rollback()
sess.commit()
assert len(sess.query(User).all()) == 1
sess.close()
@testing.requires.savepoints
def test_nested_transaction_connection_add(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=True)
sess.begin()
sess.begin_nested()
u1 = User(name='u1')
sess.add(u1)
sess.flush()
sess.rollback()
u2 = User(name='u2')
sess.add(u2)
sess.commit()
eq_(set(sess.query(User).all()), set([u2]))
sess.begin()
sess.begin_nested()
u3 = User(name='u3')
sess.add(u3)
sess.commit() # commit the nested transaction
sess.rollback()
eq_(set(sess.query(User).all()), set([u2]))
sess.close()
@testing.requires.savepoints
def test_mixed_transaction_control(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=True)
sess.begin()
sess.begin_nested()
transaction = sess.begin(subtransactions=True)
sess.add(User(name='u1'))
transaction.commit()
sess.commit()
sess.commit()
sess.close()
eq_(len(sess.query(User).all()), 1)
t1 = sess.begin()
t2 = sess.begin_nested()
sess.add(User(name='u2'))
t2.commit()
assert sess.transaction is t1
sess.close()
@testing.requires.savepoints
def test_mixed_transaction_close(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=False)
sess.begin_nested()
sess.add(User(name='u1'))
sess.flush()
sess.close()
sess.add(User(name='u2'))
sess.commit()
sess.close()
eq_(len(sess.query(User).all()), 1)
def test_continue_flushing_on_commit(self):
"""test that post-flush actions get flushed also if
we're in commit()"""
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
to_flush = [User(name='ed'), User(name='jack'), User(name='wendy')]
@event.listens_for(sess, "after_flush_postexec")
def add_another_user(session, ctx):
if to_flush:
session.add(to_flush.pop(0))
x = [1]
@event.listens_for(sess, "after_commit") # noqa
def add_another_user(session):
x[0] += 1
sess.add(to_flush.pop())
sess.commit()
eq_(x, [2])
eq_(
sess.scalar(select([func.count(users.c.id)])), 3
)
def test_continue_flushing_guard(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
@event.listens_for(sess, "after_flush_postexec")
def add_another_user(session, ctx):
session.add(User(name='x'))
sess.add(User(name='x'))
assert_raises_message(
orm_exc.FlushError,
"Over 100 subsequent flushes have occurred",
sess.commit
)
def test_error_on_using_inactive_session_commands(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session(autocommit=True)
sess.begin()
sess.begin(subtransactions=True)
sess.add(User(name='u1'))
sess.flush()
sess.rollback()
assert_raises_message(sa_exc.InvalidRequestError,
"This Session's transaction has been "
r"rolled back by a nested rollback\(\) "
"call. To begin a new transaction, "
r"issue Session.rollback\(\) first.",
sess.begin, subtransactions=True)
sess.close()
def test_no_sql_during_commit(self):
sess = create_session(bind=testing.db, autocommit=False)
@event.listens_for(sess, "after_commit")
def go(session):
session.execute("select 1")
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction.",
sess.commit)
def test_no_sql_during_prepare(self):
sess = create_session(bind=testing.db, autocommit=False, twophase=True)
sess.prepare()
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction.",
sess.execute, "select 1")
def test_no_prepare_wo_twophase(self):
sess = create_session(bind=testing.db, autocommit=False)
assert_raises_message(sa_exc.InvalidRequestError,
"'twophase' mode not enabled, or not root "
"transaction; can't prepare.",
sess.prepare)
def test_closed_status_check(self):
sess = create_session()
trans = sess.begin()
trans.rollback()
assert_raises_message(
sa_exc.ResourceClosedError, "This transaction is closed",
trans.rollback)
assert_raises_message(
sa_exc.ResourceClosedError, "This transaction is closed",
trans.commit)
def test_deactive_status_check(self):
sess = create_session()
trans = sess.begin()
trans2 = sess.begin(subtransactions=True)
trans2.rollback()
assert_raises_message(
sa_exc.InvalidRequestError,
"This Session's transaction has been rolled back by a nested "
"rollback\(\) call. To begin a new transaction, issue "
"Session.rollback\(\) first.",
trans.commit
)
def test_deactive_status_check_w_exception(self):
sess = create_session()
trans = sess.begin()
trans2 = sess.begin(subtransactions=True)
try:
raise Exception("test")
except:
trans2.rollback(_capture_exception=True)
assert_raises_message(
sa_exc.InvalidRequestError,
"This Session's transaction has been rolled back due to a "
"previous exception during flush. To begin a new transaction "
"with this Session, first issue Session.rollback\(\). "
"Original exception was: test",
trans.commit
)
def _inactive_flushed_session_fixture(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
u1 = User(id=1, name='u1')
sess.add(u1)
sess.commit()
sess.add(User(id=1, name='u2'))
assert_raises(
orm_exc.FlushError, sess.flush
)
return sess, u1
def test_execution_options_begin_transaction(self):
bind = mock.Mock()
sess = Session(bind=bind)
c1 = sess.connection(execution_options={'isolation_level': 'FOO'})
eq_(
bind.mock_calls,
[
mock.call.contextual_connect(),
mock.call.contextual_connect().
execution_options(isolation_level='FOO'),
mock.call.contextual_connect().execution_options().begin()
]
)
eq_(c1, bind.contextual_connect().execution_options())
def test_execution_options_ignored_mid_transaction(self):
bind = mock.Mock()
conn = mock.Mock(engine=bind)
bind.contextual_connect = mock.Mock(return_value=conn)
sess = Session(bind=bind)
sess.execute("select 1")
with expect_warnings(
"Connection is already established for the "
"given bind; execution_options ignored"):
sess.connection(execution_options={'isolation_level': 'FOO'})
def test_warning_on_using_inactive_session_new(self):
User = self.classes.User
sess, u1 = self._inactive_flushed_session_fixture()
u2 = User(name='u2')
sess.add(u2)
def go():
sess.rollback()
assert_warnings(go,
["Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."],
)
assert u2 not in sess
assert u1 in sess
def test_warning_on_using_inactive_session_dirty(self):
sess, u1 = self._inactive_flushed_session_fixture()
u1.name = 'newname'
def go():
sess.rollback()
assert_warnings(go,
["Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."],
)
assert u1 in sess
assert u1 not in sess.dirty
def test_warning_on_using_inactive_session_delete(self):
sess, u1 = self._inactive_flushed_session_fixture()
sess.delete(u1)
def go():
sess.rollback()
assert_warnings(go,
["Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."],
)
assert u1 in sess
assert u1 not in sess.deleted
def test_warning_on_using_inactive_session_rollback_evt(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
u1 = User(id=1, name='u1')
sess.add(u1)
sess.commit()
u3 = User(name='u3')
@event.listens_for(sess, "after_rollback")
def evt(s):
sess.add(u3)
sess.add(User(id=1, name='u2'))
def go():
assert_raises(
orm_exc.FlushError, sess.flush
)
assert_warnings(go,
["Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."],
)
assert u3 not in sess
def test_preserve_flush_error(self):
User = self.classes.User
sess, u1 = self._inactive_flushed_session_fixture()
for i in range(5):
assert_raises_message(sa_exc.InvalidRequestError,
"^This Session's transaction has been "
r"rolled back due to a previous exception "
"during flush. To "
"begin a new transaction with this "
"Session, first issue "
r"Session.rollback\(\). Original exception "
"was:",
sess.commit)
sess.rollback()
sess.add(User(id=5, name='some name'))
sess.commit()
def test_no_autocommit_with_explicit_commit(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
session = create_session(autocommit=False)
session.add(User(name='ed'))
session.transaction.commit()
assert session.transaction is not None, \
'autocommit=False should start a new transaction'
class _LocalFixture(FixtureTest):
run_setup_mappers = 'once'
run_inserts = None
session = sessionmaker()
@classmethod
def setup_mappers(cls):
User, Address = cls.classes.User, cls.classes.Address
users, addresses = cls.tables.users, cls.tables.addresses
mapper(
User, users, properties={
'addresses': relationship(
Address, backref='user', cascade="all, delete-orphan",
order_by=addresses.c.id),
})
mapper(Address, addresses)
class FixtureDataTest(_LocalFixture):
run_inserts = 'each'
__backend__ = True
def test_attrs_on_rollback(self):
User = self.classes.User
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = 'ed'
sess.rollback()
eq_(u1.name, 'jack')
def test_commit_persistent(self):
User = self.classes.User
sess = self.session()
u1 = sess.query(User).get(7)
u1.name = 'ed'
sess.flush()
sess.commit()
eq_(u1.name, 'ed')
def test_concurrent_commit_persistent(self):
User = self.classes.User
s1 = self.session()
u1 = s1.query(User).get(7)
u1.name = 'ed'
s1.commit()
s2 = self.session()
u2 = s2.query(User).get(7)
assert u2.name == 'ed'
u2.name = 'will'
s2.commit()
assert u1.name == 'will'
class CleanSavepointTest(FixtureTest):
"""test the behavior for [ticket:2452] - rollback on begin_nested()
only expires objects tracked as being modified in that transaction.
"""
run_inserts = None
__backend__ = True
def _run_test(self, update_fn):
User, users = self.classes.User, self.tables.users
mapper(User, users)
s = Session(bind=testing.db)
u1 = User(name='u1')
u2 = User(name='u2')
s.add_all([u1, u2])
s.commit()
u1.name
u2.name
s.begin_nested()
update_fn(s, u2)
eq_(u2.name, 'u2modified')
s.rollback()
eq_(u1.__dict__['name'], 'u1')
assert 'name' not in u2.__dict__
eq_(u2.name, 'u2')
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint(self):
def update_fn(s, u2):
u2.name = 'u2modified'
self._run_test(update_fn)
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint_agg_upd_eval(self):
User = self.classes.User
def update_fn(s, u2):
s.query(User).filter_by(name='u2').update(
dict(name='u2modified'), synchronize_session='evaluate')
self._run_test(update_fn)
@testing.requires.savepoints
def test_rollback_ignores_clean_on_savepoint_agg_upd_fetch(self):
User = self.classes.User
def update_fn(s, u2):
s.query(User).filter_by(name='u2').update(
dict(name='u2modified'),
synchronize_session='fetch')
self._run_test(update_fn)
class ContextManagerTest(FixtureTest):
run_inserts = None
__backend__ = True
@testing.requires.savepoints
@engines.close_open_connections
def test_contextmanager_nested_rollback(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session()
def go():
with sess.begin_nested():
sess.add(User()) # name can't be null
sess.flush()
# and not InvalidRequestError
assert_raises(
sa_exc.DBAPIError,
go
)
with sess.begin_nested():
sess.add(User(name='u1'))
eq_(sess.query(User).count(), 1)
def test_contextmanager_commit(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session(autocommit=True)
with sess.begin():
sess.add(User(name='u1'))
sess.rollback()
eq_(sess.query(User).count(), 1)
def test_contextmanager_rollback(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = Session(autocommit=True)
def go():
with sess.begin():
sess.add(User()) # name can't be null
assert_raises(
sa_exc.DBAPIError,
go
)
eq_(sess.query(User).count(), 0)
with sess.begin():
sess.add(User(name='u1'))
eq_(sess.query(User).count(), 1)
class AutoExpireTest(_LocalFixture):
__backend__ = True
def test_expunge_pending_on_rollback(self):
User = self.classes.User
sess = self.session()
u2 = User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
def test_trans_pending_cleared_on_commit(self):
User = self.classes.User
sess = self.session()
u2 = User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.commit()
assert u2 in sess
u3 = User(name='anotheruser')
sess.add(u3)
sess.rollback()
assert u3 not in sess
assert u2 in sess
def test_update_deleted_on_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
# this actually tests that the delete() operation,
# when cascaded to the "addresses" collection, does not
# trigger a flush (via lazyload) before the cascade is complete.
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
@testing.requires.predictable_gc
def test_gced_delete_on_rollback(self):
User, users = self.classes.User, self.tables.users
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.delete(u1)
u1_state = attributes.instance_state(u1)
assert u1_state in s.identity_map.all_states()
assert u1_state in s._deleted
s.flush()
assert u1_state not in s.identity_map.all_states()
assert u1_state not in s._deleted
del u1
gc_collect()
assert u1_state.obj() is None
s.rollback()
assert u1_state in s.identity_map.all_states()
u1 = s.query(User).filter_by(name='ed').one()
assert u1_state not in s.identity_map.all_states()
assert s.scalar(users.count()) == 1
s.delete(u1)
s.flush()
assert s.scalar(users.count()) == 0
s.commit()
def test_trans_deleted_cleared_on_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.delete(u1)
s.commit()
assert u1 not in s
s.rollback()
assert u1 not in s
def test_update_deleted_on_rollback_cascade(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
s.delete(u1)
assert u1 in s.deleted
assert u1.addresses[0] in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
assert u1.addresses[0] not in s.deleted
def test_update_deleted_on_rollback_orphan(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
a1 = u1.addresses[0]
u1.addresses.remove(a1)
s.flush()
eq_(s.query(Address).filter(Address.email_address == 'foo').all(), [])
s.rollback()
assert a1 not in s.deleted
assert u1.addresses == [a1]
def test_commit_pending(self):
User = self.classes.User
sess = self.session()
u1 = User(name='newuser')
sess.add(u1)
sess.flush()
sess.commit()
eq_(u1.name, 'newuser')
def test_concurrent_commit_pending(self):
User = self.classes.User
s1 = self.session()
u1 = User(name='edward')
s1.add(u1)
s1.commit()
s2 = self.session()
u2 = s2.query(User).filter(User.name == 'edward').one()
u2.name = 'will'
s2.commit()
assert u1.name == 'will'
class TwoPhaseTest(_LocalFixture):
__backend__ = True
@testing.requires.two_phase_transactions
def test_rollback_on_prepare(self):
User = self.classes.User
s = self.session(twophase=True)
u = User(name='ed')
s.add(u)
s.prepare()
s.rollback()
assert u not in s
class RollbackRecoverTest(_LocalFixture):
__backend__ = True
def test_pk_violation(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
a1 = Address(email_address='foo')
u1 = User(id=1, name='ed', addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address='bar')
u2 = User(id=1, name='jack', addresses=[a2])
u1.name = 'edward'
a1.email_address = 'foober'
s.add(u2)
assert_raises(orm_exc.FlushError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
assert u1.name == 'ed'
assert a1.email_address == 'foo'
u1.name = 'edward'
a1.email_address = 'foober'
s.commit()
eq_(
s.query(User).all(),
[User(id=1, name='edward',
addresses=[Address(email_address='foober')])]
)
@testing.requires.savepoints
def test_pk_violation_with_savepoint(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
a1 = Address(email_address='foo')
u1 = User(id=1, name='ed', addresses=[a1])
s.add(u1)
s.commit()
a2 = Address(email_address='bar')
u2 = User(id=1, name='jack', addresses=[a2])
u1.name = 'edward'
a1.email_address = 'foober'
s.begin_nested()
s.add(u2)
assert_raises(orm_exc.FlushError, s.commit)
assert_raises(sa_exc.InvalidRequestError, s.commit)
s.rollback()
assert u2 not in s
assert a2 not in s
assert u1 in s
assert a1 in s
s.commit()
eq_(
s.query(User).all(),
[
User(
id=1, name='edward',
addresses=[Address(email_address='foober')])])
class SavepointTest(_LocalFixture):
__backend__ = True
@testing.requires.savepoints
def test_savepoint_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
u2 = User(name='jack')
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name='wendy')
u4 = User(name='foo')
u1.name = 'edward'
u2.name = 'jackward'
s.add_all([u3, u4])
eq_(
s.query(User.name).order_by(User.id).all(),
[('edward',), ('jackward',), ('wendy',), ('foo',)])
s.rollback()
assert u1.name == 'ed'
assert u2.name == 'jack'
eq_(
s.query(User.name).order_by(User.id).all(),
[('ed',), ('jack',)])
s.commit()
assert u1.name == 'ed'
assert u2.name == 'jack'
eq_(s.query(User.name).order_by(User.id).all(), [('ed',), ('jack',)])
@testing.requires.savepoints
def test_savepoint_delete(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
eq_(s.query(User).filter_by(name='ed').count(), 1)
s.begin_nested()
s.delete(u1)
s.commit()
eq_(s.query(User).filter_by(name='ed').count(), 0)
s.commit()
@testing.requires.savepoints
def test_savepoint_commit(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
u2 = User(name='jack')
s.add_all([u1, u2])
s.begin_nested()
u3 = User(name='wendy')
u4 = User(name='foo')
u1.name = 'edward'
u2.name = 'jackward'
s.add_all([u3, u4])
eq_(
s.query(User.name).order_by(User.id).all(),
[('edward',), ('jackward',), ('wendy',), ('foo',)])
s.commit()
def go():
assert u1.name == 'edward'
assert u2.name == 'jackward'
eq_(
s.query(User.name).order_by(User.id).all(),
[('edward',), ('jackward',), ('wendy',), ('foo',)])
self.assert_sql_count(testing.db, go, 1)
s.commit()
eq_(
s.query(User.name).order_by(User.id).all(),
[('edward',), ('jackward',), ('wendy',), ('foo',)])
@testing.requires.savepoints
def test_savepoint_rollback_collections(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
u1.name = 'edward'
u1.addresses.append(Address(email_address='bar'))
s.begin_nested()
u2 = User(name='jack', addresses=[Address(email_address='bat')])
s.add(u2)
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
])
s.rollback()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
])
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
]
)
@testing.requires.savepoints
def test_savepoint_commit_collections(self):
User, Address = self.classes.User, self.classes.Address
s = self.session()
u1 = User(name='ed', addresses=[Address(email_address='foo')])
s.add(u1)
s.commit()
u1.name = 'edward'
u1.addresses.append(Address(email_address='bar'))
s.begin_nested()
u2 = User(name='jack', addresses=[Address(email_address='bat')])
s.add(u2)
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
s.commit()
eq_(
s.query(User).order_by(User.id).all(),
[
User(
name='edward',
addresses=[
Address(email_address='foo'),
Address(email_address='bar')]),
User(name='jack', addresses=[Address(email_address='bat')])
]
)
@testing.requires.savepoints
def test_expunge_pending_on_rollback(self):
User = self.classes.User
sess = self.session()
sess.begin_nested()
u2 = User(name='newuser')
sess.add(u2)
assert u2 in sess
sess.rollback()
assert u2 not in sess
@testing.requires.savepoints
def test_update_deleted_on_rollback(self):
User = self.classes.User
s = self.session()
u1 = User(name='ed')
s.add(u1)
s.commit()
s.begin_nested()
s.delete(u1)
assert u1 in s.deleted
s.rollback()
assert u1 in s
assert u1 not in s.deleted
class AccountingFlagsTest(_LocalFixture):
__backend__ = True
def test_no_expire_on_commit(self):
User, users = self.classes.User, self.tables.users
sess = sessionmaker(expire_on_commit=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
testing.db.execute(
users.update(users.c.name == 'ed').values(name='edward'))
assert u1.name == 'ed'
sess.expire_all()
assert u1.name == 'edward'
def test_rollback_no_accounting(self):
User, users = self.classes.User, self.tables.users
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
u1.name = 'edwardo'
sess.rollback()
testing.db.execute(
users.update(users.c.name == 'ed').values(name='edward'))
assert u1.name == 'edwardo'
sess.expire_all()
assert u1.name == 'edward'
def test_commit_no_accounting(self):
User, users = self.classes.User, self.tables.users
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name='ed')
sess.add(u1)
sess.commit()
u1.name = 'edwardo'
sess.rollback()
testing.db.execute(
users.update(users.c.name == 'ed').values(name='edward'))
assert u1.name == 'edwardo'
sess.commit()
assert testing.db.execute(select([users.c.name])).fetchall() == \
[('edwardo',)]
assert u1.name == 'edwardo'
sess.delete(u1)
sess.commit()
def test_preflush_no_accounting(self):
User, users = self.classes.User, self.tables.users
sess = Session(
_enable_transaction_accounting=False, autocommit=True,
autoflush=False)
u1 = User(name='ed')
sess.add(u1)
sess.flush()
sess.begin()
u1.name = 'edwardo'
u2 = User(name="some other user")
sess.add(u2)
sess.rollback()
sess.begin()
assert testing.db.execute(select([users.c.name])).fetchall() == \
[('ed',)]
class AutoCommitTest(_LocalFixture):
__backend__ = True
def test_begin_nested_requires_trans(self):
sess = create_session(autocommit=True)
assert_raises(sa_exc.InvalidRequestError, sess.begin_nested)
def test_begin_preflush(self):
User = self.classes.User
sess = create_session(autocommit=True)
u1 = User(name='ed')
sess.add(u1)
sess.begin()
u2 = User(name='some other user')
sess.add(u2)
sess.rollback()
assert u2 not in sess
assert u1 in sess
assert sess.query(User).filter_by(name='ed').one() is u1
def test_accounting_commit_fails_add(self):
User = self.classes.User
sess = create_session(autocommit=True)
fail = False
def fail_fn(*arg, **kw):
if fail:
raise Exception("commit fails")
event.listen(sess, "after_flush_postexec", fail_fn)
u1 = User(name='ed')
sess.add(u1)
fail = True
assert_raises(
Exception,
sess.flush
)
fail = False
assert u1 not in sess
u1new = User(id=2, name='fred')
sess.add(u1new)
sess.add(u1)
sess.flush()
assert u1 in sess
eq_(
sess.query(User.name).order_by(User.name).all(),
[('ed', ), ('fred',)]
)
def test_accounting_commit_fails_delete(self):
User = self.classes.User
sess = create_session(autocommit=True)
fail = False
def fail_fn(*arg, **kw):
if fail:
raise Exception("commit fails")
event.listen(sess, "after_flush_postexec", fail_fn)
u1 = User(name='ed')
sess.add(u1)
sess.flush()
sess.delete(u1)
fail = True
assert_raises(
Exception,
sess.flush
)
fail = False
assert u1 in sess
assert u1 not in sess.deleted
sess.delete(u1)
sess.flush()
assert u1 not in sess
eq_(
sess.query(User.name).order_by(User.name).all(),
[]
)
@testing.requires.updateable_autoincrement_pks
def test_accounting_no_select_needed(self):
"""test that flush accounting works on non-expired instances
when autocommit=True/expire_on_commit=True."""
User = self.classes.User
sess = create_session(autocommit=True, expire_on_commit=True)
u1 = User(id=1, name='ed')
sess.add(u1)
sess.flush()
u1.id = 3
u1.name = 'fred'
self.assert_sql_count(testing.db, sess.flush, 1)
assert 'id' not in u1.__dict__
eq_(u1.id, 3)
class NaturalPKRollbackTest(fixtures.MappedTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table('users', metadata, Column('name', String(50), primary_key=True))
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
def test_rollback_recover(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
session = sessionmaker()()
u1, u2, u3 = User(name='u1'), User(name='u2'), User(name='u3')
session.add_all([u1, u2, u3])
session.commit()
session.delete(u2)
u4 = User(name='u2')
session.add(u4)
session.flush()
u5 = User(name='u3')
session.add(u5)
assert_raises(orm_exc.FlushError, session.flush)
assert u5 not in session
assert u2 not in session.deleted
session.rollback()
def test_key_replaced_by_update(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name='u1')
u2 = User(name='u2')
s = Session()
s.add_all([u1, u2])
s.commit()
s.delete(u1)
s.flush()
u2.name = 'u1'
s.flush()
assert u1 not in s
s.rollback()
assert u1 in s
assert u2 in s
assert s.identity_map[(User, ('u1',))] is u1
assert s.identity_map[(User, ('u2',))] is u2
def test_multiple_key_replaced_by_update(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name='u1')
u2 = User(name='u2')
u3 = User(name='u3')
s = Session()
s.add_all([u1, u2, u3])
s.commit()
s.delete(u1)
s.delete(u2)
s.flush()
u3.name = 'u1'
s.flush()
u3.name = 'u2'
s.flush()
s.rollback()
assert u1 in s
assert u2 in s
assert u3 in s
assert s.identity_map[(User, ('u1',))] is u1
assert s.identity_map[(User, ('u2',))] is u2
assert s.identity_map[(User, ('u3',))] is u3
def test_key_replaced_by_oob_insert(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
u1 = User(name='u1')
s = Session()
s.add(u1)
s.commit()
s.delete(u1)
s.flush()
s.execute(users.insert().values(name='u1'))
u2 = s.query(User).get('u1')
assert u1 not in s
s.rollback()
assert u1 in s
assert u2 not in s
assert s.identity_map[(User, ('u1',))] is u1
| |
# -*- coding: utf-8 -*-
"""This file contains preprocessors for Mac OS X."""
import abc
import plistlib
from plaso.containers import artifacts
from plaso.lib import errors
from plaso.lib import plist
from plaso.parsers.plist_plugins import interface as plist_interface
from plaso.preprocessors import interface
from plaso.preprocessors import manager
class PlistPreprocessPlugin(interface.FileSystemPreprocessPlugin):
"""The plist preprocess plugin interface."""
_PLIST_PATH = u''
def _GetPlistRootKey(self, file_entry):
"""Opens a plist file entry.
Args:
file_entry (dfvfs.FileEntry): file entry of the plist.
Returns:
plistlib._InternalDict: plist root key.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
file_object = file_entry.GetFileObject()
try:
plist_file = plist.PlistFile()
plist_file.Read(file_object)
except IOError as exception:
location = getattr(file_entry.path_spec, u'location', u'')
raise errors.PreProcessFail(
u'Unable to read plist file: {0:s} with error: {1:s}'.format(
location, exception))
finally:
file_object.close()
return plist_file.root_key
@abc.abstractmethod
def Run(self, searcher, knowledge_base):
"""Determines the value of the preprocessing attributes.
Args:
searcher (dfvfs.FileSystemSearcher): file system searcher.
knowledge_base (KnowledgeBase): to fill with preprocessing information.
"""
class PlistKeyPreprocessPlugin(PlistPreprocessPlugin):
"""The plist key preprocess plugin interface.
The plist key preprocess plugin retrieves values from key names,
defined in _PLIST_KEYS, from a specific plist file, defined in
_PLIST_PATH.
"""
# The key names whose values should be returned, in order of preference.
# The first key for which a value is found is used and the remaining keys
# are ignored.
_PLIST_KEYS = [u'']
def _FindKeys(self, key, names, matches):
"""Searches the plist key hierarchy for keys with matching names.
If a match is found a tuple of the key name and value is added to
the matches list.
Args:
key (plistlib._InternalDict): plist key.
names (list[str]): names of the keys to match.
matches (list[str]): keys with matching names.
"""
for name, subkey in iter(key.items()):
if name in names:
matches.append((name, subkey))
# pylint: disable=protected-access
if isinstance(subkey, plistlib._InternalDict):
self._FindKeys(subkey, names, matches)
@abc.abstractmethod
def _ParseValue(self, knowledge_base, name, value):
"""Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
"""
def Run(self, searcher, knowledge_base):
"""Determines the value of the preprocessing attributes.
Args:
searcher (dfvfs.FileSystemSearcher): file system searcher.
knowledge_base (KnowledgeBase): to fill with preprocessing information.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
file_entry = self._FindFileEntry(searcher, self._PLIST_PATH)
if not file_entry:
return
root_key = self._GetPlistRootKey(file_entry)
if not root_key:
location = getattr(file_entry.path_spec, u'location', u'')
raise errors.PreProcessFail(
u'Missing root key in plist: {0:s}'.format(location))
matches = []
self._FindKeys(root_key, self._PLIST_KEYS, matches)
if not matches:
raise errors.PreProcessFail(u'No such keys: {0:s}.'.format(
u', '.join(self._PLIST_KEYS)))
name = None
value = None
for name, value in matches:
if value:
break
if value is None:
raise errors.PreProcessFail(u'No values found for keys: {0:s}.'.format(
u', '.join(self._PLIST_KEYS)))
self._ParseValue(knowledge_base, name, value)
class MacOSXHostnamePreprocessPlugin(PlistKeyPreprocessPlugin):
"""Mac OS X hostname preprocessing plugin."""
_PLIST_PATH = u'/Library/Preferences/SystemConfiguration/preferences.plist'
_PLIST_KEYS = [u'ComputerName', u'LocalHostName']
def _ParseValue(self, knowledge_base, name, value):
"""Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
"""
if name not in self._PLIST_KEYS:
return
hostname_artifact = artifacts.HostnameArtifact(name=value)
# TODO: refactor the use of store number.
hostname_artifact.store_number = 0
knowledge_base.SetHostname(hostname_artifact)
class MacOSXKeyboardLayoutPreprocessPlugin(PlistKeyPreprocessPlugin):
"""Mac OS X keyboard layout preprocessing plugin."""
_PLIST_PATH = u'/Library/Preferences/com.apple.HIToolbox.plist'
_PLIST_KEYS = [u'AppleCurrentKeyboardLayoutInputSourceID']
def _ParseValue(self, knowledge_base, name, value):
"""Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
"""
if name not in self._PLIST_KEYS:
return
if isinstance(value, (list, tuple)):
value = value[0]
_, _, keyboard_layout = value.rpartition(u'.')
knowledge_base.SetValue(u'keyboard_layout', keyboard_layout)
class MacOSXSystemVersionPreprocessPlugin(PlistKeyPreprocessPlugin):
"""Mac OS X system version information preprocessing plugin."""
_PLIST_PATH = u'/System/Library/CoreServices/SystemVersion.plist'
_PLIST_KEYS = [u'ProductUserVisibleVersion']
def _ParseValue(self, knowledge_base, name, value):
"""Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
"""
if name not in self._PLIST_KEYS:
return
knowledge_base.SetValue(u'operating_system_version', value)
class MacOSXTimeZonePreprocessPlugin(interface.FileSystemPreprocessPlugin):
"""Mac OS X time zone preprocessing plugin."""
_PATH = u'/private/etc/localtime'
def Run(self, searcher, knowledge_base):
"""Determines the value of the preprocessing attributes.
Args:
searcher (dfvfs.FileSystemSearcher): file system searcher.
knowledge_base (KnowledgeBase): to fill with preprocessing information.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
file_entry = self._FindFileEntry(searcher, self._PATH)
if not file_entry:
return
if not file_entry.link:
raise errors.PreProcessFail(
u'Unable to retrieve timezone information from: {0:s}.'.format(
self._PATH))
_, _, timezone = file_entry.link.partition(u'zoneinfo/')
if timezone:
knowledge_base.SetValue(u'time_zone_str', timezone)
class MacOSXUserAccountsPreprocessPlugin(PlistPreprocessPlugin):
"""Mac OS X user accouns preprocessing plugin."""
ATTRIBUTE = u'users'
# Define the path to the user account information.
_PLIST_PATH_REGEX = (
u'/private/var/db/dslocal/nodes/Default/users/[^_].+.plist')
_KEYS = frozenset([u'gid', 'home', u'name', u'realname', u'shell', u'uid'])
def _GetKeysDefaultEmpty(self, top_level, keys, depth=1):
"""Return keys nested in a plist dict, defaulting to an empty value.
The method GetKeys fails if the supplied key does not exist within the
plist object. This alternate method behaves the same way as GetKeys
except that instead of raising an error if the key doesn't exist it will
assign an empty string value ('') to the field.
Args:
top_level (plistlib._InternalDict): top level plist object.
keys (set[str]): names of keys that should be returned.
depth (int): depth within the plist, where 1 is top level.
Returns:
dict[str,str]: values of the requested keys.
"""
keys = set(keys)
match = {}
if depth == 1:
for key in keys:
value = top_level.get(key, None)
if value is not None:
match[key] = value
else:
for _, parsed_key, parsed_value in plist_interface.RecurseKey(
top_level, depth=depth):
if parsed_key in keys:
match[parsed_key] = parsed_value
if set(match.keys()) == keys:
return match
return match
def _ParsePlistFileEntry(self, knowledge_base, file_entry):
"""Parses an user account plist file.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
file_entry (dfvfs.FileEntry): file entry of the user account plist file.
Raises:
errors.PreProcessFail: if the preprocessing fails.
"""
root_key = self._GetPlistRootKey(file_entry)
if not root_key:
location = getattr(file_entry.path_spec, u'location', u'')
raise errors.PreProcessFail(
u'Missing root key in plist: {0:s}'.format(location))
try:
match = self._GetKeysDefaultEmpty(root_key, self._KEYS)
except KeyError as exception:
location = getattr(file_entry.path_spec, u'location', u'')
raise errors.PreProcessFail(
u'Unable to read user plist file: {0:s} with error: {1:s}'.format(
location, exception))
name = match.get(u'name', [None])[0]
uid = match.get(u'uid', [None])[0]
if not name or not uid:
# TODO: add and store preprocessing errors.
return
user_account = artifacts.UserAccountArtifact(
identifier=uid, username=name)
user_account.group_identifier = match.get(u'gid', [None])[0]
user_account.full_name = match.get(u'realname', [None])[0]
user_account.shell = match.get(u'shell', [None])[0]
user_account.user_directory = match.get(u'home', [None])[0]
# TODO: refactor the use of store number.
user_account.store_number = 0
knowledge_base.SetUserAccount(user_account)
def Run(self, searcher, knowledge_base):
"""Determines the value of the preprocessing attributes.
Args:
searcher (dfvfs.FileSystemSearcher): file system searcher.
knowledge_base (KnowledgeBase): to fill with preprocessing information.
"""
path_specs = self._FindPathSpecs(searcher, self._PLIST_PATH_REGEX)
if not path_specs:
return
for path_spec in path_specs:
file_entry = searcher.GetFileEntryByPathSpec(path_spec)
self._ParsePlistFileEntry(knowledge_base, file_entry)
manager.PreprocessPluginsManager.RegisterPlugins([
MacOSXHostnamePreprocessPlugin, MacOSXKeyboardLayoutPreprocessPlugin,
MacOSXSystemVersionPreprocessPlugin, MacOSXTimeZonePreprocessPlugin,
MacOSXUserAccountsPreprocessPlugin])
| |
import errno
import fcntl
import logging
import os
import signal
import sys
import time
import __main__
logger = logging.getLogger(__name__)
class Daemon(object):
def __init__(self,
pidfile=None,
stdin='/dev/null',
stdout='/dev/null',
stderr='/dev/null',
close_fds=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile or _default_pid_file()
# NOTE: We need to open another separate file to avoid the file
# being reopened again.
# In which case, process loses file lock.
#
# From "man fcntl":
# As well as being removed by an explicit F_UNLCK, record locks are
# automatically released when the process terminates or if it
# closes any file descriptor referring to a file on which locks
# are held. This is bad: it means that a process can lose the locks
# on a file like /etc/passwd or /etc/mtab when for some reason a
# library function decides to open, read and close it.
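# Illustrative sketch of that failure mode (hypothetical caller code, not
# part of this module): if the record lock were held on the pid file itself,
# then something like
#
#     with open(daemon.pidfile) as f:   # reopen ...
#         f.read()                      # ... and close on exit
#
# anywhere in the same process would silently release the lock. Holding the
# lock on a dedicated "<pidfile>.lock" descriptor that is never reopened
# avoids that.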
self.lockfile = self.pidfile + ".lock"
self.lockfp = None
self.close_fds = close_fds
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
_close_std_io()
sys.exit(0)
except OSError as e:
logger.error("fork #1 failed: " + repr(e))
sys.exit(1)
# decouple from parent environment
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
_close_std_io()
sys.exit(0)
except OSError as e:
logger.error("fork #2 failed: " + repr(e))
sys.exit(1)
if self.close_fds:
_close_fds()
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'r')
so = open(self.stdout, 'a+')
se = open(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
logger.info("OK daemonized")
def trylock_or_exit(self, timeout=10):
"""Try to acquire an exclusive non-blocking lock on self.lockfile, retrying
for up to `timeout` seconds; exits the process if the lock cannot be taken."""
interval = 0.1
n = int(timeout / interval) + 1
flag = fcntl.LOCK_EX | fcntl.LOCK_NB
for ii in range(n):
fd = os.open(self.lockfile, os.O_RDWR | os.O_CREAT)
fcntl.fcntl(fd, fcntl.F_SETFD,
fcntl.fcntl(fd, fcntl.F_GETFD, 0)
| fcntl.FD_CLOEXEC)
try:
fcntl.lockf(fd, flag)
self.lockfp = os.fdopen(fd, 'w+r')
break
except IOError as e:
os.close(fd)
if e[0] == errno.EAGAIN:
time.sleep(interval)
else:
raise
else:
logger.info("Failure acquiring lock %s" % (self.lockfile, ))
sys.exit(1)
logger.info("OK acquired lock %s" % (self.lockfile))
def unlock(self):
if self.lockfp is None:
return
fd = self.lockfp.fileno()
fcntl.lockf(fd, fcntl.LOCK_UN)
self.lockfp.close()
self.lockfp = None
def start(self):
self.daemonize()
self.init_proc()
def init_proc(self):
self.trylock_or_exit()
self.write_pid_or_exit()
def write_pid_or_exit(self):
"""Write the current pid to the pid file, keeping it open and marked
close-on-exec; exits the process if writing fails."""
self.pf = open(self.pidfile, 'w+r')
pf = self.pf
fd = pf.fileno()
fcntl.fcntl(fd, fcntl.F_SETFD,
fcntl.fcntl(fd, fcntl.F_GETFD, 0)
| fcntl.FD_CLOEXEC)
try:
pid = os.getpid()
logger.debug('write pid:' + str(pid))
pf.truncate(0)
pf.write(str(pid))
pf.flush()
except Exception as e:
logger.exception('write pid failed.' + repr(e))
sys.exit(0)
def stop(self):
pid = None
if not os.path.exists(self.pidfile):
logger.debug('pidfile not exist:' + self.pidfile)
return
try:
pid = _read_file(self.pidfile)
pid = int(pid)
os.kill(pid, signal.SIGTERM)
return
except Exception as e:
logger.warn('{e} while get and kill pid={pid}'.format(
e=repr(e), pid=pid))
def _read_file(fn):
with open(fn, 'r') as f:
return f.read()
def _close_std_io():
os.close(0)
os.close(1)
os.close(2)
def _close_fds():
try:
max_fd = os.sysconf("SC_OPEN_MAX")
except ValueError as e:
logger.warn(repr(e) + ' while get max fds of a process')
max_fd = 65536
for i in xrange(3, max_fd):
try:
os.close(i)
except OSError:
pass
def _default_pid_file():
if hasattr(__main__, '__file__'):
name = __main__.__file__
name = os.path.basename(name)
if name == '<stdin>':
name = '__stdin__'
return '/var/run/' + name.rsplit('.', 1)[0]
else:
return '/var/run/pykit.daemonize'
def daemonize_cli(run_func, pidfn, close_fds=False):
logging.basicConfig(stream=sys.stderr)
logging.getLogger(__name__).setLevel(logging.DEBUG)
d = Daemon(pidfile=pidfn, close_fds=close_fds)
logger.info("sys.argv: " + repr(sys.argv))
try:
if len(sys.argv) == 1:
d.init_proc()
run_func()
elif len(sys.argv) == 2:
if 'start' == sys.argv[1]:
d.start()
run_func()
elif 'stop' == sys.argv[1]:
d.stop()
elif 'restart' == sys.argv[1]:
d.stop()
d.start()
run_func()
else:
logger.error("Unknown command: %s" % (sys.argv[1]))
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
except Exception as e:
logger.exception(repr(e))
standard_daemonize = daemonize_cli
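# Minimal usage sketch (hypothetical script; the run() body and the pid file
# path are illustrative, not part of this module):
#
#     def run():
#         while True:
#             time.sleep(1)
#
#     if __name__ == '__main__':
#         # invoked as: myservice.py [start|stop|restart]
#         daemonize_cli(run, '/var/run/myservice.pid')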
| |
import json
from django.utils import timezone
from django.db import transaction
from channels import Group
from net.equipment.generic import GenericEquipment
from net.tasks import ping_task, login_suggest_task, long_job_task, celery_scan_nets_with_fping, \
celery_discover_vendor, celery_get_config, celery_cmd_runner, celery_put_syslocation
from net.models import Job, JobResult, Scripts, Equipment
from argus.models import ASTU
import subprocess
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Error, Keyword, Name, Number, Punctuation, String, Text
from pygments import highlight
from pygments.formatters import HtmlFormatter
import mistune
@transaction.non_atomic_requests
def celery_job_starter(destinations_ids, script_id):
"""
Starts celery job
:param destinations_ids:
:param script_id:
:return:
"""
if destinations_ids == list():
return False
job = Job()
job.script = Scripts.objects.get(id=script_id)
# Run the celery task with a short countdown, which gives the Job record
# saved below time to exist before a worker picks the task up.
COUNTDOWN = 3.0
if script_id == '1':
# ping
# task = ping_task.delay(destinations_ids)
task = ping_task.apply_async((destinations_ids,), track_started=True, countdown=COUNTDOWN)
elif script_id == '2':
# task = long_job_task.delay()
task = long_job_task.apply_async(track_started=True, countdown=COUNTDOWN)
elif script_id == '3':
# task = login_suggest_task.delay(destinations_ids)
task = login_suggest_task.apply_async((destinations_ids,), track_started=True, countdown=COUNTDOWN)
elif script_id == '999':
# task = celery_scan_nets_with_fping.delay(subnets=destinations_ids)
task = celery_scan_nets_with_fping.apply_async(kwargs={'subnets': destinations_ids}, track_started=True,
countdown=COUNTDOWN)
destinations_ids = list()
elif script_id == '1000':
# task = celery_discover_vendor.delay(subnets=destinations_ids)
task = celery_discover_vendor.apply_async(kwargs={'subnets': destinations_ids}, track_started=True,
countdown=COUNTDOWN)
destinations_ids = list()
elif script_id == '1001':
# task = celery_discover_vendor.delay(subnets=destinations_ids)
task = celery_get_config.apply_async(kwargs={'subnets': destinations_ids}, track_started=True,
countdown=COUNTDOWN)
destinations_ids = list()
elif script_id == '1002':
task = celery_put_syslocation.apply_async(args=(destinations_ids, ), track_started=True,
countdown=COUNTDOWN)
destinations_ids = list()
else:
return False
job.celery_id = task.task_id
job.status = 'PENDING'
job.save()
# Add the NE ids to the Job record so the template can show which NEs the job targets
for ne in destinations_ids:
job.ne_ids.add(ASTU.objects.get(pk=ne))
job.save()
pass
def scan_nets_with_fping(subnets):
found, new = 0, 0 # counts of alive IPs found and of newly created Equipment rows
for subnet in subnets:
# fping flags: -O 160 set the IP ToS byte, -a list alive targets, -q quiet,
# -r 0 no retries, -i 1 one millisecond between packets, -g expand the
# subnet into individual targets.
proc = subprocess.Popen(["/usr/bin/sudo /sbin/fping -O 160 -a -q -r 0 -i 1 -g %s" % subnet], shell=True,
stdout=subprocess.PIPE)
proc.wait()
out = proc.stdout.read()
alive_list = out.decode().split('\n')[:-1] # everything but the last empty
for ip in alive_list:
obj, created = Equipment.objects.get_or_create(ne_ip=ip.split(' ')[0])
found += 1
if created:
new += 1
obj.hostname = None
obj.vendor = None
obj.model = None
obj.save()
return found, new
def discover_vendor(subnets):
"""
Does network element discovery and finds logins/passwords from credentials database
:param subnets: list with subnets to discover
:return: login_suggest_success_count, vendor_found_count
"""
login_suggest_success_count = 0
vendor_found_count = 0
for subnet in subnets:
# If we can't find "/" (slash) symbol in subnets, than user had entered the host only, and no subnet
if subnet.find("/") == -1:
# one host
hosts = Equipment.objects.filter(ne_ip=subnet)
else:
# subnet
hosts = Equipment.objects.filter(ne_ip__net_contained=subnet)
for host in hosts:
eq = GenericEquipment(host)
# need to adjust it? or 1 sec is enough?
eq.set_io_timeout(1)
if eq.suggest_login(resuggest=False):
login_suggest_success_count += 1
# Trying to login only if login guessing was successful
eq.do_login()
if eq.discover_vendor():
vendor_found_count += 1
eq.get_config()
eq.disconnect()
return login_suggest_success_count, vendor_found_count
# Thanks to nocproject.org for the lexers
class NOCCiscoLexer(RegexLexer):
name = "Cisco.IOS"
tokens = {
"root": [
(r"^!.*", Comment),
(r"(description)(.*?)$", bygroups(Keyword, Comment)),
(r"(password|shared-secret|secret)(\s+[57]\s+)(\S+)", bygroups(Keyword, Number, String.Double)),
(r"(ca trustpoint\s+)(\S+)", bygroups(Keyword, String.Double)),
(r"^(interface|controller|router \S+|voice translation-\S+|voice-port)(.*?)$", bygroups(Keyword, Name.Attribute)),
(r"^(dial-peer\s+\S+\s+)(\S+)(.*?)$", bygroups(Keyword, Name.Attribute, Keyword)),
(r"^(vlan\s+)(\d+)$", bygroups(Keyword, Name.Attribute)),
(r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(/\d{1,2})?", Number), # IPv4 Address/Prefix
(r"49\.\d{4}\.\d{4}\.\d{4}\.\d{4}\.\d{2}", Number), # NSAP
(r"(\s+[0-9a-f]{4}\.[0-9a-f]{4}\.[0-9a-f]{4}\s+)", Number), # MAC Address
(r"^(?:no\s+)?\S+", Keyword),
(r"\s+\d+\s+\d*|,\d+|-\d+", Number),
(r".", Text),
],
}
class NOCJuniperLexer(RegexLexer):
name = "Juniper.JUNOS"
tokens = {
"root": [
(r"#.*$", Comment),
(r"//.*$", Comment),
(r"/\*", Comment, "comment"),
(r"\"", String.Double, "string"),
(r"inactive:", Error),
(r"(\S+\s+)(\S+\s+)({)", bygroups(Keyword, Name.Attribute, Punctuation)),
(r"(\S+\s+)({)", bygroups(Keyword, Punctuation)),
(r"https?://.*?[;]", String.Double),
(r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(/\d{1,2})?", Number), # IPv4 Address/Prefix
(r"49\.\d{4}\.\d{4}\.\d{4}\.\d{4}\.\d{2}", Number), # NSAP
(r"[;\[\]/:<>*{}]", Punctuation),
(r"\d+", Number),
(r".", Text)
],
"comment": [
(r"[^/*]", Comment),
(r"/\*", Comment, "#push"),
(r"\*/", Comment, "#pop"),
(r"[*/]", Comment)
],
"string": [
(r".*\"", String.Double, "#pop")
]
}
class NOCHuaweiLexer(RegexLexer):
name = "Huawei.VRP"
tokens = {
"root": [
(r"^#.*", Comment),
(r"(description)(.*?)$", bygroups(Keyword, Comment)),
(r"^(interface|ospf|bgp|isis|acl name)(.*?)$", bygroups(Keyword, Name.Attribute)),
(r"^(vlan\s+)(\d+)$", bygroups(Keyword, Name.Attribute)),
(r"^(vlan\s+)(\d+\s+)(to\s+)(\d+)$", bygroups(Keyword, Name.Attribute, Keyword, Name.Attribute)),
(r"^(?:undo\s+)?\S+", Keyword),
(r"(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})(/\d{1,2})?", Number), # IPv4 Address/Prefix
(r"49\.\d{4}\.\d{4}\.\d{4}\.\d{4}\.\d{2}", Number), # NSAP
(r"\d+", Number),
(r".", Text)
]
}
class HighlightRenderer(mistune.Renderer):
def __init__(self, vendor=None):
super().__init__()
self.vendor = vendor
def block_code(self, code, lang):
if self.vendor == 'Cisco':
lexer = NOCCiscoLexer()
elif self.vendor == 'Juniper':
lexer = NOCJuniperLexer()
elif self.vendor == 'Huawei':
lexer = NOCHuaweiLexer()
else:
lexer = NOCCiscoLexer()
formatter = HtmlFormatter()
return highlight(code, lexer, formatter)
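# Rendering sketch (mistune 0.x API assumed; the config snippet is illustrative):
#
#     renderer = HighlightRenderer(vendor='Cisco')
#     markdown = mistune.Markdown(renderer=renderer)
#     html = markdown("```\ninterface Gi0/1\n description uplink\n```")
#
# block_code() then runs the fenced text through the matching NOC*Lexer and
# pygments' HtmlFormatter.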
def cmd_to_celery(vendor, ips, cmds):
task = celery_cmd_runner.apply_async(kwargs={'vendor': vendor, 'ips': ips, 'cmds': cmds})
pass
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nccl ops. See also the cc test for nccl_communicator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import os
import numpy as np
from tensorflow.contrib import nccl
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.platform import test
def _DeviceTensors(tensors, devices):
res = []
for t, d in zip(tensors, devices):
with ops.device(d):
res.append(array_ops.identity(t))
return res
def _NcclAllReduce(nccl_fun, tensors, devices):
return nccl_fun(_DeviceTensors(tensors, devices))
def _NcclReduce(nccl_fun, tensors, devices):
receiver = np.random.randint(0, len(devices))
with ops.device(devices[receiver]):
return [nccl_fun(_DeviceTensors(tensors, devices))]
def _NcclBroadcast(tensors, devices):
sender = np.random.randint(0, len(devices))
with ops.device(devices[sender]):
tensor = array_ops.identity(tensors[0])
broadcast = nccl.broadcast(tensor)
return _DeviceTensors([broadcast] * len(devices), devices)
class NcclTestCase(test.TestCase):
def _Test(self,
nccl_reduce,
numpy_fn,
dtypes=[np.float16, np.float32, np.int32, np.int64, np.float64],
device_sets=(['/device:GPU:1', '/device:GPU:2', '/device:GPU:0'],
['/device:GPU:1', '/device:GPU:0'])):
"""Tests that nccl_reduce does the same as reduction with numpy_fn.
Args:
nccl_reduce: A function taking a list of tensors and a list of devices,
and returns a list of reduced tensors and a list of ops to perform the
reduction.
numpy_fn: A function taking two tensors and returning the reduction of the
two.
device_sets: Tuple of virtual devices to run test on.
"""
# Enable NCCL printouts.
os.environ["NCCL_DEBUG"] = "INFO"
for dtype in dtypes:
# Create session inside outer loop to test use of
# same communicator across multiple sessions.
with self.test_session(use_gpu=True) as sess:
for devices in device_sets:
shape = (3, 4)
random = (np.random.random_sample(shape) - .5) * 1024
tensors = []
for _ in devices:
tensors.append(random.astype(dtype))
np_ans = tensors[0]
for t in tensors[1:]:
np_ans = numpy_fn(np_ans, t)
reduce_tensors = nccl_reduce(tensors, devices)
self.assertNotEmpty(reduce_tensors)
# Test shape inference.
for r in reduce_tensors:
self.assertEqual(shape, r.get_shape())
result_tensors = [array_ops.identity(t) for t in reduce_tensors]
# Check GPU availability *after* creating session, see b/68975239.
if not test.is_gpu_available():
# If no GPU is available, only test graph construction.
continue
# Test execution and results.
for t in sess.run(result_tensors):
self.assertAllClose(t, np_ans)
def _TestGradient(self, nccl_reduce, numpy_fn):
"""Tests the gradient of nccl_reduce.
Args:
nccl_reduce: A function taking a list of tensors and a list of devices,
and returns a list of reduced tensors and a list of ops to perform the
reduction.
numpy_fn: A function taking two tensors and returning the gradient of the
reduction of the two.
"""
def _Gradient(tensors, devices):
inputs = [array_ops.placeholder(t.dtype, t.shape) for t in tensors]
reduce_tensors = nccl_reduce(inputs, devices)
losses = _DeviceTensors(tensors, [t.device for t in reduce_tensors])
grads = gradients.gradients(
reduce_tensors, inputs, losses, colocate_gradients_with_ops=True)
return [g for g in grads if g is not None]
# int types are considered not 'trainable' and no gradients are generated.
self._Test(_Gradient, numpy_fn, dtypes=[np.float16, np.float32, np.float64])
class AllReduceTest(NcclTestCase):
def testAllReduce(self):
self._Test(partial(_NcclAllReduce, nccl.all_sum), lambda x, y: x + y)
self._Test(partial(_NcclAllReduce, nccl.all_prod), lambda x, y: x * y)
self._Test(partial(_NcclAllReduce, nccl.all_min), np.minimum)
self._Test(partial(_NcclAllReduce, nccl.all_max), np.maximum)
def testAllSumGrad(self):
self._TestGradient(
partial(_NcclAllReduce, nccl.all_sum), lambda x, y: x + y)
def testErrors(self):
with self.assertRaisesRegexp(ValueError, 'Device assignment required'):
nccl.all_sum([array_ops.identity(np.random.random_sample((3, 4)))])
with self.assertRaisesRegexp(ValueError, 'Must pass >0 tensors'):
nccl.all_sum([])
class SingleReduceTest(NcclTestCase):
def testSum(self):
self._Test(partial(_NcclReduce, nccl.reduce_sum), lambda x, y: x + y)
def testSumGrad(self):
self._TestGradient(partial(_NcclReduce, nccl.reduce_sum), lambda x, y: x)
class BroadcastTest(NcclTestCase):
def testBroadcast(self):
self._Test(_NcclBroadcast, lambda x, y: x)
def testBroadcastSingleDevice(self):
# Broadcasts on a single device are removed completely during rewrite.
self._Test(_NcclBroadcast, lambda x, y: x,
(['/device:GPU:0', '/device:GPU:0'],))
def testBroadcastToCpuError(self):
try:
# Broadcasts to CPU is not supported.
self._Test(_NcclBroadcast, lambda x, y: x,
(['/device:GPU:0', '/device:CPU:0'],))
except errors.NotFoundError as e:
self.assertRegexpMatches(
str(e), "No registered '_NcclBroadcastRecv' OpKernel for CPU devices")
else:
# Session isn't executed when no GPU is available.
if test.is_gpu_available():
self.fail("Didn't raise NotFoundError trying to broadcast to CPU")
class CombinedTest(NcclTestCase):
"""Test all-reduce vs. single-reduce plus broadcast in one session.run."""
def _Combined(self, tensors, devices):
all_reduce_tensors = _NcclAllReduce(nccl.all_sum, tensors, devices)
single_reduce_tensors = _NcclReduce(nccl.reduce_sum, tensors, devices)
broadcast_tensors = _NcclBroadcast(single_reduce_tensors, devices)
return all_reduce_tensors + broadcast_tensors
def testCombined(self):
self._Test(self._Combined, lambda x, y: x + y)
if __name__ == '__main__':
test.main()
| |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Community.when_created'
db.add_column('clusters_community', 'when_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True), keep_default=False)
# Adding field 'Community.created_by'
db.add_column('clusters_community', 'created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='created_community', null=True, to=orm['auth.User']), keep_default=False)
# Adding field 'Community.when_changed'
db.add_column('clusters_community', 'when_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True), keep_default=False)
# Adding field 'Community.changed_by'
db.add_column('clusters_community', 'changed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='changed_community', null=True, to=orm['auth.User']), keep_default=False)
# Adding field 'Cluster.when_created'
db.add_column('clusters_cluster', 'when_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True), keep_default=False)
# Adding field 'Cluster.created_by'
db.add_column('clusters_cluster', 'created_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='created_cluster', null=True, to=orm['auth.User']), keep_default=False)
# Adding field 'Cluster.when_changed'
db.add_column('clusters_cluster', 'when_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True), keep_default=False)
# Adding field 'Cluster.changed_by'
db.add_column('clusters_cluster', 'changed_by', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='changed_cluster', null=True, to=orm['auth.User']), keep_default=False)
def backwards(self, orm):
# Deleting field 'Community.when_created'
db.delete_column('clusters_community', 'when_created')
# Deleting field 'Community.created_by'
db.delete_column('clusters_community', 'created_by_id')
# Deleting field 'Community.when_changed'
db.delete_column('clusters_community', 'when_changed')
# Deleting field 'Community.changed_by'
db.delete_column('clusters_community', 'changed_by_id')
# Deleting field 'Cluster.when_created'
db.delete_column('clusters_cluster', 'when_created')
# Deleting field 'Cluster.created_by'
db.delete_column('clusters_cluster', 'created_by_id')
# Deleting field 'Cluster.when_changed'
db.delete_column('clusters_cluster', 'when_changed')
# Deleting field 'Cluster.changed_by'
db.delete_column('clusters_cluster', 'changed_by_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'clusters.agentfunction': {
'Meta': {'ordering': "('agent', 'function')", 'object_name': 'AgentFunction'},
'agent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'functions'", 'to': "orm['clusters.EconomicAgent']"}),
'function': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agents'", 'to': "orm['clusters.EconomicFunction']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'clusters.agentfunctionresourcetype': {
'Meta': {'ordering': "('agent_function', 'role', 'resource_type')", 'object_name': 'AgentFunctionResourceType'},
'agent_function': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'function_resources'", 'to': "orm['clusters.AgentFunction']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agent_functions'", 'to': "orm['clusters.EconomicResourceType']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'value': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'clusters.agentresourceflow': {
'Meta': {'ordering': "('from_function', 'to_function', 'resource_type')", 'object_name': 'AgentResourceFlow'},
'from_function': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outgoing_flows'", 'to': "orm['clusters.AgentFunction']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agent_flows'", 'to': "orm['clusters.EconomicResourceType']"}),
'to_function': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'incoming_flows'", 'to': "orm['clusters.AgentFunction']"}),
'value': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'clusters.agentresourcetype': {
'Meta': {'ordering': "('agent', 'role', 'resource_type')", 'object_name': 'AgentResourceType'},
'agent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': "orm['clusters.EconomicAgent']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agents'", 'to': "orm['clusters.EconomicResourceType']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'value': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'clusters.cluster': {
'Meta': {'object_name': 'Cluster'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'changed_cluster'", 'null': 'True', 'to': "orm['auth.User']"}),
'community': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'clusters'", 'to': "orm['clusters.Community']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_cluster'", 'null': 'True', 'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'function_aspect_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'root_function': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'cluster_root'", 'null': 'True', 'to': "orm['clusters.EconomicFunction']"}),
'root_resource': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'cluster_root'", 'null': 'True', 'to': "orm['clusters.EconomicResourceType']"}),
'when_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'when_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'clusters.community': {
'Meta': {'ordering': "('name',)", 'object_name': 'Community'},
'agent_geographic_area_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'changed_community'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_community'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
'map_center': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'map_zoom_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'resource_aspect_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'unit_of_value': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'community_units'", 'null': 'True', 'to': "orm['clusters.Unit']"}),
'when_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'when_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'clusters.communityagent': {
'Meta': {'ordering': "('community', 'agent')", 'object_name': 'CommunityAgent'},
'agent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communities'", 'to': "orm['clusters.EconomicAgent']"}),
'community': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agents'", 'to': "orm['clusters.Community']"}),
'geographic_area': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'region_latitude': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
'region_longitude': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'})
},
'clusters.communityresourcetype': {
'Meta': {'ordering': "('community', 'resource_type')", 'object_name': 'CommunityResourceType'},
'aspect': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'community': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': "orm['clusters.Community']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'communities'", 'to': "orm['clusters.EconomicResourceType']"})
},
'clusters.economicagent': {
'Meta': {'ordering': "('name',)", 'object_name': 'EconomicAgent'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'clusters.economicfunction': {
'Meta': {'ordering': "('cluster', 'name')", 'object_name': 'EconomicFunction'},
'aspect': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'cluster': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'functions'", 'to': "orm['clusters.Cluster']"}),
'color': ('django.db.models.fields.CharField', [], {'default': "'green'", 'max_length': '12'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'clusters.economicresourcetype': {
'Meta': {'ordering': "('name',)", 'object_name': 'EconomicResourceType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['clusters.EconomicResourceType']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'unit_of_quantity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'resource_units'", 'null': 'True', 'to': "orm['clusters.Unit']"})
},
'clusters.functionresourceflow': {
'Meta': {'ordering': "('from_function', 'to_function', 'resource_type')", 'object_name': 'FunctionResourceFlow'},
'from_function': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'outgoing_flows'", 'to': "orm['clusters.EconomicFunction']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'function_flows'", 'to': "orm['clusters.EconomicResourceType']"}),
'to_function': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'incoming_flows'", 'to': "orm['clusters.EconomicFunction']"}),
'value': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'clusters.functionresourcetype': {
'Meta': {'ordering': "('function', 'role', 'resource_type')", 'object_name': 'FunctionResourceType'},
'function': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': "orm['clusters.EconomicFunction']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '8', 'decimal_places': '2'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'resource_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'functions'", 'to': "orm['clusters.EconomicResourceType']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'value': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'clusters.sitesettings': {
'Meta': {'object_name': 'SiteSettings'},
'featured_cluster': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'featured'", 'to': "orm['clusters.Cluster']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'clusters.unit': {
'Meta': {'object_name': 'Unit'},
'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '12'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['clusters']
| |
# (C) Datadog, Inc. 2012-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict
import copy
import re
import socket
import time
import urlparse
# 3rd party
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
STATS_URL = "/;csv;norefresh"
EVENT_TYPE = SOURCE_TYPE_NAME = 'haproxy'
BUFSIZE = 8192
class Services(object):
BACKEND = 'BACKEND'
FRONTEND = 'FRONTEND'
ALL = (BACKEND, FRONTEND)
# Statuses that we normalize to and that are reported by
# `haproxy.count_per_status` by default (unless `collate_status_tags_per_host` is enabled)
ALL_STATUSES = (
'up', 'open', 'down', 'maint', 'nolb'
)
AVAILABLE = 'available'
UNAVAILABLE = 'unavailable'
COLLATED_STATUSES = (AVAILABLE, UNAVAILABLE)
BACKEND_STATUS_TO_COLLATED = {
'up': AVAILABLE,
'down': UNAVAILABLE,
'maint': UNAVAILABLE,
'nolb': UNAVAILABLE,
}
STATUS_TO_COLLATED = {
'up': AVAILABLE,
'open': AVAILABLE,
'down': UNAVAILABLE,
'maint': UNAVAILABLE,
'nolb': UNAVAILABLE,
}
STATUS_TO_SERVICE_CHECK = {
'up': AgentCheck.OK,
'down': AgentCheck.CRITICAL,
'no_check': AgentCheck.UNKNOWN,
'maint': AgentCheck.OK,
}
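# Illustrative mapping (derived from the tables above, not extra behavior): a raw HAProxy
# status such as 'UP 1/2' normalizes to 'up', which collates to AVAILABLE and maps to
# AgentCheck.OK for the service check; 'MAINT' normalizes to 'maint', collates to
# UNAVAILABLE, but still reports AgentCheck.OK.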
class HAProxy(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Host status needs to persist across all checks
self.host_status = defaultdict(lambda: defaultdict(lambda: None))
METRICS = {
"qcur": ("gauge", "queue.current"),
"scur": ("gauge", "session.current"),
"slim": ("gauge", "session.limit"),
"spct": ("gauge", "session.pct"), # Calculated as: (scur/slim)*100
"stot": ("rate", "session.rate"),
"bin": ("rate", "bytes.in_rate"),
"bout": ("rate", "bytes.out_rate"),
"dreq": ("rate", "denied.req_rate"),
"dresp": ("rate", "denied.resp_rate"),
"ereq": ("rate", "errors.req_rate"),
"econ": ("rate", "errors.con_rate"),
"eresp": ("rate", "errors.resp_rate"),
"wretr": ("rate", "warnings.retr_rate"),
"wredis": ("rate", "warnings.redis_rate"),
"req_rate": ("gauge", "requests.rate"), # HA Proxy 1.4 and higher
"hrsp_1xx": ("rate", "response.1xx"), # HA Proxy 1.4 and higher
"hrsp_2xx": ("rate", "response.2xx"), # HA Proxy 1.4 and higher
"hrsp_3xx": ("rate", "response.3xx"), # HA Proxy 1.4 and higher
"hrsp_4xx": ("rate", "response.4xx"), # HA Proxy 1.4 and higher
"hrsp_5xx": ("rate", "response.5xx"), # HA Proxy 1.4 and higher
"hrsp_other": ("rate", "response.other"), # HA Proxy 1.4 and higher
"qtime": ("gauge", "queue.time"), # HA Proxy 1.5 and higher
"ctime": ("gauge", "connect.time"), # HA Proxy 1.5 and higher
"rtime": ("gauge", "response.time"), # HA Proxy 1.5 and higher
"ttime": ("gauge", "session.time"), # HA Proxy 1.5 and higher
}
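# For reference (an illustrative reading of the table above, not additional configuration):
# the second tuple element is the metric suffix, and _process_metrics() prefixes it with the
# row type, so a "bin" value on a BACKEND row is submitted as the rate
# "haproxy.backend.bytes.in_rate", while the same column on a FRONTEND row becomes
# "haproxy.frontend.bytes.in_rate".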
SERVICE_CHECK_NAME = 'haproxy.backend_up'
def check(self, instance):
url = instance.get('url')
self.log.debug('Processing HAProxy data for %s' % url)
parsed_url = urlparse.urlparse(url)
if parsed_url.scheme == 'unix':
data = self._fetch_socket_data(parsed_url.path)
else:
username = instance.get('username')
password = instance.get('password')
verify = not _is_affirmative(instance.get('disable_ssl_validation', False))
data = self._fetch_url_data(url, username, password, verify)
collect_aggregates_only = _is_affirmative(
instance.get('collect_aggregates_only', True)
)
collect_status_metrics = _is_affirmative(
instance.get('collect_status_metrics', False)
)
collect_status_metrics_by_host = _is_affirmative(
instance.get('collect_status_metrics_by_host', False)
)
collate_status_tags_per_host = _is_affirmative(
instance.get('collate_status_tags_per_host', False)
)
count_status_by_service = _is_affirmative(
instance.get('count_status_by_service', True)
)
tag_service_check_by_host = _is_affirmative(
instance.get('tag_service_check_by_host', False)
)
services_incl_filter = instance.get('services_include', [])
services_excl_filter = instance.get('services_exclude', [])
custom_tags = instance.get('tags', [])
process_events = instance.get('status_check', self.init_config.get('status_check', False))
self._process_data(
data, collect_aggregates_only, process_events,
url=url, collect_status_metrics=collect_status_metrics,
collect_status_metrics_by_host=collect_status_metrics_by_host,
tag_service_check_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
collate_status_tags_per_host=collate_status_tags_per_host,
count_status_by_service=count_status_by_service,
custom_tags=custom_tags,
)
def _fetch_url_data(self, url, username, password, verify):
''' Hit a given http url and return the stats lines '''
# Try to fetch data from the stats URL
auth = (username, password)
url = "%s%s" % (url, STATS_URL)
self.log.debug("Fetching haproxy stats from url: %s" % url)
response = requests.get(url, auth=auth, headers=headers(self.agentConfig), verify=verify, timeout=self.default_integration_http_timeout)
response.raise_for_status()
return response.content.splitlines()
def _fetch_socket_data(self, socket_path):
''' Hit a given stats socket and return the stats lines '''
self.log.debug("Fetching haproxy stats from socket: %s" % socket_path)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(socket_path)
sock.send("show stat\r\n")
response = ""
output = sock.recv(BUFSIZE)
while output:
response += output.decode("ASCII")
output = sock.recv(BUFSIZE)
sock.close()
return response.splitlines()
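# Note (assumption about a typical instance configuration, mirroring the scheme check in
# check() above): an instance with a hypothetical `url: unix:///var/run/haproxy.sock`
# parses with a 'unix' scheme, so stats are read through _fetch_socket_data() with
# parsed_url.path as the socket path instead of over HTTP.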
def _process_data(self, data, collect_aggregates_only, process_events, url=None,
collect_status_metrics=False, collect_status_metrics_by_host=False,
tag_service_check_by_host=False, services_incl_filter=None,
services_excl_filter=None, collate_status_tags_per_host=False,
count_status_by_service=True, custom_tags=[]):
''' Main data-processing loop. For each piece of useful data, we'll
either save a metric, save an event or both. '''
# Split the first line into an index of fields
# The line looks like:
# "# pxname,svname,qcur,qmax,scur,smax,slim,stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,wredis,status,weight,act,bck,chkfail,chkdown,lastchg,downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,type,rate,rate_lim,rate_max,"
fields = [f.strip() for f in data[0][2:].split(',') if f]
self.hosts_statuses = defaultdict(int)
back_or_front = None
# Skip the header line and walk the rows in reverse: each backend's BACKEND aggregate row
# follows its server rows in the CSV, so in reverse order the aggregate is seen first and
# back_or_front is already set when its server rows are processed
for line in data[:0:-1]:
if not line.strip():
continue
# Store each line's values in a dictionary
data_dict = self._line_to_dict(fields, line)
if self._is_aggregate(data_dict):
back_or_front = data_dict['svname']
self._update_data_dict(data_dict, back_or_front)
self._update_hosts_statuses_if_needed(
collect_status_metrics, collect_status_metrics_by_host,
data_dict, self.hosts_statuses
)
if self._should_process(data_dict, collect_aggregates_only):
# update status
# Send the list of data to the metric and event callbacks
self._process_metrics(
data_dict, url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
custom_tags=custom_tags
)
if process_events:
self._process_event(
data_dict, url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
custom_tags=custom_tags
)
self._process_service_check(
data_dict, url,
tag_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
custom_tags=custom_tags
)
if collect_status_metrics:
self._process_status_metric(
self.hosts_statuses, collect_status_metrics_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
collate_status_tags_per_host=collate_status_tags_per_host,
count_status_by_service=count_status_by_service,
custom_tags=custom_tags
)
self._process_backend_hosts_metric(
self.hosts_statuses,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
custom_tags=custom_tags
)
return data
def _line_to_dict(self, fields, line):
data_dict = {}
for i, val in enumerate(line.split(',')):
if val:
try:
# Try converting to a float; if that fails, leave the value as a string
val = float(val)
except Exception:
pass
data_dict[fields[i]] = val
if 'status' in data_dict:
data_dict['status'] = self._normalize_status(data_dict['status'])
return data_dict
def _update_data_dict(self, data_dict, back_or_front):
"""
Adds spct if relevant, adds service
"""
data_dict['back_or_front'] = back_or_front
# The percentage of used sessions based on 'scur' and 'slim'
if 'slim' in data_dict and 'scur' in data_dict:
try:
data_dict['spct'] = (data_dict['scur'] / data_dict['slim']) * 100
except (TypeError, ZeroDivisionError):
pass
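# Worked example (illustrative values only): scur=42.0 and slim=200.0 (both parsed as
# floats in _line_to_dict) yield spct = (42.0 / 200.0) * 100 = 21.0.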
def _is_aggregate(self, data_dict):
return data_dict['svname'] in Services.ALL
def _update_hosts_statuses_if_needed(self, collect_status_metrics,
collect_status_metrics_by_host,
data_dict, hosts_statuses):
if data_dict['svname'] == Services.BACKEND:
return
if collect_status_metrics and 'status' in data_dict and 'pxname' in data_dict:
if collect_status_metrics_by_host and 'svname' in data_dict:
key = (data_dict['pxname'], data_dict['svname'], data_dict['status'])
else:
key = (data_dict['pxname'], data_dict['status'])
hosts_statuses[key] += 1
def _should_process(self, data_dict, collect_aggregates_only):
"""
if collect_aggregates_only, we process only the aggregates
else we process all except Services.BACKEND
"""
if collect_aggregates_only:
if self._is_aggregate(data_dict):
return True
return False
elif data_dict['svname'] == Services.BACKEND:
return False
return True
def _is_service_excl_filtered(self, service_name, services_incl_filter,
services_excl_filter):
if self._tag_match_patterns(service_name, services_excl_filter):
if self._tag_match_patterns(service_name, services_incl_filter):
return False
return True
return False
def _tag_match_patterns(self, tag, filters):
if not filters:
return False
for rule in filters:
if re.search(rule, tag):
return True
return False
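# Illustrative filter semantics (hypothetical service names and patterns, not defaults):
#   _is_service_excl_filtered('db-backend', [], ['.*-backend'])           -> True   (exclude matches, no include rescues it)
#   _is_service_excl_filtered('web-backend', ['web-.*'], ['.*-backend'])  -> False  (include overrides exclude)
#   _is_service_excl_filtered('frontend', [], ['.*-backend'])             -> False  (no exclude pattern matches)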
@staticmethod
def _normalize_status(status):
"""
Try to normalize the HAProxy status as one of the statuses defined in `ALL_STATUSES`,
if it can't be matched return the status as-is in a tag-friendly format
ex: 'UP 1/2' -> 'up'
'no check' -> 'no_check'
"""
formatted_status = status.lower().replace(" ", "_")
for normalized_status in Services.ALL_STATUSES:
if formatted_status.startswith(normalized_status):
return normalized_status
return formatted_status
def _process_backend_hosts_metric(self, hosts_statuses, services_incl_filter=None,
services_excl_filter=None, custom_tags=[]):
agg_statuses = defaultdict(lambda: {status: 0 for status in Services.COLLATED_STATUSES})
for host_status, count in hosts_statuses.iteritems():
try:
service, hostname, status = host_status
except Exception:
service, status = host_status
if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
continue
collated_status = Services.BACKEND_STATUS_TO_COLLATED.get(status)
if collated_status:
agg_statuses[service][collated_status] += count
else:
# create the entries for this service anyway
agg_statuses[service]
for service in agg_statuses:
tags = ['service:%s' % service]
tags.extend(custom_tags)
self.gauge(
'haproxy.backend_hosts',
agg_statuses[service][Services.AVAILABLE],
tags=tags + ['available:true'])
self.gauge(
'haproxy.backend_hosts',
agg_statuses[service][Services.UNAVAILABLE],
tags=tags + ['available:false'])
return agg_statuses
def _process_status_metric(self, hosts_statuses, collect_status_metrics_by_host,
services_incl_filter=None, services_excl_filter=None,
collate_status_tags_per_host=False, count_status_by_service=True, custom_tags=[]):
agg_statuses_counter = defaultdict(lambda: {status: 0 for status in Services.COLLATED_STATUSES})
# Initialize `statuses_counter`: every value is a defaultdict initialized with the correct
# keys, which depend on the `collate_status_tags_per_host` option
reported_statuses = Services.ALL_STATUSES
if collate_status_tags_per_host:
reported_statuses = Services.COLLATED_STATUSES
reported_statuses_dict = defaultdict(int)
for reported_status in reported_statuses:
reported_statuses_dict[reported_status] = 0
statuses_counter = defaultdict(lambda: copy.copy(reported_statuses_dict))
for host_status, count in hosts_statuses.iteritems():
hostname = None
try:
service, hostname, status = host_status
except Exception:
# Fall back to (service, status) keys first so the warning below can name the service
service, status = host_status
if collect_status_metrics_by_host:
self.warning('`collect_status_metrics_by_host` is enabled but no host info could be extracted from HAProxy stats endpoint for {0}'.format(service))
if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
continue
tags = []
if count_status_by_service:
tags.append('service:%s' % service)
if hostname:
tags.append('backend:%s' % hostname)
tags.extend(custom_tags)
counter_status = status
if collate_status_tags_per_host:
# An unknown status will be sent as UNAVAILABLE
counter_status = Services.STATUS_TO_COLLATED.get(status, Services.UNAVAILABLE)
statuses_counter[tuple(tags)][counter_status] += count
# Compute aggregates with collated statuses. If collate_status_tags_per_host is enabled we
# already send collated statuses with fine-grained tags, so no need to compute/send these aggregates
if not collate_status_tags_per_host:
agg_tags = []
if count_status_by_service:
agg_tags.append('service:%s' % service)
# An unknown status will be sent as UNAVAILABLE
agg_statuses_counter[tuple(agg_tags)][Services.STATUS_TO_COLLATED.get(status, Services.UNAVAILABLE)] += count
for tags, count_per_status in statuses_counter.iteritems():
for status, count in count_per_status.iteritems():
self.gauge('haproxy.count_per_status', count, tags=tags + ('status:%s' % status, ))
# Send aggregates
for service_tags, service_agg_statuses in agg_statuses_counter.iteritems():
for status, count in service_agg_statuses.iteritems():
self.gauge("haproxy.count_per_status", count, tags=service_tags + ('status:%s' % status, ))
def _process_metrics(self, data, url, services_incl_filter=None,
services_excl_filter=None, custom_tags=[]):
"""
Data is a dictionary related to one host
(one line) extracted from the csv.
It should look like:
{'pxname':'dogweb', 'svname':'i-4562165', 'scur':'42', ...}
"""
hostname = data['svname']
service_name = data['pxname']
back_or_front = data['back_or_front']
tags = [
"type:%s" % back_or_front,
"instance_url:%s" % url,
"service:%s" % service_name,
]
tags.extend(custom_tags)
if self._is_service_excl_filtered(service_name, services_incl_filter,
services_excl_filter):
return
if back_or_front == Services.BACKEND:
tags.append('backend:%s' % hostname)
for key, value in data.items():
if HAProxy.METRICS.get(key):
suffix = HAProxy.METRICS[key][1]
name = "haproxy.%s.%s" % (back_or_front.lower(), suffix)
if HAProxy.METRICS[key][0] == 'rate':
self.rate(name, value, tags=tags)
else:
self.gauge(name, value, tags=tags)
def _process_event(self, data, url, services_incl_filter=None,
services_excl_filter=None, custom_tags=[]):
'''
Main event processing loop. An event will be created for a service
status change.
Service checks on the server side can be used to provide the same functionality
'''
hostname = data['svname']
service_name = data['pxname']
key = "%s:%s" % (hostname, service_name)
status = self.host_status[url][key]
if self._is_service_excl_filtered(service_name, services_incl_filter,
services_excl_filter):
return
if status is None:
self.host_status[url][key] = data['status']
return
if status != data['status'] and data['status'] in ('up', 'down'):
# If the status of a host has changed, we trigger an event
try:
lastchg = int(data['lastchg'])
except Exception:
lastchg = 0
# Create the event object
ev = self._create_event(
data['status'], hostname, lastchg, service_name,
data['back_or_front'], custom_tags=custom_tags
)
self.event(ev)
# Store this host status so we can check against it later
self.host_status[url][key] = data['status']
def _create_event(self, status, hostname, lastchg, service_name, back_or_front,
custom_tags=[]):
HAProxy_agent = self.hostname.decode('utf-8')
if status == 'down':
alert_type = "error"
title = "%s reported %s:%s %s" % (HAProxy_agent, service_name, hostname, status.upper())
else:
if status == "up":
alert_type = "success"
else:
alert_type = "info"
title = "%s reported %s:%s back and %s" % (HAProxy_agent, service_name, hostname, status.upper())
tags = ["service:%s" % service_name]
if back_or_front == Services.BACKEND:
tags.append('backend:%s' % hostname)
tags.extend(custom_tags)
return {
'timestamp': int(time.time() - lastchg),
'event_type': EVENT_TYPE,
'host': HAProxy_agent,
'msg_title': title,
'alert_type': alert_type,
"source_type_name": SOURCE_TYPE_NAME,
"event_object": hostname,
"tags": tags
}
def _process_service_check(self, data, url, tag_by_host=False,
services_incl_filter=None, services_excl_filter=None, custom_tags=[]):
''' Report a service check, tagged by the service and the backend.
Statuses are defined in `STATUS_TO_SERVICE_CHECK` mapping.
'''
service_name = data['pxname']
status = data['status']
haproxy_hostname = self.hostname.decode('utf-8')
check_hostname = haproxy_hostname if tag_by_host else ''
if self._is_service_excl_filtered(service_name, services_incl_filter,
services_excl_filter):
return
if status in Services.STATUS_TO_SERVICE_CHECK:
service_check_tags = ["service:%s" % service_name]
service_check_tags.extend(custom_tags)
hostname = data['svname']
if data['back_or_front'] == Services.BACKEND:
service_check_tags.append('backend:%s' % hostname)
status = Services.STATUS_TO_SERVICE_CHECK[status]
message = "%s reported %s:%s %s" % (haproxy_hostname, service_name,
hostname, status)
self.service_check(self.SERVICE_CHECK_NAME, status, message=message,
hostname=check_hostname, tags=service_check_tags)
| |
import datetime
from collections import Counter
from unittest import mock
from django.core.exceptions import ValidationError
from django.forms import (
BaseForm, CharField, DateField, FileField, Form, IntegerField,
SplitDateTimeField, formsets,
)
from django.forms.formsets import BaseFormSet, all_valid, formset_factory
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.test import SimpleTestCase
class Choice(Form):
choice = CharField()
votes = IntegerField()
ChoiceFormSet = formset_factory(Choice)
class FavoriteDrinkForm(Form):
name = CharField()
class BaseFavoriteDrinksFormSet(BaseFormSet):
def clean(self):
seen_drinks = []
for drink in self.cleaned_data:
if drink['name'] in seen_drinks:
raise ValidationError('You may only specify a drink once.')
seen_drinks.append(drink['name'])
# A FormSet that takes a list of favorite drinks and raises an error if
# there are any duplicates.
FavoriteDrinksFormSet = formset_factory(FavoriteDrinkForm, formset=BaseFavoriteDrinksFormSet, extra=3)
class CustomKwargForm(Form):
def __init__(self, *args, custom_kwarg, **kwargs):
self.custom_kwarg = custom_kwarg
super().__init__(*args, **kwargs)
class FormsFormsetTestCase(SimpleTestCase):
def make_choiceformset(
self, formset_data=None, formset_class=ChoiceFormSet,
total_forms=None, initial_forms=0, max_num_forms=0, min_num_forms=0, **kwargs):
"""
Make a ChoiceFormset from the given formset_data.
The data should be given as a list of (choice, votes) tuples.
"""
kwargs.setdefault('prefix', 'choices')
kwargs.setdefault('auto_id', False)
if formset_data is None:
return formset_class(**kwargs)
if total_forms is None:
total_forms = len(formset_data)
def prefixed(*args):
args = (kwargs['prefix'],) + args
return '-'.join(args)
data = {
prefixed('TOTAL_FORMS'): str(total_forms),
prefixed('INITIAL_FORMS'): str(initial_forms),
prefixed('MAX_NUM_FORMS'): str(max_num_forms),
prefixed('MIN_NUM_FORMS'): str(min_num_forms),
}
for i, (choice, votes) in enumerate(formset_data):
data[prefixed(str(i), 'choice')] = choice
data[prefixed(str(i), 'votes')] = votes
return formset_class(data, **kwargs)
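# For example (illustrative, following the defaults above), make_choiceformset([('Calexico', '100')])
# builds bound data equivalent to:
#   {'choices-TOTAL_FORMS': '1', 'choices-INITIAL_FORMS': '0', 'choices-MIN_NUM_FORMS': '0',
#    'choices-MAX_NUM_FORMS': '0', 'choices-0-choice': 'Calexico', 'choices-0-votes': '100'}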
def test_basic_formset(self):
"""
A FormSet constructor takes the same arguments as Form. Create a
FormSet for adding data. By default, it displays 1 blank form.
"""
formset = self.make_choiceformset()
self.assertHTMLEqual(
str(formset),
"""<input type="hidden" name="choices-TOTAL_FORMS" value="1">
<input type="hidden" name="choices-INITIAL_FORMS" value="0">
<input type="hidden" name="choices-MIN_NUM_FORMS" value="0">
<input type="hidden" name="choices-MAX_NUM_FORMS" value="1000">
<tr><th>Choice:</th><td><input type="text" name="choices-0-choice"></td></tr>
<tr><th>Votes:</th><td><input type="number" name="choices-0-votes"></td></tr>"""
)
# FormSets are treated similarly to Forms. A FormSet has an is_valid()
# method, and a cleaned_data or errors attribute depending on whether
# all the forms passed validation. However, unlike a Form, cleaned_data
# and errors will be a list of dicts rather than a single dict.
formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}])
# If a FormSet wasn't passed any data, is_valid() and has_changed()
# return False.
formset = self.make_choiceformset()
self.assertFalse(formset.is_valid())
self.assertFalse(formset.has_changed())
def test_form_kwargs_formset(self):
"""
Custom kwargs set on the formset instance are passed to the
underlying forms.
"""
FormSet = formset_factory(CustomKwargForm, extra=2)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
for form in formset:
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, 1)
def test_form_kwargs_formset_dynamic(self):
"""Form kwargs can be passed dynamically in a formset."""
class DynamicBaseFormSet(BaseFormSet):
def get_form_kwargs(self, index):
return {'custom_kwarg': index}
DynamicFormSet = formset_factory(CustomKwargForm, formset=DynamicBaseFormSet, extra=2)
formset = DynamicFormSet(form_kwargs={'custom_kwarg': 'ignored'})
for i, form in enumerate(formset):
self.assertTrue(hasattr(form, 'custom_kwarg'))
self.assertEqual(form.custom_kwarg, i)
def test_form_kwargs_empty_form(self):
FormSet = formset_factory(CustomKwargForm)
formset = FormSet(form_kwargs={'custom_kwarg': 1})
self.assertTrue(hasattr(formset.empty_form, 'custom_kwarg'))
self.assertEqual(formset.empty_form.custom_kwarg, 1)
def test_formset_validation(self):
# FormSet instances can also have an error attribute if validation failed for
# any of the forms.
formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'votes': ['This field is required.']}])
def test_formset_validation_count(self):
"""
A formset's ManagementForm is validated once per FormSet.is_valid()
call and each form of the formset is cleaned once.
"""
def make_method_counter(func):
"""Add a counter to func for the number of times it's called."""
counter = Counter()
counter.call_count = 0
def mocked_func(*args, **kwargs):
counter.call_count += 1
return func(*args, **kwargs)
return mocked_func, counter
mocked_is_valid, is_valid_counter = make_method_counter(formsets.ManagementForm.is_valid)
mocked_full_clean, full_clean_counter = make_method_counter(BaseForm.full_clean)
formset = self.make_choiceformset([('Calexico', '100'), ('Any1', '42'), ('Any2', '101')])
with mock.patch('django.forms.formsets.ManagementForm.is_valid', mocked_is_valid), \
mock.patch('django.forms.forms.BaseForm.full_clean', mocked_full_clean):
self.assertTrue(formset.is_valid())
self.assertEqual(is_valid_counter.call_count, 1)
self.assertEqual(full_clean_counter.call_count, 4)
def test_formset_has_changed(self):
"""
FormSet.has_changed() is True if any data is passed to its forms, even
if the formset didn't validate.
"""
blank_formset = self.make_choiceformset([('', '')])
self.assertFalse(blank_formset.has_changed())
# invalid formset
invalid_formset = self.make_choiceformset([('Calexico', '')])
self.assertFalse(invalid_formset.is_valid())
self.assertTrue(invalid_formset.has_changed())
# valid formset
valid_formset = self.make_choiceformset([('Calexico', '100')])
self.assertTrue(valid_formset.is_valid())
self.assertTrue(valid_formset.has_changed())
def test_formset_initial_data(self):
"""
A FormSet can be prefilled with existing data by providing a list of
dicts to the `initial` argument. By default, an extra blank form is
included.
"""
formset = self.make_choiceformset(initial=[{'choice': 'Calexico', 'votes': 100}])
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>"""
)
def test_blank_form_unfilled(self):
"""A form that's displayed as blank may be submitted as blank."""
formset = self.make_choiceformset([('Calexico', '100'), ('', '')], initial_forms=1)
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}])
def test_second_form_partially_filled(self):
"""
If at least one field is filled out on a blank form, it will be
validated.
"""
formset = self.make_choiceformset([('Calexico', '100'), ('The Decemberists', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}])
def test_delete_prefilled_data(self):
"""
Deleting prefilled data is an error. Removing data from form fields
isn't the proper way to delete it.
"""
formset = self.make_choiceformset([('', ''), ('', '')], initial_forms=1)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors,
[{'votes': ['This field is required.'], 'choice': ['This field is required.']}, {}]
)
def test_displaying_more_than_one_blank_form(self):
"""
More than 1 empty form can be displayed using formset_factory's
`extra` argument.
"""
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice"></li>
<li>Votes: <input type="number" name="choices-0-votes"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>"""
)
# Since every form was displayed as blank, they are also accepted as
# blank. This may seem a little strange, but min_num is used to require
# a minimum number of forms to be completed.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': '',
'choices-0-votes': '',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{}, {}, {}])
def test_min_num_displaying_more_than_one_blank_form(self):
"""
More than 1 empty form can also be displayed using formset_factory's
min_num argument. It will (essentially) increment the extra argument.
"""
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=1)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
# Min_num forms are required; extra forms can be empty.
self.assertFalse(formset.forms[0].empty_permitted)
self.assertTrue(formset.forms[1].empty_permitted)
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice"></li>
<li>Votes: <input type="number" name="choices-0-votes"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>"""
)
def test_min_num_displaying_more_than_one_blank_form_with_zero_extra(self):
"""More than 1 empty form can be displayed using min_num."""
ChoiceFormSet = formset_factory(Choice, extra=0, min_num=3)
formset = ChoiceFormSet(auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice"></li>
<li>Votes: <input type="number" name="choices-0-votes"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>"""
)
def test_single_form_completed(self):
"""Just one form may be completed."""
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': '',
'choices-1-votes': '',
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual([form.cleaned_data for form in formset.forms], [{'votes': 100, 'choice': 'Calexico'}, {}, {}])
def test_formset_validate_max_flag(self):
"""
If validate_max is set and max_num is less than TOTAL_FORMS in the
data, a ValidationError is raised. MAX_NUM_FORMS in the data is
irrelevant here (it's output as a hint for the client but its value
in the returned data is not checked).
"""
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit at most 1 form.'])
self.assertEqual(
str(formset.non_form_errors()),
'<ul class="errorlist nonform"><li>Please submit at most 1 form.</li></ul>',
)
def test_formset_validate_min_flag(self):
"""
If validate_min is set and min_num is more than TOTAL_FORMS in the
data, a ValidationError is raised. MIN_NUM_FORMS in the data is
irrelevant here (it's output as a hint for the client but its value
in the returned data is not checked).
"""
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, min_num=3, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit at least 3 forms.'])
self.assertEqual(
str(formset.non_form_errors()),
'<ul class="errorlist nonform"><li>'
'Please submit at least 3 forms.</li></ul>',
)
def test_formset_validate_min_unchanged_forms(self):
"""
min_num validation doesn't consider unchanged forms with initial data
as "empty".
"""
initial = [
{'choice': 'Zero', 'votes': 0},
{'choice': 'One', 'votes': 0},
]
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '2',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '2',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1', # changed from initial
}
ChoiceFormSet = formset_factory(Choice, min_num=2, validate_min=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices', initial=initial)
self.assertFalse(formset.forms[0].has_changed())
self.assertTrue(formset.forms[1].has_changed())
self.assertTrue(formset.is_valid())
def test_formset_validate_min_excludes_empty_forms(self):
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '0',
}
ChoiceFormSet = formset_factory(Choice, extra=2, min_num=1, validate_min=True, can_delete=True)
formset = ChoiceFormSet(data, prefix='choices')
self.assertFalse(formset.has_changed())
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit at least 1 form.'])
def test_second_form_partially_filled_2(self):
"""A partially completed form is invalid."""
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-1-choice': 'The Decemberists',
'choices-1-votes': '', # missing value
'choices-2-choice': '',
'choices-2-votes': '',
}
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{}, {'votes': ['This field is required.']}, {}])
def test_more_initial_data(self):
"""
The extra argument works when the formset is pre-filled with initial
data.
"""
initial = [{'choice': 'Calexico', 'votes': 100}]
ChoiceFormSet = formset_factory(Choice, extra=3)
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Choice: <input type="text" name="choices-1-choice"></li>
<li>Votes: <input type="number" name="choices-1-votes"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>
<li>Choice: <input type="text" name="choices-3-choice"></li>
<li>Votes: <input type="number" name="choices-3-votes"></li>"""
)
# Retrieving an empty form works; it is rendered with the __prefix__ placeholder.
self.assertTrue(formset.empty_form.empty_permitted)
self.assertHTMLEqual(
formset.empty_form.as_ul(),
"""<li>Choice: <input type="text" name="choices-__prefix__-choice"></li>
<li>Votes: <input type="number" name="choices-__prefix__-votes"></li>"""
)
def test_formset_with_deletion(self):
"""
formset_factory's can_delete argument adds a boolean "delete" field to
each form. When that boolean field is True, the form will be in
formset.deleted_forms.
"""
ChoiceFormSet = formset_factory(Choice, can_delete=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE"></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie"></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900"></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE"></li>"""
)
# To delete something, set that form's special delete field to 'on'.
# Let's go ahead and delete Fergie.
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-DELETE': 'on',
'choices-2-choice': '',
'choices-2-votes': '',
'choices-2-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.forms],
[
{'votes': 100, 'DELETE': False, 'choice': 'Calexico'},
{'votes': 900, 'DELETE': True, 'choice': 'Fergie'},
{},
]
)
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'choice': 'Fergie'}]
)
def test_formset_with_deletion_remove_deletion_flag(self):
"""
If a form is filled with something and can_delete is also checked, that
form's errors shouldn't make the entire formset invalid since it's
going to be deleted.
"""
class CheckForm(Form):
field = IntegerField(min_value=100)
data = {
'check-TOTAL_FORMS': '3', # the number of forms rendered
'check-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'check-MAX_NUM_FORMS': '0', # max number of forms
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
'check-2-field': '',
'check-2-DELETE': '',
}
CheckFormSet = formset_factory(CheckForm, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
# If the deletion flag is removed, validation is enabled.
data['check-1-DELETE'] = ''
formset = CheckFormSet(data, prefix='check')
self.assertFalse(formset.is_valid())
def test_formset_with_deletion_invalid_deleted_form(self):
"""
deleted_forms works on a valid formset even if a deleted form would
have been invalid.
"""
FavoriteDrinkFormset = formset_factory(form=FavoriteDrinkForm, can_delete=True)
formset = FavoriteDrinkFormset({
'form-0-name': '',
'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0,
'form-MAX_NUM_FORMS': 1,
})
self.assertTrue(formset.is_valid())
self.assertEqual(formset._errors, [])
self.assertEqual(len(formset.deleted_forms), 1)
def test_formset_with_deletion_custom_widget(self):
class DeletionAttributeFormSet(BaseFormSet):
deletion_widget = HiddenInput
class DeletionMethodFormSet(BaseFormSet):
def get_deletion_widget(self):
return HiddenInput(attrs={'class': 'deletion'})
tests = [
(DeletionAttributeFormSet, '<input type="hidden" name="form-0-DELETE">'),
(
DeletionMethodFormSet,
'<input class="deletion" type="hidden" name="form-0-DELETE">',
),
]
for formset_class, delete_html in tests:
with self.subTest(formset_class=formset_class.__name__):
ArticleFormSet = formset_factory(
ArticleForm,
formset=formset_class,
can_delete=True,
)
formset = ArticleFormSet(auto_id=False)
self.assertHTMLEqual(
'\n'.join([form.as_ul() for form in formset.forms]),
(
f'<li>Title: <input type="text" name="form-0-title"></li>'
f'<li>Pub date: <input type="text" name="form-0-pub_date">'
f'{delete_html}</li>'
),
)
def test_formsets_with_ordering(self):
"""
formset_factory's can_order argument adds an integer field to each
form. When form validation succeeds, [form.cleaned_data for form in formset.forms]
will have the data in the correct order specified by the ordering
fields. If a number is duplicated in the set of ordering fields, for
instance form 0 and form 3 are both marked as 1, then the form index
is used as a secondary ordering criterion. In order to put something at the
front of the list, you'd need to set its order to 0.
"""
ChoiceFormSet = formset_factory(Choice, can_order=True)
initial = [{'choice': 'Calexico', 'votes': 100}, {'choice': 'Fergie', 'votes': 900}]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1"></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie"></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900"></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2"></li>
<li>Choice: <input type="text" name="choices-2-choice"></li>
<li>Votes: <input type="number" name="choices-2-votes"></li>
<li>Order: <input type="number" name="choices-2-ORDER"></li>"""
)
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '2', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.ordered_forms],
[
{'votes': 500, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
],
)
def test_formsets_with_ordering_custom_widget(self):
class OrderingAttributeFormSet(BaseFormSet):
ordering_widget = HiddenInput
class OrderingMethodFormSet(BaseFormSet):
def get_ordering_widget(self):
return HiddenInput(attrs={'class': 'ordering'})
tests = (
(OrderingAttributeFormSet, '<input type="hidden" name="form-0-ORDER">'),
(OrderingMethodFormSet, '<input class="ordering" type="hidden" name="form-0-ORDER">'),
)
for formset_class, order_html in tests:
with self.subTest(formset_class=formset_class.__name__):
ArticleFormSet = formset_factory(ArticleForm, formset=formset_class, can_order=True)
formset = ArticleFormSet(auto_id=False)
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
(
'<li>Title: <input type="text" name="form-0-title"></li>'
'<li>Pub date: <input type="text" name="form-0-pub_date">'
'%s</li>' % order_html
),
)
def test_empty_ordered_fields(self):
"""
Ordering fields are allowed to be left blank. If they are left blank,
they'll be sorted below everything else.
"""
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '',
'choices-3-choice': 'Basia Bulat',
'choices-3-votes': '50',
'choices-3-ORDER': '',
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.ordered_forms],
[
{'votes': 100, 'ORDER': 1, 'choice': 'Calexico'},
{'votes': 900, 'ORDER': 2, 'choice': 'Fergie'},
{'votes': 500, 'ORDER': None, 'choice': 'The Decemberists'},
{'votes': 50, 'ORDER': None, 'choice': 'Basia Bulat'},
],
)
def test_ordering_blank_fieldsets(self):
"""Ordering works with blank fieldsets."""
data = {
'choices-TOTAL_FORMS': '3', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
}
ChoiceFormSet = formset_factory(Choice, can_order=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(formset.ordered_forms, [])
def test_formset_with_ordering_and_deletion(self):
"""FormSets with ordering + deletion."""
ChoiceFormSet = formset_factory(Choice, can_order=True, can_delete=True)
initial = [
{'choice': 'Calexico', 'votes': 100},
{'choice': 'Fergie', 'votes': 900},
{'choice': 'The Decemberists', 'votes': 500},
]
formset = ChoiceFormSet(initial=initial, auto_id=False, prefix='choices')
self.assertHTMLEqual(
'\n'.join(form.as_ul() for form in formset.forms),
"""<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>
<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>
<li>Order: <input type="number" name="choices-0-ORDER" value="1"></li>
<li>Delete: <input type="checkbox" name="choices-0-DELETE"></li>
<li>Choice: <input type="text" name="choices-1-choice" value="Fergie"></li>
<li>Votes: <input type="number" name="choices-1-votes" value="900"></li>
<li>Order: <input type="number" name="choices-1-ORDER" value="2"></li>
<li>Delete: <input type="checkbox" name="choices-1-DELETE"></li>
<li>Choice: <input type="text" name="choices-2-choice" value="The Decemberists"></li>
<li>Votes: <input type="number" name="choices-2-votes" value="500"></li>
<li>Order: <input type="number" name="choices-2-ORDER" value="3"></li>
<li>Delete: <input type="checkbox" name="choices-2-DELETE"></li>
<li>Choice: <input type="text" name="choices-3-choice"></li>
<li>Votes: <input type="number" name="choices-3-votes"></li>
<li>Order: <input type="number" name="choices-3-ORDER"></li>
<li>Delete: <input type="checkbox" name="choices-3-DELETE"></li>"""
)
# Let's delete Fergie, and put The Decemberists ahead of Calexico.
data = {
'choices-TOTAL_FORMS': '4', # the number of forms rendered
'choices-INITIAL_FORMS': '3', # the number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
'choices-0-ORDER': '1',
'choices-0-DELETE': '',
'choices-1-choice': 'Fergie',
'choices-1-votes': '900',
'choices-1-ORDER': '2',
'choices-1-DELETE': 'on',
'choices-2-choice': 'The Decemberists',
'choices-2-votes': '500',
'choices-2-ORDER': '0',
'choices-2-DELETE': '',
'choices-3-choice': '',
'choices-3-votes': '',
'choices-3-ORDER': '',
'choices-3-DELETE': '',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertEqual(
[form.cleaned_data for form in formset.ordered_forms],
[
{'votes': 500, 'DELETE': False, 'ORDER': 0, 'choice': 'The Decemberists'},
{'votes': 100, 'DELETE': False, 'ORDER': 1, 'choice': 'Calexico'},
],
)
self.assertEqual(
[form.cleaned_data for form in formset.deleted_forms],
[{'votes': 900, 'DELETE': True, 'ORDER': 2, 'choice': 'Fergie'}]
)
def test_invalid_deleted_form_with_ordering(self):
"""
Can get ordered_forms from a valid formset even if a deleted form
would have been invalid.
"""
FavoriteDrinkFormset = formset_factory(form=FavoriteDrinkForm, can_delete=True, can_order=True)
formset = FavoriteDrinkFormset({
'form-0-name': '',
'form-0-DELETE': 'on', # no name!
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 1,
'form-MIN_NUM_FORMS': 0,
'form-MAX_NUM_FORMS': 1
})
self.assertTrue(formset.is_valid())
self.assertEqual(formset.ordered_forms, [])
def test_clean_hook(self):
"""
FormSets have a clean() hook for doing extra validation that isn't tied
to any form. It follows the same pattern as the clean() hook on Forms.
"""
# Start out with some duplicate data.
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
# Any errors raised by formset.clean() are available via the
# formset.non_form_errors() method.
for error in formset.non_form_errors():
self.assertEqual(str(error), 'You may only specify a drink once.')
# The valid case still works.
data['drinks-1-name'] = 'Bloody Mary'
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertTrue(formset.is_valid())
self.assertEqual(formset.non_form_errors(), [])
def test_limiting_max_forms(self):
"""Limiting the maximum number of forms with max_num."""
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the extra parameter.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3)
formset = LimitedFavoriteDrinkFormSet()
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" id="id_form-0-name"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name"></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th>
<td><input type="text" name="form-2-name" id="id_form-2-name"></td></tr>"""
)
# If max_num is 0 then no form is rendered at all.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
self.assertEqual(formset.forms, [])
def test_limited_max_forms_two(self):
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=5, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th><td>
<input type="text" name="form-0-name" id="id_form-0-name"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name"></td></tr>"""
)
def test_limiting_extra_lest_than_max_num(self):
"""max_num has no effect when extra is less than max_num."""
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet()
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" id="id_form-0-name"></td></tr>"""
)
def test_max_num_with_initial_data(self):
# When not passed, max_num will take a high default value, leaving the
# number of forms only controlled by the value of the initial and extra
# parameters.
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1)
formset = LimitedFavoriteDrinkFormSet(initial=[{'name': 'Fernet and Coke'}])
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Fernet and Coke" id="id_form-0-name"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name"></td></tr>"""
)
def test_max_num_zero(self):
"""
If max_num is 0 then no form is rendered at all, regardless of extra,
unless initial data is present.
"""
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet()
self.assertEqual(formset.forms, [])
def test_max_num_zero_with_initial(self):
# initial trumps max_num
initial = [
{'name': 'Fernet and Coke'},
{'name': 'Bloody Mary'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=0)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Fernet and Coke"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary"></td></tr>"""
)
def test_more_initial_than_max_num(self):
"""
More initial forms than max_num results in all initial forms being
displayed (but no extra forms).
"""
initial = [
{'name': 'Gin Tonic'},
{'name': 'Bloody Mary'},
{'name': 'Jack and Coke'},
]
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=1, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=initial)
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input id="id_form-0-name" name="form-0-name" type="text" value="Gin Tonic"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input id="id_form-1-name" name="form-1-name" type="text" value="Bloody Mary"></td></tr>
<tr><th><label for="id_form-2-name">Name:</label></th>
<td><input id="id_form-2-name" name="form-2-name" type="text" value="Jack and Coke"></td></tr>"""
)
def test_default_absolute_max(self):
# absolute_max defaults to 2 * DEFAULT_MAX_NUM if max_num is None.
data = {
'form-TOTAL_FORMS': 2001,
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
['Please submit at most 1000 forms.'],
)
self.assertEqual(formset.absolute_max, 2000)
def test_absolute_max(self):
data = {
'form-TOTAL_FORMS': '2001',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
AbsoluteMaxFavoriteDrinksFormSet = formset_factory(
FavoriteDrinkForm,
absolute_max=3000,
)
formset = AbsoluteMaxFavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), True)
self.assertEqual(len(formset.forms), 2001)
# absolute_max provides a hard limit.
data['form-TOTAL_FORMS'] = '3001'
formset = AbsoluteMaxFavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 3000)
self.assertEqual(
formset.non_form_errors(),
['Please submit at most 1000 forms.'],
)
def test_absolute_max_with_max_num(self):
data = {
'form-TOTAL_FORMS': '1001',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
LimitedFavoriteDrinksFormSet = formset_factory(
FavoriteDrinkForm,
max_num=30,
absolute_max=1000,
)
formset = LimitedFavoriteDrinksFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 1000)
self.assertEqual(
formset.non_form_errors(),
['Please submit at most 30 forms.'],
)
def test_absolute_max_invalid(self):
msg = "'absolute_max' must be greater or equal to 'max_num'."
for max_num in [None, 31]:
with self.subTest(max_num=max_num):
with self.assertRaisesMessage(ValueError, msg):
formset_factory(FavoriteDrinkForm, max_num=max_num, absolute_max=30)
def test_more_initial_form_result_in_one(self):
"""
One form from initial and extra=3 with max_num=2 results in the one
initial form and one extra.
"""
LimitedFavoriteDrinkFormSet = formset_factory(FavoriteDrinkForm, extra=3, max_num=2)
formset = LimitedFavoriteDrinkFormSet(initial=[{'name': 'Gin Tonic'}])
self.assertHTMLEqual(
'\n'.join(str(form) for form in formset.forms),
"""<tr><th><label for="id_form-0-name">Name:</label></th>
<td><input type="text" name="form-0-name" value="Gin Tonic" id="id_form-0-name"></td></tr>
<tr><th><label for="id_form-1-name">Name:</label></th>
<td><input type="text" name="form-1-name" id="id_form-1-name"></td></tr>"""
)
def test_management_form_prefix(self):
"""The management form has the correct prefix."""
formset = FavoriteDrinksFormSet()
self.assertEqual(formset.management_form.prefix, 'form')
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MIN_NUM_FORMS': '0',
'form-MAX_NUM_FORMS': '0',
}
formset = FavoriteDrinksFormSet(data=data)
self.assertEqual(formset.management_form.prefix, 'form')
formset = FavoriteDrinksFormSet(initial={})
self.assertEqual(formset.management_form.prefix, 'form')
def test_non_form_errors(self):
data = {
'drinks-TOTAL_FORMS': '2', # the number of forms rendered
'drinks-INITIAL_FORMS': '0', # the number of forms with initial data
'drinks-MIN_NUM_FORMS': '0', # min number of forms
'drinks-MAX_NUM_FORMS': '0', # max number of forms
'drinks-0-name': 'Gin and Tonic',
'drinks-1-name': 'Gin and Tonic',
}
formset = FavoriteDrinksFormSet(data, prefix='drinks')
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['You may only specify a drink once.'])
self.assertEqual(
str(formset.non_form_errors()),
'<ul class="errorlist nonform"><li>'
'You may only specify a drink once.</li></ul>',
)
def test_formset_iteration(self):
"""Formset instances are iterable."""
ChoiceFormset = formset_factory(Choice, extra=3)
formset = ChoiceFormset()
# An iterated formset yields formset.forms.
forms = list(formset)
self.assertEqual(forms, formset.forms)
self.assertEqual(len(formset), len(forms))
# A formset may be indexed to retrieve its forms.
self.assertEqual(formset[0], forms[0])
with self.assertRaises(IndexError):
formset[3]
# Formsets can override the default iteration order
class BaseReverseFormSet(BaseFormSet):
def __iter__(self):
return reversed(self.forms)
def __getitem__(self, idx):
return super().__getitem__(len(self) - idx - 1)
ReverseChoiceFormset = formset_factory(Choice, BaseReverseFormSet, extra=3)
reverse_formset = ReverseChoiceFormset()
# __iter__() modifies the rendering order.
# Compare forms from "reverse" formset with forms from original formset
self.assertEqual(str(reverse_formset[0]), str(forms[-1]))
self.assertEqual(str(reverse_formset[1]), str(forms[-2]))
self.assertEqual(len(reverse_formset), len(forms))
def test_formset_nonzero(self):
"""A formsets without any forms evaluates as True."""
ChoiceFormset = formset_factory(Choice, extra=0)
formset = ChoiceFormset()
self.assertEqual(len(formset.forms), 0)
self.assertTrue(formset)
def test_formset_splitdatetimefield(self):
"""
Formset works with SplitDateTimeField(initial=datetime.datetime.now).
"""
class SplitDateTimeForm(Form):
when = SplitDateTimeField(initial=datetime.datetime.now)
SplitDateTimeFormSet = formset_factory(SplitDateTimeForm)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-when_0': '1904-06-16',
'form-0-when_1': '15:51:33',
}
formset = SplitDateTimeFormSet(data)
self.assertTrue(formset.is_valid())
def test_formset_error_class(self):
"""Formset's forms use the formset's error_class."""
class CustomErrorList(ErrorList):
pass
formset = FavoriteDrinksFormSet(error_class=CustomErrorList)
self.assertEqual(formset.forms[0].error_class, CustomErrorList)
def test_formset_calls_forms_is_valid(self):
"""Formsets call is_valid() on each form."""
class AnotherChoice(Choice):
def is_valid(self):
self.is_valid_called = True
return super().is_valid()
AnotherChoiceFormSet = formset_factory(AnotherChoice)
data = {
'choices-TOTAL_FORMS': '1', # number of forms rendered
'choices-INITIAL_FORMS': '0', # number of forms with initial data
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '0', # max number of forms
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
formset = AnotherChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertTrue(formset.is_valid())
self.assertTrue(all(form.is_valid_called for form in formset.forms))
def test_hard_limit_on_instantiated_forms(self):
"""A formset has a hard limit on the number of forms instantiated."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 2
ChoiceFormSet = formset_factory(Choice, max_num=1)
# someone fiddles with the mgmt form data...
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# But we still only instantiate 3 forms
self.assertEqual(len(formset.forms), 3)
# and the formset isn't valid
self.assertFalse(formset.is_valid())
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_increase_hard_limit(self):
"""Can increase the built-in forms limit via a higher max_num."""
# reduce the default limit of 1000 temporarily for testing
_old_DEFAULT_MAX_NUM = formsets.DEFAULT_MAX_NUM
try:
formsets.DEFAULT_MAX_NUM = 3
# for this form, we want a limit of 4
ChoiceFormSet = formset_factory(Choice, max_num=4)
formset = ChoiceFormSet(
{
'choices-TOTAL_FORMS': '4',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0', # min number of forms
'choices-MAX_NUM_FORMS': '4',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
'choices-2-choice': 'Two',
'choices-2-votes': '2',
'choices-3-choice': 'Three',
'choices-3-votes': '3',
},
prefix='choices',
)
# Four forms are instantiated and no exception is raised
self.assertEqual(len(formset.forms), 4)
finally:
formsets.DEFAULT_MAX_NUM = _old_DEFAULT_MAX_NUM
def test_non_form_errors_run_full_clean(self):
"""
If non_form_errors() is called without calling is_valid() first,
it should ensure that full_clean() is called.
"""
class BaseCustomFormSet(BaseFormSet):
def clean(self):
raise ValidationError("This is a non-form error")
ChoiceFormSet = formset_factory(Choice, formset=BaseCustomFormSet)
data = {
'choices-TOTAL_FORMS': '1',
'choices-INITIAL_FORMS': '0',
}
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIsInstance(formset.non_form_errors(), ErrorList)
self.assertEqual(list(formset.non_form_errors()), ['This is a non-form error'])
def test_validate_max_ignores_forms_marked_for_deletion(self):
class CheckForm(Form):
field = IntegerField()
data = {
'check-TOTAL_FORMS': '2',
'check-INITIAL_FORMS': '0',
'check-MAX_NUM_FORMS': '1',
'check-0-field': '200',
'check-0-DELETE': '',
'check-1-field': '50',
'check-1-DELETE': 'on',
}
CheckFormSet = formset_factory(CheckForm, max_num=1, validate_max=True, can_delete=True)
formset = CheckFormSet(data, prefix='check')
self.assertTrue(formset.is_valid())
def test_formset_total_error_count(self):
"""A valid formset should have 0 total errors."""
data = [ # formset_data, expected error count
([('Calexico', '100')], 0),
([('Calexico', '')], 1),
([('', 'invalid')], 2),
([('Calexico', '100'), ('Calexico', '')], 1),
([('Calexico', ''), ('Calexico', '')], 2),
]
for formset_data, expected_error_count in data:
formset = self.make_choiceformset(formset_data)
self.assertEqual(formset.total_error_count(), expected_error_count)
def test_formset_total_error_count_with_non_form_errors(self):
data = {
'choices-TOTAL_FORMS': '2', # the number of forms rendered
'choices-INITIAL_FORMS': '0', # the number of forms with initial data
'choices-MAX_NUM_FORMS': '2', # max number of forms - should be ignored
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice, extra=1, max_num=1, validate_max=True)
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 1)
data['choices-1-votes'] = ''
formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertEqual(formset.total_error_count(), 2)
def test_html_safe(self):
formset = self.make_choiceformset()
self.assertTrue(hasattr(formset, '__html__'))
self.assertEqual(str(formset), formset.__html__())
def test_can_delete_extra_formset_forms(self):
ChoiceFormFormset = formset_factory(form=Choice, can_delete=True, extra=2)
formset = ChoiceFormFormset()
self.assertEqual(len(formset), 2)
self.assertIn('DELETE', formset.forms[0].fields)
self.assertIn('DELETE', formset.forms[1].fields)
def test_disable_delete_extra_formset_forms(self):
ChoiceFormFormset = formset_factory(
form=Choice,
can_delete=True,
can_delete_extra=False,
extra=2,
)
formset = ChoiceFormFormset()
self.assertEqual(len(formset), 2)
self.assertNotIn('DELETE', formset.forms[0].fields)
self.assertNotIn('DELETE', formset.forms[1].fields)
formset = ChoiceFormFormset(initial=[{'choice': 'Zero', 'votes': '1'}])
self.assertEqual(len(formset), 3)
self.assertIn('DELETE', formset.forms[0].fields)
self.assertNotIn('DELETE', formset.forms[1].fields)
self.assertNotIn('DELETE', formset.forms[2].fields)
formset = ChoiceFormFormset(data={
'form-0-choice': 'Zero',
'form-0-votes': '0',
'form-0-DELETE': 'on',
'form-1-choice': 'One',
'form-1-votes': '1',
'form-2-choice': '',
'form-2-votes': '',
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '1',
}, initial=[{'choice': 'Zero', 'votes': '1'}])
self.assertEqual(formset.cleaned_data, [
{'choice': 'Zero', 'votes': 0, 'DELETE': True},
{'choice': 'One', 'votes': 1},
{},
])
self.assertIs(formset._should_delete_form(formset.forms[0]), True)
self.assertIs(formset._should_delete_form(formset.forms[1]), False)
self.assertIs(formset._should_delete_form(formset.forms[2]), False)
class FormsetAsTagTests(SimpleTestCase):
def setUp(self):
data = {
'choices-TOTAL_FORMS': '1',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-MAX_NUM_FORMS': '0',
'choices-0-choice': 'Calexico',
'choices-0-votes': '100',
}
self.formset = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.management_form_html = (
'<input type="hidden" name="choices-TOTAL_FORMS" value="1">'
'<input type="hidden" name="choices-INITIAL_FORMS" value="0">'
'<input type="hidden" name="choices-MIN_NUM_FORMS" value="0">'
'<input type="hidden" name="choices-MAX_NUM_FORMS" value="0">'
)
def test_as_table(self):
self.assertHTMLEqual(
self.formset.as_table(),
self.management_form_html + (
'<tr><th>Choice:</th><td>'
'<input type="text" name="choices-0-choice" value="Calexico"></td></tr>'
'<tr><th>Votes:</th><td>'
'<input type="number" name="choices-0-votes" value="100"></td></tr>'
)
)
def test_as_p(self):
self.assertHTMLEqual(
self.formset.as_p(),
self.management_form_html + (
'<p>Choice: <input type="text" name="choices-0-choice" value="Calexico"></p>'
'<p>Votes: <input type="number" name="choices-0-votes" value="100"></p>'
)
)
def test_as_ul(self):
self.assertHTMLEqual(
self.formset.as_ul(),
self.management_form_html + (
'<li>Choice: <input type="text" name="choices-0-choice" value="Calexico"></li>'
'<li>Votes: <input type="number" name="choices-0-votes" value="100"></li>'
)
)
class ArticleForm(Form):
title = CharField()
pub_date = DateField()
ArticleFormSet = formset_factory(ArticleForm)
class TestIsBoundBehavior(SimpleTestCase):
def test_no_data_error(self):
formset = ArticleFormSet({})
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
[
'ManagementForm data is missing or has been tampered with. '
'Missing fields: form-TOTAL_FORMS, form-INITIAL_FORMS. '
'You may need to file a bug report if the issue persists.',
],
)
self.assertEqual(formset.errors, [])
# Can still render the formset.
self.assertEqual(
str(formset),
'<tr><td colspan="2">'
'<ul class="errorlist nonfield">'
'<li>(Hidden field TOTAL_FORMS) This field is required.</li>'
'<li>(Hidden field INITIAL_FORMS) This field is required.</li>'
'</ul>'
'<input type="hidden" name="form-TOTAL_FORMS" id="id_form-TOTAL_FORMS">'
'<input type="hidden" name="form-INITIAL_FORMS" id="id_form-INITIAL_FORMS">'
'<input type="hidden" name="form-MIN_NUM_FORMS" id="id_form-MIN_NUM_FORMS">'
'<input type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS">'
'</td></tr>\n'
)
def test_management_form_invalid_data(self):
data = {
'form-TOTAL_FORMS': 'two',
'form-INITIAL_FORMS': 'one',
}
formset = ArticleFormSet(data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(
formset.non_form_errors(),
[
'ManagementForm data is missing or has been tampered with. '
'Missing fields: form-TOTAL_FORMS, form-INITIAL_FORMS. '
'You may need to file a bug report if the issue persists.',
],
)
self.assertEqual(formset.errors, [])
# Can still render the formset.
self.assertEqual(
str(formset),
'<tr><td colspan="2">'
'<ul class="errorlist nonfield">'
'<li>(Hidden field TOTAL_FORMS) Enter a whole number.</li>'
'<li>(Hidden field INITIAL_FORMS) Enter a whole number.</li>'
'</ul>'
'<input type="hidden" name="form-TOTAL_FORMS" value="two" id="id_form-TOTAL_FORMS">'
'<input type="hidden" name="form-INITIAL_FORMS" value="one" id="id_form-INITIAL_FORMS">'
'<input type="hidden" name="form-MIN_NUM_FORMS" id="id_form-MIN_NUM_FORMS">'
'<input type="hidden" name="form-MAX_NUM_FORMS" id="id_form-MAX_NUM_FORMS">'
'</td></tr>\n',
)
def test_customize_management_form_error(self):
formset = ArticleFormSet({}, error_messages={'missing_management_form': 'customized'})
self.assertIs(formset.is_valid(), False)
self.assertEqual(formset.non_form_errors(), ['customized'])
self.assertEqual(formset.errors, [])
def test_with_management_data_attrs_work_fine(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
}
formset = ArticleFormSet(data)
self.assertEqual(0, formset.initial_form_count())
self.assertEqual(1, formset.total_form_count())
self.assertTrue(formset.is_bound)
self.assertTrue(formset.forms[0].is_bound)
self.assertTrue(formset.is_valid())
self.assertTrue(formset.forms[0].is_valid())
self.assertEqual([{}], formset.cleaned_data)
def test_form_errors_are_caught_by_formset(self):
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
'form-1-title': 'Test',
'form-1-pub_date': '', # <-- this date is missing but required
}
formset = ArticleFormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual([{}, {'pub_date': ['This field is required.']}], formset.errors)
def test_empty_forms_are_unbound(self):
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-0-title': 'Test',
'form-0-pub_date': '1904-06-16',
}
unbound_formset = ArticleFormSet()
bound_formset = ArticleFormSet(data)
empty_forms = [
unbound_formset.empty_form,
bound_formset.empty_form
]
# Empty forms should be unbound
self.assertFalse(empty_forms[0].is_bound)
self.assertFalse(empty_forms[1].is_bound)
# The empty forms should be equal.
self.assertHTMLEqual(empty_forms[0].as_p(), empty_forms[1].as_p())
class TestEmptyFormSet(SimpleTestCase):
def test_empty_formset_is_valid(self):
"""An empty formset still calls clean()"""
class EmptyFsetWontValidate(BaseFormSet):
def clean(self):
raise ValidationError('Clean method called')
EmptyFsetWontValidateFormset = formset_factory(FavoriteDrinkForm, extra=0, formset=EmptyFsetWontValidate)
formset = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '0'},
prefix="form",
)
formset2 = EmptyFsetWontValidateFormset(
data={'form-INITIAL_FORMS': '0', 'form-TOTAL_FORMS': '1', 'form-0-name': 'bah'},
prefix="form",
)
self.assertFalse(formset.is_valid())
self.assertFalse(formset2.is_valid())
def test_empty_formset_media(self):
"""Media is available on empty formset."""
class MediaForm(Form):
class Media:
js = ('some-file.js',)
self.assertIn('some-file.js', str(formset_factory(MediaForm, extra=0)().media))
def test_empty_formset_is_multipart(self):
"""is_multipart() works with an empty formset."""
class FileForm(Form):
file = FileField()
self.assertTrue(formset_factory(FileForm, extra=0)().is_multipart())
class AllValidTests(SimpleTestCase):
def test_valid(self):
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-0-choice': 'Zero',
'choices-0-votes': '0',
'choices-1-choice': 'One',
'choices-1-votes': '1',
}
ChoiceFormSet = formset_factory(Choice)
formset1 = ChoiceFormSet(data, auto_id=False, prefix='choices')
formset2 = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIs(all_valid((formset1, formset2)), True)
expected_errors = [{}, {}]
self.assertEqual(formset1._errors, expected_errors)
self.assertEqual(formset2._errors, expected_errors)
def test_invalid(self):
"""all_valid() validates all forms, even when some are invalid."""
data = {
'choices-TOTAL_FORMS': '2',
'choices-INITIAL_FORMS': '0',
'choices-MIN_NUM_FORMS': '0',
'choices-0-choice': 'Zero',
'choices-0-votes': '',
'choices-1-choice': 'One',
'choices-1-votes': '',
}
ChoiceFormSet = formset_factory(Choice)
formset1 = ChoiceFormSet(data, auto_id=False, prefix='choices')
formset2 = ChoiceFormSet(data, auto_id=False, prefix='choices')
self.assertIs(all_valid((formset1, formset2)), False)
expected_errors = [{'votes': ['This field is required.']}, {'votes': ['This field is required.']}]
self.assertEqual(formset1._errors, expected_errors)
self.assertEqual(formset2._errors, expected_errors)
| |
"""
The sys module provides information about the available functions on the minion
"""
import fnmatch
import logging
import salt.loader
import salt.runner
import salt.state
import salt.utils.args
import salt.utils.doc
import salt.utils.schema
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "sys"
__proxyenabled__ = ["*"]
def __virtual__():
"""
Return as sys
"""
return __virtualname__
def doc(*args):
"""
Return the docstrings for all modules. Optionally, specify a module or a
function to narrow the selection.
The strings are aggregated into a single document on the master for easy
reading.
Multiple modules/functions can be specified.
CLI Example:
.. code-block:: bash
salt '*' sys.doc
salt '*' sys.doc sys
salt '*' sys.doc sys.doc
salt '*' sys.doc network.traceroute user.info
Modules can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.doc 'sys.*'
salt '*' sys.doc 'sys.list_*'
"""
docs = {}
if not args:
for fun in __salt__:
docs[fun] = __salt__[fun].__doc__
return salt.utils.doc.strip_rst(docs)
for module in args:
_use_fnmatch = False
if "*" in module:
target_mod = module
_use_fnmatch = True
elif module:
# allow both "sys" and "sys." to match sys, without also matching
# sysctl
target_mod = module + "." if not module.endswith(".") else module
else:
target_mod = ""
if _use_fnmatch:
for fun in fnmatch.filter(__salt__, target_mod):
docs[fun] = __salt__[fun].__doc__
else:
for fun in __salt__:
if fun == module or fun.startswith(target_mod):
docs[fun] = __salt__[fun].__doc__
return salt.utils.doc.strip_rst(docs)
def state_doc(*args):
"""
Return the docstrings for all states. Optionally, specify a state or a
function to narrow the selection.
The strings are aggregated into a single document on the master for easy
reading.
Multiple states/functions can be specified.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.state_doc
salt '*' sys.state_doc service
salt '*' sys.state_doc service.running
salt '*' sys.state_doc service.running iptables.append
State names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.state_doc 'service.*' 'iptables.*'
"""
st_ = salt.state.State(__opts__)
docs = {}
if not args:
for fun in st_.states:
state = fun.split(".")[0]
if state not in docs:
if hasattr(st_.states[fun], "__globals__"):
docs[state] = st_.states[fun].__globals__["__doc__"]
docs[fun] = st_.states[fun].__doc__
return salt.utils.doc.strip_rst(docs)
for module in args:
_use_fnmatch = False
if "*" in module:
target_mod = module
_use_fnmatch = True
elif module:
# allow both "sys" and "sys." to match sys, without also matching
# sysctl
target_mod = module + "." if not module.endswith(".") else module
else:
target_mod = ""
if _use_fnmatch:
for fun in fnmatch.filter(st_.states, target_mod):
state = fun.split(".")[0]
if hasattr(st_.states[fun], "__globals__"):
docs[state] = st_.states[fun].__globals__["__doc__"]
docs[fun] = st_.states[fun].__doc__
else:
for fun in st_.states:
if fun == module or fun.startswith(target_mod):
state = module.split(".")[0]
if state not in docs:
if hasattr(st_.states[fun], "__globals__"):
docs[state] = st_.states[fun].__globals__["__doc__"]
docs[fun] = st_.states[fun].__doc__
return salt.utils.doc.strip_rst(docs)
def runner_doc(*args):
"""
Return the docstrings for all runners. Optionally, specify a runner or a
function to narrow the selection.
The strings are aggregated into a single document on the master for easy
reading.
Multiple runners/functions can be specified.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.runner_doc
salt '*' sys.runner_doc cache
salt '*' sys.runner_doc cache.grains
salt '*' sys.runner_doc cache.grains mine.get
Runner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.runner_doc 'cache.clear_*'
"""
run_ = salt.runner.Runner(__opts__)
docs = {}
if not args:
for fun in run_.functions:
docs[fun] = run_.functions[fun].__doc__
return salt.utils.doc.strip_rst(docs)
for module in args:
_use_fnmatch = False
if "*" in module:
target_mod = module
_use_fnmatch = True
elif module:
# allow both "sys" and "sys." to match sys, without also matching
# sysctl
target_mod = module + "." if not module.endswith(".") else module
else:
target_mod = ""
if _use_fnmatch:
for fun in fnmatch.filter(run_.functions, target_mod):
docs[fun] = run_.functions[fun].__doc__
else:
for fun in run_.functions:
if fun == module or fun.startswith(target_mod):
docs[fun] = run_.functions[fun].__doc__
return salt.utils.doc.strip_rst(docs)
def returner_doc(*args):
"""
Return the docstrings for all returners. Optionally, specify a returner or a
function to narrow the selection.
The strings are aggregated into a single document on the master for easy
reading.
Multiple returners/functions can be specified.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.returner_doc
salt '*' sys.returner_doc sqlite3
salt '*' sys.returner_doc sqlite3.get_fun
salt '*' sys.returner_doc sqlite3.get_fun etcd.get_fun
Returner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.returner_doc 'sqlite3.get_*'
"""
returners_ = salt.loader.returners(__opts__, [])
docs = {}
if not args:
for fun in returners_:
docs[fun] = returners_[fun].__doc__
return salt.utils.doc.strip_rst(docs)
for module in args:
_use_fnmatch = False
if "*" in module:
target_mod = module
_use_fnmatch = True
elif module:
# allow both "sys" and "sys." to match sys, without also matching
# sysctl
target_mod = module + "." if not module.endswith(".") else module
else:
target_mod = ""
if _use_fnmatch:
for fun in returners_:
if fun == module or fun.startswith(target_mod):
docs[fun] = returners_[fun].__doc__
else:
for fun in returners_.keys():
if fun == module or fun.startswith(target_mod):
docs[fun] = returners_[fun].__doc__
return salt.utils.doc.strip_rst(docs)
def renderer_doc(*args):
"""
Return the docstrings for all renderers. Optionally, specify a renderer or a
function to narrow the selection.
The strings are aggregated into a single document on the master for easy
reading.
Multiple renderers can be specified.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.renderer_doc
salt '*' sys.renderer_doc cheetah
salt '*' sys.renderer_doc jinja json
Renderer names can be specified as globs.
.. code-block:: bash
salt '*' sys.renderer_doc 'c*' 'j*'
"""
renderers_ = salt.loader.render(__opts__, [])
docs = {}
if not args:
for func in renderers_.keys():
docs[func] = renderers_[func].__doc__
return salt.utils.doc.strip_rst(docs)
for module in args:
if "*" in module or "." in module:
for func in fnmatch.filter(renderers_, module):
docs[func] = renderers_[func].__doc__
else:
moduledot = module + "."
for func in renderers_.keys():
if func.startswith(moduledot):
docs[func] = renderers_[func].__doc__
return salt.utils.doc.strip_rst(docs)
def list_functions(*args, **kwargs): # pylint: disable=unused-argument
"""
List the functions for all modules. Optionally, specify a module or modules
from which to list.
CLI Example:
.. code-block:: bash
salt '*' sys.list_functions
salt '*' sys.list_functions sys
salt '*' sys.list_functions sys user
.. versionadded:: 0.12.0
.. code-block:: bash
salt '*' sys.list_functions 'module.specific_function'
Function names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_functions 'sys.list_*'
"""
# NOTE: **kwargs is used here to prevent a traceback when garbage
# arguments are tacked on to the end.
if not args:
# We're being asked for all functions
return sorted(__salt__)
names = set()
for module in args:
if "*" in module or "." in module:
for func in fnmatch.filter(__salt__, module):
names.add(func)
else:
# "sys" should just match sys without also matching sysctl
moduledot = module + "."
for func in __salt__:
if func.startswith(moduledot):
names.add(func)
return sorted(names)
def list_modules(*args):
"""
List the modules loaded on the minion
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_modules
Module names can be specified as globs.
.. code-block:: bash
salt '*' sys.list_modules 's*'
"""
modules = set()
if not args:
for func in __salt__:
modules.add(func.split(".")[0])
return sorted(modules)
for module in args:
if "*" in module:
for func in fnmatch.filter(__salt__, module):
modules.add(func.split(".")[0])
else:
for func in __salt__:
mod_test = func.split(".")[0]
if mod_test == module:
modules.add(mod_test)
return sorted(modules)
def reload_modules():
"""
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
"""
# This function is actually handled inside the minion.py file, the function
# is caught before it ever gets here. Therefore, the docstring above is
# only for the online docs, and ANY CHANGES made to it must also be made in
# each of the gen_modules() funcs in minion.py.
return True
def argspec(module=""):
"""
Return the argument specification of functions in Salt execution
modules.
CLI Example:
.. code-block:: bash
salt '*' sys.argspec pkg.install
salt '*' sys.argspec sys
salt '*' sys.argspec
Module names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.argspec 'pkg.*'
"""
return salt.utils.args.argspec_report(__salt__, module)
def state_argspec(module=""):
"""
Return the argument specification of functions in Salt state
modules.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.state_argspec pkg.installed
salt '*' sys.state_argspec file
salt '*' sys.state_argspec
State names can be specified as globs.
.. code-block:: bash
salt '*' sys.state_argspec 'pkg.*'
"""
st_ = salt.state.State(__opts__)
return salt.utils.args.argspec_report(st_.states, module)
def returner_argspec(module=""):
"""
Return the argument specification of functions in Salt returner
modules.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.returner_argspec xmpp
salt '*' sys.returner_argspec xmpp smtp
salt '*' sys.returner_argspec
Returner names can be specified as globs.
.. code-block:: bash
salt '*' sys.returner_argspec 'sqlite3.*'
"""
returners_ = salt.loader.returners(__opts__, [])
return salt.utils.args.argspec_report(returners_, module)
def runner_argspec(module=""):
"""
Return the argument specification of functions in Salt runner
modules.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.runner_argspec state
salt '*' sys.runner_argspec http
salt '*' sys.runner_argspec
Runner names can be specified as globs.
.. code-block:: bash
salt '*' sys.runner_argspec 'winrepo.*'
"""
run_ = salt.runner.Runner(__opts__)
return salt.utils.args.argspec_report(run_.functions, module)
def list_state_functions(*args, **kwargs): # pylint: disable=unused-argument
"""
List the functions for all state modules. Optionally, specify a state
module or modules from which to list.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_state_functions
salt '*' sys.list_state_functions file
salt '*' sys.list_state_functions pkg user
State function names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_state_functions 'file.*'
salt '*' sys.list_state_functions 'file.s*'
.. versionadded:: 2016.9
.. code-block:: bash
salt '*' sys.list_state_functions 'module.specific_function'
"""
# NOTE: **kwargs is used here to prevent a traceback when garbage
# arguments are tacked on to the end.
st_ = salt.state.State(__opts__)
if not args:
# We're being asked for all functions
return sorted(st_.states)
names = set()
for module in args:
if "*" in module or "." in module:
for func in fnmatch.filter(st_.states, module):
names.add(func)
else:
# "sys" should just match sys without also matching sysctl
moduledot = module + "."
for func in st_.states:
if func.startswith(moduledot):
names.add(func)
return sorted(names)
def list_state_modules(*args):
"""
List the modules loaded on the minion
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_state_modules
State module names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_state_modules 'mysql_*'
"""
st_ = salt.state.State(__opts__)
modules = set()
if not args:
for func in st_.states:
log.debug("func %s", func)
modules.add(func.split(".")[0])
return sorted(modules)
for module in args:
if "*" in module:
for func in fnmatch.filter(st_.states, module):
modules.add(func.split(".")[0])
else:
for func in st_.states:
mod_test = func.split(".")[0]
if mod_test == module:
modules.add(mod_test)
return sorted(modules)
def list_runners(*args):
"""
List the runners loaded on the minion
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_runners
Runner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_runners 'm*'
"""
run_ = salt.runner.Runner(__opts__)
runners = set()
if not args:
for func in run_.functions:
runners.add(func.split(".")[0])
return sorted(runners)
for module in args:
if "*" in module:
for func in fnmatch.filter(run_.functions, module):
runners.add(func.split(".")[0])
else:
for func in run_.functions:
mod_test = func.split(".")[0]
if mod_test == module:
runners.add(mod_test)
return sorted(runners)
def list_runner_functions(*args, **kwargs): # pylint: disable=unused-argument
"""
List the functions for all runner modules. Optionally, specify a runner
module or modules from which to list.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_runner_functions
salt '*' sys.list_runner_functions state
salt '*' sys.list_runner_functions state virt
Runner function names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_runner_functions 'state.*' 'virt.*'
"""
# NOTE: **kwargs is used here to prevent a traceback when garbage
# arguments are tacked on to the end.
run_ = salt.runner.Runner(__opts__)
if not args:
# We're being asked for all functions
return sorted(run_.functions)
names = set()
for module in args:
if "*" in module or "." in module:
for func in fnmatch.filter(run_.functions, module):
names.add(func)
else:
# "sys" should just match sys without also matching sysctl
moduledot = module + "."
for func in run_.functions:
if func.startswith(moduledot):
names.add(func)
return sorted(names)
def list_returners(*args):
"""
List the returners loaded on the minion
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_returners
Returner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_returners 's*'
"""
returners_ = salt.loader.returners(__opts__, [])
returners = set()
if not args:
for func in returners_.keys():
returners.add(func.split(".")[0])
return sorted(returners)
for module in args:
if "*" in module:
for func in fnmatch.filter(returners_, module):
returners.add(func.split(".")[0])
else:
for func in returners_:
mod_test = func.split(".")[0]
if mod_test == module:
returners.add(mod_test)
return sorted(returners)
def list_returner_functions(*args, **kwargs): # pylint: disable=unused-argument
"""
List the functions for all returner modules. Optionally, specify a returner
module or modules from which to list.
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_returner_functions
salt '*' sys.list_returner_functions mysql
salt '*' sys.list_returner_functions mysql etcd
Returner names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_returner_functions 'sqlite3.get_*'
"""
# NOTE: **kwargs is used here to prevent a traceback when garbage
# arguments are tacked on to the end.
returners_ = salt.loader.returners(__opts__, [])
if not args:
# We're being asked for all functions
return sorted(returners_)
names = set()
for module in args:
if "*" in module or "." in module:
for func in fnmatch.filter(returners_, module):
names.add(func)
else:
# "sys" should just match sys without also matching sysctl
moduledot = module + "."
for func in returners_:
if func.startswith(moduledot):
names.add(func)
return sorted(names)
def list_renderers(*args):
"""
List the renderers loaded on the minion
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.list_renderers
Renderer names can be specified as globs.
.. code-block:: bash
salt '*' sys.list_renderers 'yaml*'
"""
renderers_ = salt.loader.render(__opts__, [])
renderers = set()
if not args:
for rend in renderers_.keys():
renderers.add(rend)
return sorted(renderers)
for module in args:
for rend in fnmatch.filter(renderers_, module):
renderers.add(rend)
return sorted(renderers)
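# _argspec_to_schema() below consumes one entry of the dict produced by
# salt.utils.args.argspec_report(). A rough sketch of the expected shape
# (field values are purely illustrative, not taken from a real module):
#   spec = {'args': ['name', 'timeout'], 'defaults': (30,),
#           'varargs': None, 'kwargs': None}
# Arguments without defaults become required schema items; the remainder keep
# their defaults in the generated salt.utils.schema class.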
def _argspec_to_schema(mod, spec):
args = spec["args"]
defaults = spec["defaults"] or []
args_req = args[: len(args) - len(defaults)]
args_defaults = list(zip(args[-len(defaults) :], defaults))
types = {
"title": mod,
"description": mod,
}
for i in args_req:
types[i] = salt.utils.schema.OneOfItem(
items=(
salt.utils.schema.BooleanItem(title=i, description=i, required=True),
salt.utils.schema.IntegerItem(title=i, description=i, required=True),
salt.utils.schema.NumberItem(title=i, description=i, required=True),
salt.utils.schema.StringItem(title=i, description=i, required=True),
# S.ArrayItem(title=i, description=i, required=True),
# S.DictItem(title=i, description=i, required=True),
)
)
for i, j in args_defaults:
types[i] = salt.utils.schema.OneOfItem(
items=(
salt.utils.schema.BooleanItem(title=i, description=i, default=j),
salt.utils.schema.IntegerItem(title=i, description=i, default=j),
salt.utils.schema.NumberItem(title=i, description=i, default=j),
salt.utils.schema.StringItem(title=i, description=i, default=j),
# S.ArrayItem(title=i, description=i, default=j),
# S.DictItem(title=i, description=i, default=j),
)
)
return type(mod, (salt.utils.schema.Schema,), types).serialize()
def state_schema(module=""):
"""
Return a JSON Schema for the given state function(s)
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' sys.state_schema
salt '*' sys.state_schema pkg.installed
"""
specs = state_argspec(module)
schemas = []
for state_mod, state_spec in specs.items():
schemas.append(_argspec_to_schema(state_mod, state_spec))
return schemas
| |
#!/usr/bin/env python
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'scipy'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script decides whether a pyx file has changed relative to its C file
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'scipy'
# WindowsError is not defined on unix systems
try:
WindowsError
except NameError:
WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
try:
from Cython.Compiler.Version import version as cython_version
from distutils.version import LooseVersion
if LooseVersion(cython_version) < LooseVersion('0.23.4'):
raise Exception('Building SciPy requires Cython >= 0.23.4')
except ImportError:
pass
flags = ['--fast-fail']
if tofile.endswith('.cxx'):
flags += ['--cplus']
try:
try:
r = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile])
if r != 0:
raise Exception('Cython failed')
except OSError:
# There are ways of installing Cython that don't result in a cython
# executable on the path, see gh-2397.
r = subprocess.call([sys.executable, '-c',
'import sys; from Cython.Compiler.Main import '
'setuptools_main as main; sys.exit(main())'] + flags +
["-o", tofile, fromfile])
if r != 0:
raise Exception("Cython either isn't installed or it failed.")
except OSError:
raise OSError('Cython needs to be installed')
def process_tempita_pyx(fromfile, tofile):
try:
try:
from Cython import Tempita as tempita
except ImportError:
import tempita
except ImportError:
raise Exception('Building SciPy requires Tempita: '
'pip install --user Tempita')
from_filename = tempita.Template.from_filename
template = from_filename(fromfile, encoding=sys.getdefaultencoding())
pyxcontent = template.substitute()
assert fromfile.endswith('.pyx.in')
pyxfile = fromfile[:-len('.pyx.in')] + '.pyx'
with open(pyxfile, "w") as f:
f.write(pyxcontent)
process_pyx(pyxfile, tofile)
rules = {
# fromext : function
'.pyx': process_pyx,
'.pyx.in': process_tempita_pyx
}
#
# Hash db
#
def load_hashes(filename):
# Return { filename : (sha1 of input, sha1 of output) }
if os.path.isfile(filename):
hashes = {}
with open(filename, 'r') as f:
for line in f:
filename, inhash, outhash = line.split()
if outhash == "None":
outhash = None
hashes[filename] = (inhash, outhash)
else:
hashes = {}
return hashes
def save_hashes(hash_db, filename):
with open(filename, 'w') as f:
for key, value in sorted(hash_db.items()):
f.write("%s %s %s\n" % (key, value[0], value[1]))
def sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, "rb") as f:
h.update(f.read())
return h.hexdigest()
#
# Main program
#
def normpath(path):
path = path.replace(os.sep, '/')
if path.startswith('./'):
path = path[2:]
return path
def get_hash(frompath, topath):
from_hash = sha1_of_file(frompath)
if topath:
to_hash = sha1_of_file(topath) if os.path.exists(topath) else None
else:
to_hash = None
return (from_hash, to_hash)
def get_pxi_dependencies(fullfrompath):
fullfromdir = os.path.dirname(fullfrompath)
dependencies = []
with open(fullfrompath, 'r') as f:
for line in f:
line = [token.strip('\'\" \n') for token in line.split(' ')]
if line[0] == "include":
dependencies.append(os.path.join(fullfromdir, line[1]))
return dependencies
def process(path, fromfile, tofile, processor_function, hash_db, pxi_hashes):
fullfrompath = os.path.join(path, fromfile)
fulltopath = os.path.join(path, tofile)
current_hash = get_hash(fullfrompath, fulltopath)
if current_hash == hash_db.get(normpath(fullfrompath), None):
file_changed = False
else:
file_changed = True
pxi_changed = False
pxi_dependencies = get_pxi_dependencies(fullfrompath)
for pxi in pxi_dependencies:
pxi_hash = get_hash(pxi, None)
if pxi_hash == hash_db.get(normpath(pxi), None):
continue
else:
pxi_hashes[normpath(pxi)] = pxi_hash
pxi_changed = True
if not file_changed and not pxi_changed:
print('%s has not changed' % fullfrompath)
return
orig_cwd = os.getcwd()
try:
os.chdir(path)
print('Processing %s' % fullfrompath)
processor_function(fromfile, tofile)
finally:
os.chdir(orig_cwd)
# changed target file, recompute hash
current_hash = get_hash(fullfrompath, fulltopath)
# store hash in db
hash_db[normpath(fullfrompath)] = current_hash
def find_process_files(root_dir):
hash_db = load_hashes(HASH_FILE)
# Keep changed .pxi hashes in a separate dict until the end
# because if we update hash_db and multiple files include the same
# .pxi file the changes won't be detected.
pxi_hashes = {}
for cur_dir, dirs, files in os.walk(root_dir):
for filename in files:
in_file = os.path.join(cur_dir, filename + ".in")
if filename.endswith('.pyx') and os.path.isfile(in_file):
continue
for fromext, function in rules.items():
if filename.endswith(fromext):
toext = ".c"
with open(os.path.join(cur_dir, filename), 'rb') as f:
data = f.read()
m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
if m:
toext = ".cxx"
fromfile = filename
tofile = filename[:-len(fromext)] + toext
process(cur_dir, fromfile, tofile, function, hash_db, pxi_hashes)
hash_db.update(pxi_hashes)
save_hashes(hash_db, HASH_FILE)
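# A .pyx file opts into C++ output by carrying a distutils directive such as
# (example directive only):
#   # distutils: language = c++
# anywhere in the file; the regular expression above matches such a line
# case-insensitively and switches the target extension from .c to .cxx.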
def main():
try:
root_dir = sys.argv[1]
except IndexError:
root_dir = DEFAULT_ROOT
find_process_files(root_dir)
if __name__ == '__main__':
main()
| |
import os
import imp
import inspect
import time
import sys
import traceback
import commands
import threading
import json
import pdb
import pprint
from datetime import datetime
from collections import defaultdict
from core.models import *
from django.db.models import F, Q
from django.db import connection
#from openstack.manager import OpenStackManager
from openstack.driver import OpenStackDriver
from util.logger import Logger, logging, logger
#from timeout import timeout
from xos.config import Config, XOS_DIR
from observer.steps import *
from syncstep import SyncStep
from toposort import toposort
from observer.error_mapper import *
from openstack_observer.openstacksyncstep import OpenStackSyncStep
debug_mode = False
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
logger = Logger(level=logging.INFO)
class StepNotReady(Exception):
pass
class NoOpDriver:
def __init__(self):
self.enabled = True
self.dependency_graph = None
STEP_STATUS_WORKING=1
STEP_STATUS_OK=2
STEP_STATUS_KO=3
def invert_graph(g):
ig = {}
for k,v in g.items():
for v0 in v:
try:
ig[v0].append(k)
except:
ig[v0] = [k]
return ig
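# A minimal illustration of invert_graph() with a hypothetical graph:
#   invert_graph({'a': ['b', 'c'], 'b': ['c']})
#   => {'b': ['a'], 'c': ['a', 'b']}
# i.e. every edge k -> v becomes v -> k, which is how the deletion
# dependency graph is derived from the sync step graph further down.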
class XOSObserver:
#sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivilege,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
sync_steps = []
def __init__(self):
# The Condition object that gets signalled by Feefie events
self.step_lookup = {}
self.load_sync_step_modules()
self.load_sync_steps()
self.event_cond = threading.Condition()
self.driver_kind = getattr(Config(), "observer_driver", "openstack")
if self.driver_kind=="openstack":
self.driver = OpenStackDriver()
else:
self.driver = NoOpDriver()
def wait_for_event(self, timeout):
self.event_cond.acquire()
self.event_cond.wait(timeout)
self.event_cond.release()
def wake_up(self):
logger.info('Wake up routine called. Event cond %r'%self.event_cond)
self.event_cond.acquire()
self.event_cond.notify()
self.event_cond.release()
def load_sync_step_modules(self, step_dir=None):
if step_dir is None:
if hasattr(Config(), "observer_steps_dir"):
step_dir = Config().observer_steps_dir
else:
step_dir = XOS_DIR + "/observer/steps"
for fn in os.listdir(step_dir):
pathname = os.path.join(step_dir,fn)
if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
module = imp.load_source(fn[:-3],pathname)
for classname in dir(module):
c = getattr(module, classname, None)
# make sure 'c' is a descendant of SyncStep and has a
# provides field (this eliminates the abstract base classes
# since they don't have a provides)
if inspect.isclass(c) and (issubclass(c, SyncStep) or issubclass(c,OpenStackSyncStep)) and hasattr(c,"provides") and (c not in self.sync_steps):
self.sync_steps.append(c)
logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
# print 'loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps])
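# load_sync_steps() expects the file at observer_dependency_graph to hold a
# JSON object mapping a model name to the list of model names it depends on,
# along the lines of (hypothetical content):
#   { "Slice": ["Site"], "Sliver": ["Slice", "Node", "Image"] }
# Each dependency is then rewritten to a (ModelName, modelname) tuple before
# the step graph is built from it.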
def load_sync_steps(self):
dep_path = Config().observer_dependency_graph
logger.info('Loading model dependency graph from %s' % dep_path)
try:
# This contains dependencies between records, not sync steps
self.model_dependency_graph = json.loads(open(dep_path).read())
for left,lst in self.model_dependency_graph.items():
new_lst = []
for k in lst:
try:
tup = (k,k.lower())
new_lst.append(tup)
deps = self.model_dependency_graph[k]
except:
self.model_dependency_graph[k] = []
self.model_dependency_graph[left] = new_lst
except Exception,e:
raise e
try:
backend_path = Config().observer_pl_dependency_graph
logger.info('Loading backend dependency graph from %s' % backend_path)
# This contains dependencies between backend records
self.backend_dependency_graph = json.loads(open(backend_path).read())
for k,v in self.backend_dependency_graph.items():
try:
self.model_dependency_graph[k].extend(v)
except KeyError:
self.model_dependency_graph[k] = v
except Exception,e:
logger.info('Backend dependency graph not loaded')
# We can work without a backend graph
self.backend_dependency_graph = {}
provides_dict = {}
for s in self.sync_steps:
self.step_lookup[s.__name__] = s
for m in s.provides:
try:
provides_dict[m.__name__].append(s.__name__)
except KeyError:
provides_dict[m.__name__]=[s.__name__]
step_graph = {}
for k,v in self.model_dependency_graph.items():
try:
for source in provides_dict[k]:
if (not v):
step_graph[source] = []
for m,_ in v:
try:
for dest in provides_dict[m]:
# no deps, pass
try:
if (dest not in step_graph[source]):
step_graph[source].append(dest)
except:
step_graph[source]=[dest]
except KeyError:
pass
except KeyError:
pass
# no dependencies, pass
self.dependency_graph = step_graph
self.deletion_dependency_graph = invert_graph(step_graph)
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(step_graph)
self.ordered_steps = toposort(self.dependency_graph, map(lambda s:s.__name__,self.sync_steps))
#self.ordered_steps = ['SyncRoles', 'SyncControllerSites', 'SyncControllerSitePrivileges','SyncImages', 'SyncControllerImages','SyncControllerUsers','SyncControllerUserSitePrivileges','SyncControllerSlices', 'SyncControllerSlicePrivileges', 'SyncControllerUserSlicePrivileges', 'SyncControllerNetworks','SyncSlivers']
#self.ordered_steps = ['SyncControllerSites','SyncControllerUsers','SyncControllerSlices','SyncControllerNetworks']
print "Order of steps=",self.ordered_steps
self.load_run_times()
def check_duration(self, step, duration):
try:
if (duration > step.deadline):
logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.name,duration))
except AttributeError:
# step doesn't have a deadline
pass
def update_run_time(self, step, deletion):
if (not deletion):
self.last_run_times[step.__name__]=time.time()
else:
self.last_deletion_run_times[step.__name__]=time.time()
def check_schedule(self, step, deletion):
last_run_times = self.last_run_times if not deletion else self.last_deletion_run_times
time_since_last_run = time.time() - last_run_times.get(step.__name__, 0)
try:
if (time_since_last_run < step.requested_interval):
raise StepNotReady
except AttributeError:
logger.info('Step %s does not have requested_interval set'%step.__name__)
raise StepNotReady
def load_run_times(self):
try:
jrun_times = open('/tmp/observer_run_times').read()
self.last_run_times = json.loads(jrun_times)
except:
self.last_run_times={}
for e in self.ordered_steps:
self.last_run_times[e]=0
try:
jrun_times = open('/tmp/observer_deletion_run_times').read()
self.last_deletion_run_times = json.loads(jrun_times)
except:
self.last_deletion_run_times={}
for e in self.ordered_steps:
self.last_deletion_run_times[e]=0
def save_run_times(self):
run_times = json.dumps(self.last_run_times)
open('/tmp/observer_run_times','w').write(run_times)
deletion_run_times = json.dumps(self.last_deletion_run_times)
open('/tmp/observer_deletion_run_times','w').write(deletion_run_times)
def check_class_dependency(self, step, failed_steps):
step.dependencies = []
for obj in step.provides:
lst = self.model_dependency_graph.get(obj.__name__, [])
nlst = map(lambda(a,b):b,lst)
step.dependencies.extend(nlst)
for failed_step in failed_steps:
if (failed_step in step.dependencies):
raise StepNotReady
def sync(self, S, deletion):
try:
step = self.step_lookup[S]
start_time=time.time()
logger.info("Starting to work on step %s" % step.__name__)
dependency_graph = self.dependency_graph if not deletion else self.deletion_dependency_graph
# Wait for step dependencies to be met
try:
deps = self.dependency_graph[S]
has_deps = True
except KeyError:
has_deps = False
go = True
failed_dep = None
if (has_deps):
for d in deps:
if d==step.__name__:
logger.info(" step %s self-wait skipped" % step.__name__)
go = True
continue
cond = self.step_conditions[d]
cond.acquire()
if (self.step_status[d] == STEP_STATUS_WORKING):
logger.info(" step %s wait on dep %s" % (step.__name__, d))
cond.wait()
elif self.step_status[d] == STEP_STATUS_OK:
go = True
else:
go = False
failed_dep = d
cond.release()
if (not go):
break
else:
go = True
if (not go):
print bcolors.FAIL + "Step %r skipped on %r" % (step,failed_dep) + bcolors.ENDC
# SMBAKER: sync_step was not defined here, so I changed
# this from 'sync_step' to 'step'. Verify.
self.failed_steps.append(step)
my_status = STEP_STATUS_KO
else:
sync_step = step(driver=self.driver,error_map=self.error_mapper)
sync_step.__name__ = step.__name__
sync_step.dependencies = []
try:
mlist = sync_step.provides
for m in mlist:
lst = self.model_dependency_graph[m.__name__]
nlst = map(lambda(a,b):b,lst)
sync_step.dependencies.extend(nlst)
except KeyError:
pass
sync_step.debug_mode = debug_mode
should_run = False
try:
# Various checks that decide whether
# this step runs or not
self.check_class_dependency(sync_step, self.failed_steps) # dont run Slices if Sites failed
self.check_schedule(sync_step, deletion) # dont run sync_network_routes if time since last run < 1 hour
should_run = True
except StepNotReady:
logger.info('Step not ready: %s'%sync_step.__name__)
self.failed_steps.append(sync_step)
my_status = STEP_STATUS_KO
except Exception,e:
logger.error('%r' % e)
logger.log_exc("sync step failed: %r. Deletion: %r"%(sync_step,deletion))
self.failed_steps.append(sync_step)
my_status = STEP_STATUS_KO
if (should_run):
try:
logger.info('Executing step %s' % sync_step.__name__)
print bcolors.OKBLUE + "Executing step %s" % sync_step.__name__ + bcolors.ENDC
failed_objects = sync_step(failed=list(self.failed_step_objects), deletion=deletion)
# Measure duration after the step has actually run, so the deadline check is meaningful
duration = time.time() - start_time
self.check_duration(sync_step, duration)
if failed_objects:
self.failed_step_objects.update(failed_objects)
logger.info("Step %r succeeded" % step)
print bcolors.OKGREEN + "Step %r succeeded" % step + bcolors.ENDC
my_status = STEP_STATUS_OK
self.update_run_time(sync_step,deletion)
except Exception,e:
print bcolors.FAIL + "Model step %r failed" % (step) + bcolors.ENDC
logger.error('Model step %r failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' % (step, e))
logger.log_exc(e)
self.failed_steps.append(S)
my_status = STEP_STATUS_KO
else:
logger.info("Step %r succeeded due to non-run" % step)
my_status = STEP_STATUS_OK
try:
my_cond = self.step_conditions[S]
my_cond.acquire()
self.step_status[S]=my_status
my_cond.notify_all()
my_cond.release()
except KeyError,e:
logger.info('Step %r is a leaf' % step)
pass
finally:
connection.close()
def run(self):
if not self.driver.enabled:
return
if (self.driver_kind=="openstack") and (not self.driver.has_openstack):
return
while True:
try:
loop_start = time.time()
error_map_file = getattr(Config(), "error_map_path", XOS_DIR + "/error_map.txt")
self.error_mapper = ErrorMapper(error_map_file)
# Set of whole steps that failed
self.failed_steps = []
# Set of individual objects within steps that failed
self.failed_step_objects = set()
# Set up conditions and step status
# This is needed for steps to run in parallel
# while obeying dependencies.
providers = set()
for v in self.dependency_graph.values():
if (v):
providers.update(v)
self.step_conditions = {}
self.step_status = {}
for p in list(providers):
self.step_conditions[p] = threading.Condition()
self.step_status[p] = STEP_STATUS_WORKING
logger.info('Waiting for event')
tBeforeWait = time.time()
self.wait_for_event(timeout=30)
logger.info('Observer woke up')
# Two passes. One for sync, the other for deletion.
for deletion in [False,True]:
threads = []
logger.info('Deletion=%r...'%deletion)
schedule = self.ordered_steps if not deletion else reversed(self.ordered_steps)
for S in schedule:
thread = threading.Thread(target=self.sync, args=(S, deletion))
logger.info('Scheduling step %s (deletion=%r)' % (S, deletion))
threads.append(thread)
# Start threads
for t in threads:
t.start()
# Wait for all threads to finish before continuing with the run loop
for t in threads:
t.join()
self.save_run_times()
loop_end = time.time()
open('/tmp/observer_last_run','w').write(json.dumps({'last_run': loop_end, 'last_duration':loop_end - loop_start}))
except Exception, e:
logger.error('Core error. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' % e)
logger.log_exc("Exception in observer run loop")
traceback.print_exc()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import pytz
import hypothesis as h
import hypothesis.strategies as st
import hypothesis.extra.numpy as npst
import hypothesis.extra.pytz as tzst
import numpy as np
import pyarrow as pa
# TODO(kszucs): alphanum_text, surrogate_text
custom_text = st.text(
alphabet=st.characters(
min_codepoint=0x41,
max_codepoint=0x7E
)
)
null_type = st.just(pa.null())
bool_type = st.just(pa.bool_())
binary_type = st.just(pa.binary())
string_type = st.just(pa.string())
large_binary_type = st.just(pa.large_binary())
large_string_type = st.just(pa.large_string())
fixed_size_binary_type = st.builds(
pa.binary,
st.integers(min_value=0, max_value=16)
)
binary_like_types = st.one_of(
binary_type,
string_type,
large_binary_type,
large_string_type,
fixed_size_binary_type
)
signed_integer_types = st.sampled_from([
pa.int8(),
pa.int16(),
pa.int32(),
pa.int64()
])
unsigned_integer_types = st.sampled_from([
pa.uint8(),
pa.uint16(),
pa.uint32(),
pa.uint64()
])
integer_types = st.one_of(signed_integer_types, unsigned_integer_types)
floating_types = st.sampled_from([
pa.float16(),
pa.float32(),
pa.float64()
])
decimal128_type = st.builds(
pa.decimal128,
precision=st.integers(min_value=1, max_value=38),
scale=st.integers(min_value=1, max_value=38)
)
decimal256_type = st.builds(
pa.decimal256,
precision=st.integers(min_value=1, max_value=76),
scale=st.integers(min_value=1, max_value=76)
)
numeric_types = st.one_of(integer_types, floating_types,
decimal128_type, decimal256_type)
date_types = st.sampled_from([
pa.date32(),
pa.date64()
])
time_types = st.sampled_from([
pa.time32('s'),
pa.time32('ms'),
pa.time64('us'),
pa.time64('ns')
])
timestamp_types = st.builds(
pa.timestamp,
unit=st.sampled_from(['s', 'ms', 'us', 'ns']),
tz=tzst.timezones()
)
duration_types = st.builds(
pa.duration,
st.sampled_from(['s', 'ms', 'us', 'ns'])
)
interval_types = st.just(pa.month_day_nano_interval())
temporal_types = st.one_of(
date_types,
time_types,
timestamp_types,
duration_types,
interval_types
)
primitive_types = st.one_of(
null_type,
bool_type,
numeric_types,
temporal_types,
binary_like_types
)
metadata = st.dictionaries(st.text(), st.text())
@st.composite
def fields(draw, type_strategy=primitive_types):
name = draw(custom_text)
typ = draw(type_strategy)
if pa.types.is_null(typ):
nullable = True
else:
nullable = draw(st.booleans())
meta = draw(metadata)
return pa.field(name, type=typ, nullable=nullable, metadata=meta)
def list_types(item_strategy=primitive_types):
return (
st.builds(pa.list_, item_strategy) |
st.builds(pa.large_list, item_strategy) |
st.builds(
pa.list_,
item_strategy,
st.integers(min_value=0, max_value=16)
)
)
@st.composite
def struct_types(draw, item_strategy=primitive_types):
fields_strategy = st.lists(fields(item_strategy))
fields_rendered = draw(fields_strategy)
field_names = [field.name for field in fields_rendered]
# check that field names are unique, see ARROW-9997
h.assume(len(set(field_names)) == len(field_names))
return pa.struct(fields_rendered)
def dictionary_types(key_strategy=None, value_strategy=None):
key_strategy = key_strategy or signed_integer_types
value_strategy = value_strategy or st.one_of(
bool_type,
integer_types,
st.sampled_from([pa.float32(), pa.float64()]),
binary_type,
string_type,
fixed_size_binary_type,
)
return st.builds(pa.dictionary, key_strategy, value_strategy)
@st.composite
def map_types(draw, key_strategy=primitive_types,
item_strategy=primitive_types):
key_type = draw(key_strategy)
h.assume(not pa.types.is_null(key_type))
value_type = draw(item_strategy)
return pa.map_(key_type, value_type)
# union type
# extension type
def schemas(type_strategy=primitive_types, max_fields=None):
children = st.lists(fields(type_strategy), max_size=max_fields)
return st.builds(pa.schema, children)
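# Minimal usage sketch (an assumption for illustration, not part of this module):
# these strategies are meant to be drawn inside hypothesis tests, e.g.
#
#   @h.given(schemas(max_fields=3))
#   def test_schema_roundtrip(schema):
#       assert pa.schema(list(schema)) == schema
#
# The test name and the roundtrip property above are purely illustrative.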
all_types = st.deferred(
lambda: (
primitive_types |
list_types() |
struct_types() |
dictionary_types() |
map_types() |
list_types(all_types) |
struct_types(all_types)
)
)
all_fields = fields(all_types)
all_schemas = schemas(all_types)
_default_array_sizes = st.integers(min_value=0, max_value=20)
@st.composite
def _pylist(draw, value_type, size, nullable=True):
arr = draw(arrays(value_type, size=size, nullable=False))
return arr.to_pylist()
@st.composite
def _pymap(draw, key_type, value_type, size, nullable=True):
length = draw(size)
keys = draw(_pylist(key_type, size=length, nullable=False))
values = draw(_pylist(value_type, size=length, nullable=nullable))
return list(zip(keys, values))
@st.composite
def arrays(draw, type, size=None, nullable=True):
if isinstance(type, st.SearchStrategy):
ty = draw(type)
elif isinstance(type, pa.DataType):
ty = type
else:
raise TypeError('Type must be a pyarrow DataType')
if isinstance(size, st.SearchStrategy):
size = draw(size)
elif size is None:
size = draw(_default_array_sizes)
elif not isinstance(size, int):
raise TypeError('Size must be an integer')
if pa.types.is_null(ty):
h.assume(nullable)
value = st.none()
elif pa.types.is_boolean(ty):
value = st.booleans()
elif pa.types.is_integer(ty):
values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
return pa.array(values, type=ty)
elif pa.types.is_floating(ty):
values = draw(npst.arrays(ty.to_pandas_dtype(), shape=(size,)))
# Workaround ARROW-4952: no easy way to assert array equality
# in a NaN-tolerant way.
values[np.isnan(values)] = -42.0
return pa.array(values, type=ty)
elif pa.types.is_decimal(ty):
# TODO(kszucs): properly limit the precision
# value = st.decimals(places=type.scale, allow_infinity=False)
h.reject()
elif pa.types.is_time(ty):
value = st.times()
elif pa.types.is_date(ty):
value = st.dates()
elif pa.types.is_timestamp(ty):
min_int64 = -(2**63)
max_int64 = 2**63 - 1
min_datetime = datetime.datetime.fromtimestamp(min_int64 // 10**9)
max_datetime = datetime.datetime.fromtimestamp(max_int64 // 10**9)
try:
offset_hours = int(ty.tz)
tz = pytz.FixedOffset(offset_hours * 60)
except ValueError:
tz = pytz.timezone(ty.tz)
value = st.datetimes(timezones=st.just(tz), min_value=min_datetime,
max_value=max_datetime)
elif pa.types.is_duration(ty):
value = st.timedeltas()
elif pa.types.is_binary(ty) or pa.types.is_large_binary(ty):
value = st.binary()
elif pa.types.is_string(ty) or pa.types.is_large_string(ty):
value = st.text()
elif pa.types.is_fixed_size_binary(ty):
value = st.binary(min_size=ty.byte_width, max_size=ty.byte_width)
elif pa.types.is_list(ty):
value = _pylist(ty.value_type, size=size, nullable=nullable)
elif pa.types.is_large_list(ty):
value = _pylist(ty.value_type, size=size, nullable=nullable)
elif pa.types.is_fixed_size_list(ty):
value = _pylist(ty.value_type, size=ty.list_size, nullable=nullable)
elif pa.types.is_dictionary(ty):
values = _pylist(ty.value_type, size=size, nullable=nullable)
return pa.array(draw(values), type=ty)
elif pa.types.is_map(ty):
value = _pymap(ty.key_type, ty.item_type, size=_default_array_sizes,
nullable=nullable)
elif pa.types.is_struct(ty):
h.assume(len(ty) > 0)
fields, child_arrays = [], []
for field in ty:
fields.append(field)
child_arrays.append(draw(arrays(field.type, size=size)))
return pa.StructArray.from_arrays(child_arrays, fields=fields)
else:
raise NotImplementedError(ty)
if nullable:
value = st.one_of(st.none(), value)
values = st.lists(value, min_size=size, max_size=size)
return pa.array(draw(values), type=ty)
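# Illustrative usage (an assumption, not part of this module): a strategy returned by
# arrays() is normally drawn inside a test via @h.given(arrays(pa.int32(), size=5));
# for ad-hoc inspection arrays(pa.int32(), size=5).example() also works, though
# .example() is intended for interactive exploration rather than for use in tests.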
@st.composite
def chunked_arrays(draw, type, min_chunks=0, max_chunks=None, chunk_size=None):
if isinstance(type, st.SearchStrategy):
type = draw(type)
# TODO(kszucs): remove it, field metadata is not kept
h.assume(not pa.types.is_struct(type))
chunk = arrays(type, size=chunk_size)
chunks = st.lists(chunk, min_size=min_chunks, max_size=max_chunks)
return pa.chunked_array(draw(chunks), type=type)
@st.composite
def record_batches(draw, type, rows=None, max_fields=None):
if isinstance(rows, st.SearchStrategy):
rows = draw(rows)
elif rows is None:
rows = draw(_default_array_sizes)
elif not isinstance(rows, int):
raise TypeError('Rows must be an integer')
schema = draw(schemas(type, max_fields=max_fields))
children = [draw(arrays(field.type, size=rows)) for field in schema]
# TODO(kszucs): the names and schema arguments are not consistent with
# Table.from_array's arguments
return pa.RecordBatch.from_arrays(children, names=schema)
@st.composite
def tables(draw, type, rows=None, max_fields=None):
if isinstance(rows, st.SearchStrategy):
rows = draw(rows)
elif rows is None:
rows = draw(_default_array_sizes)
elif not isinstance(rows, int):
raise TypeError('Rows must be an integer')
schema = draw(schemas(type, max_fields=max_fields))
children = [draw(arrays(field.type, size=rows)) for field in schema]
return pa.Table.from_arrays(children, schema=schema)
all_arrays = arrays(all_types)
all_chunked_arrays = chunked_arrays(all_types)
all_record_batches = record_batches(all_types)
all_tables = tables(all_types)
# Define the same rules as above for pandas tests by excluding certain types
# from the generation because of known issues.
pandas_compatible_primitive_types = st.one_of(
null_type,
bool_type,
integer_types,
st.sampled_from([pa.float32(), pa.float64()]),
decimal128_type,
date_types,
time_types,
# Need to exclude timestamp and duration types otherwise hypothesis
# discovers ARROW-10210
# timestamp_types,
# duration_types
interval_types,
binary_type,
string_type,
large_binary_type,
large_string_type,
)
# Need to exclude floating point types otherwise hypothesis discovers
# ARROW-10211
pandas_compatible_dictionary_value_types = st.one_of(
bool_type,
integer_types,
binary_type,
string_type,
fixed_size_binary_type,
)
def pandas_compatible_list_types(
item_strategy=pandas_compatible_primitive_types
):
# Need to exclude fixed size list type otherwise hypothesis discovers
# ARROW-10194
return (
st.builds(pa.list_, item_strategy) |
st.builds(pa.large_list, item_strategy)
)
pandas_compatible_types = st.deferred(
lambda: st.one_of(
pandas_compatible_primitive_types,
pandas_compatible_list_types(pandas_compatible_primitive_types),
struct_types(pandas_compatible_primitive_types),
dictionary_types(
value_strategy=pandas_compatible_dictionary_value_types
),
pandas_compatible_list_types(pandas_compatible_types),
struct_types(pandas_compatible_types)
)
)
| |
#Random Forest Classifier
#Brendan Cordy, 2014
from random import randrange
from random import shuffle
from operator import itemgetter
from math import log
#Datasets------------------------------------------------------------------------------------------------
class DataCSV(object):
def __init__(self, str_data, vrs, cls, ind):
self.str_data = str_data
self.variables = vrs
self.classes = cls
self.indivs = ind
#Input data with classifications given, to be used for training.
@classmethod
def from_training_data(cls, filename):
with open(filename,'r') as input_file:
input_data = [line.rstrip('\n').rstrip('\r').split(',') for line in input_file]
#Ignore empty rows (an extra newline at the end of the file will trigger this).
no_empty_rows = [x for x in input_data if x != ['']]
str_data = no_empty_rows
#Extract variable names from the top row.
variables = str_data[0]
#Convert strings representing values of quantitative variables to floats. The last entry in each
#row will be the class name, so don't attempt to convert that to a float.
indivs = [[float(val) for val in line[:-1]]+[line[-1]] for line in str_data[1:]]
#Extract class names and remove duplicates.
classes = list(set([row[-1] for row in indivs]))
print "Data for " + str(len(indivs)) + " individuals with " + str(len(variables)-1) + " quantitative variables."
print "Classes: " + str(classes)
return cls(str_data, variables, classes, indivs)
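#Illustrative training file layout (made-up values), matching the parsing above:
#the first row names the variables, each later row lists the quantitative values
#with the class label in the last column, e.g.
#   height,weight,species
#   1.2,3.4,cat
#   5.6,7.8,dog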
#Input data without classifications given. A list of possible classes must be provided.
@classmethod
def from_prediction_data(cls, filename, classes):
with open(filename,'r') as input_file:
input_data = [line.rstrip('\n').rstrip('\r').split(',') for line in input_file]
#Ignore empty rows (an extra newline at the end of the file will trigger this).
no_empty_rows = [x for x in input_data if x != ['']]
str_data = no_empty_rows
#Extract variable names from the top row.
variables = str_data[0]
#Convert strings representing values of quantitative variables to floats.
indivs = [[float(val) for val in line] for line in str_data[1:]]
return cls(str_data, variables, classes, indivs)
#Return the entire data set as a single Sample.
def to_sample(self):
return Sample(self.indivs, self.variables)
#Return a random sample (with replacement) of n individuals from the data set.
def random_sample(self, n):
sample_data = []
for i in range(0, n):
j = randrange(0, len(self.indivs))
sample_data.append(self.indivs[j])
return Sample(sample_data, self.variables)
#Randomly reorder the individuals in the data set.
def randomize_order(self):
shuffle(self.indivs)
#Partition the individuals into k more or less equally sized disjoint subsets. Return the ith such subset
#as a new dataset, and its complement as another. Used for cross-validation.
def validation_partition(self, i, k):
first_row = (i - 1) * (len(self.indivs) / k)
#If this is the last fold (last subset), make sure to include all the data.
if i == k:
last_row = len(self.indivs)
else:
last_row = (i) * (len(self.indivs) / k)
valid_data = DataCSV(
[self.str_data[0]] + self.str_data[first_row + 1:last_row + 1],
self.variables,
self.classes,
self.indivs[first_row:last_row]
)
train_data = DataCSV(
[self.str_data[0]] + self.str_data[1:first_row + 1] + self.str_data[last_row + 1:len(self.str_data)],
self.variables,
self.classes,
self.indivs[0:first_row] + self.indivs[last_row:len(self.indivs)]
)
return train_data, valid_data
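#Worked example (illustrative numbers): with 10 individuals and k = 5 folds,
#fold i = 2 gives first_row = 2 and last_row = 4, so individuals 2 and 3
#(zero-indexed) form the validation set and the remaining 8 form the training set.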
#Write the data as a csv file.
def write(self, filename):
lines = []
variables_row = (','.join(self.variables))
lines.append(variables_row)
#Convert all data values to strings and build list of lines.
for indiv in self.indivs:
for index,value in enumerate(indiv):
indiv[index] = str(value)
indiv_row = (','.join(indiv))
lines.append(indiv_row)
with open(filename, 'w') as out_file:
out_file.write('\n'.join(lines))
#Samples-------------------------------------------------------------------------------------------------
class Sample(object):
#Collect classes present in the sample by extracting each individual's class, then removing duplicates.
def __init__(self, sample_indivs, variables):
self.indivs = sample_indivs
self.variables = variables
self.classes = list(set([x[-1] for x in sample_indivs]))
#Calculate the split with the largest information gain, and return the two samples obtained by splitting.
def find_best_split(self):
best_var, best_index_all, best_info_gain_all = 0, 0, 0
#Loop over all non-class variables (columns) in the data set, to find the best one to split on.
for column in range(0, len(self.variables) - 1):
#Sort the sample by that variable
self.indivs.sort(key=itemgetter(column))
best_split_index, best_info_gain = 0, 0
#Evaluate every possible splitting index.
for i in range(1, len(self.indivs)):
info_gain = self.eval_split(i)
if info_gain > best_info_gain:
best_split_index, best_info_gain = i, info_gain
#If this variable's best split is the best of all splits yet observed, keep it.
if best_info_gain > best_info_gain_all:
best_var, best_index_all, best_info_gain_all = column, best_split_index, best_info_gain
#Return the best variable and the value to split on, as well as the two halves after the split. Note
#that the split values are rounded to four decimal places. In data sets with variables that range over
#a very small set of values this should be changed.
self.indivs.sort(key=itemgetter(best_var))
#The split value is the median of the values in the two individuals closest to the split.
split_value = round(0.5 * (self.indivs[best_index_all][best_var] + self.indivs[best_index_all - 1][best_var]), 4)
left_side_sample = Sample(self.indivs[:best_index_all], self.variables)
right_side_sample = Sample(self.indivs[best_index_all:], self.variables)
return best_var, split_value, left_side_sample, right_side_sample
#Evaluate a split using information gain: compute the entropy of the class distribution in the sample,
#and the weighted sum of the entropies of the two class distributions on each side of
#the split. A larger difference means more information gain, and a better split.
def eval_split(self, index):
counts_total, counts_left, counts_right = [], [], []
#Compute proportion of individuals on each side of the split (assume the data is already sorted).
prop_left = index/float(len(self.indivs))
prop_right = (len(self.indivs) - index)/float(len(self.indivs))
#Tally up the counts of classes prior to splitting, and the counts on each side of the split.
for i in range(0, len(self.classes)):
counts_total.append(sum(indiv.count(self.classes[i]) for indiv in self.indivs))
counts_left.append(sum(indiv.count(self.classes[i]) for indiv in self.indivs[:index]))
counts_right.append(sum(indiv.count(self.classes[i]) for indiv in self.indivs[index:]))
#Calculate entropies and return information gain.
return entropy(counts_total) - ((prop_left * entropy(counts_left)) + (prop_right * entropy(counts_right)))
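#Worked example (illustrative counts, not from any dataset): with 4 individuals of
#class A and 4 of class B, entropy([4, 4]) = 1.0 bit. A split at index 4 that sends
#every A left and every B right gives
#   1.0 - (0.5 * entropy([4, 0]) + 0.5 * entropy([0, 4])) = 1.0 - 0 = 1.0,
#the largest information gain this sample allows; a split that leaves both sides
#mixed 50/50 would score 0.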
#DecisionTree--------------------------------------------------------------------------------------------
class DecisionTree(object):
def __init__(self, sample, depth):
#If there is only one class in the sample, this node is a leaf labelled with that class.
if len(sample.classes) == 1:
self.leaf = True
self.cls = sample.classes[0]
#If this node is at maximum depth, it's a leaf labelled with the most common class in the sample.
elif depth == 0:
self.leaf = True
self.cls = max(sample.classes, key=sample.classes.count)
#Otherwise, find the best split and create two new subtrees.
else:
self.leaf = False
self.split_var, self.split_val, left_sample, right_sample = sample.find_best_split()
self.left = DecisionTree(left_sample, depth - 1)
self.right = DecisionTree(right_sample, depth - 1)
def classify(self, indiv):
if self.leaf:
return self.cls
else:
if indiv[self.split_var] <= self.split_val:
return self.left.classify(indiv)
else:
return self.right.classify(indiv)
#RandomForest--------------------------------------------------------------------------------------------
class RandomForest(object):
#Construct a forest of decision trees built with random samples of a given size from a given dataset.
def __init__(self, data, num_trees, max_depth, sample_size):
self.data = data
self.classes = data.classes
self.num_trees = num_trees
self.max_depth = max_depth
self.sample_size = sample_size
self.trees = []
for i in range(num_trees):
sample = data.random_sample(sample_size)
self.trees.append(DecisionTree(sample, max_depth))
def classify(self, indiv):
#Create a dictionary to tally up the votes of each of the decision trees.
votes = {x : 0 for x in self.classes}
for tree in self.trees:
votes[tree.classify(indiv)] += 1
#Find the class with the largest number of votes.
winning_class = max(votes.iteritems(), key=itemgetter(1))[0]
return winning_class
def cross_validate(self, k):
self.data.randomize_order()
correct_class_rates = []
#Partition individuals in the dataset into k disjoint subsets.
for i in range(1, k + 1):
train_subset, valid_subset = self.data.validation_partition(i,k)
#Create a random forest with the larger training portion of the partition.
subset_forest = RandomForest(train_subset, self.num_trees, self.max_depth, self.sample_size)
#Classify the individuals in the validation part of the partition using the new forest.
count_cor, count_inc = 0, 0
for indiv in valid_subset.indivs:
#Check if the predicted class matches.
if subset_forest.classify(indiv) == indiv[-1]:
count_cor += 1
else:
count_inc += 1
total = count_cor + count_inc
correct_class_rates.append(100 * (float(count_cor) / total))
print "Fold " + str(i) + ": " + str(count_cor) + " of " + str(total) + " individuals classified correctly."
avg_correct_rate = sum(correct_class_rates) / float(len(correct_class_rates))
print "Overall success rate: " + str(round(avg_correct_rate, 1)) + "%"
def write_predictions(self, filename):
prediction_data = DataCSV.from_prediction_data(filename, self.classes)
#Add the class as a new variable in the prediction data.
prediction_data.variables.append(self.data.variables[-1])
#Add the classification for each individual.
for indiv in prediction_data.indivs:
indiv.append(self.classify(indiv))
prediction_data.write(filename)
print "Classified " + str(len(prediction_data.indivs)) + " individuals."
#Top-Level Helper Functions------------------------------------------------------------------------------
#Compute the entropy of a list of frequencies.
def entropy(freq_list):
#Scale the list so its sum is one.
prob_list = [x / float(sum(freq_list)) for x in freq_list]
ent = 0
#Compute each term in the sum that defines information entropy. The indeterminate form that arises
#when one of the frequencies is zero is handled separately to avoid a log-domain error.
for x in prob_list:
if x > 0:
ent += -x * log(x,2)
elif x == 0:
ent += float(0)
return ent
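#Quick sanity values: entropy([1, 1]) == 1.0, entropy([1, 0]) == 0.0, and
#entropy([1, 1, 1, 1]) == 2.0 (bits, since log base 2 is used above).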
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras import initializers
from tensorflow.python.keras import models
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of integer scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return int(fan_in), int(fan_out)
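# Worked example (shapes are illustrative): for a conv kernel of shape (3, 3, 16, 32)
# the receptive field size is 3 * 3 = 9, so fan_in = 16 * 9 = 144 and
# fan_out = 32 * 9 = 288; glorot_uniform in the tests below then expects
# std = sqrt(2 / (144 + 288)).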
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KerasInitializersTest(test.TestCase):
def _runner(self, init, shape, target_mean=None, target_std=None,
target_max=None, target_min=None):
variable = backend.variable(init(shape))
output = backend.get_value(variable)
# Test serialization (assumes deterministic behavior).
config = init.get_config()
reconstructed_init = init.__class__.from_config(config)
variable = backend.variable(reconstructed_init(shape))
output_2 = backend.get_value(variable)
self.assertAllClose(output, output_2, atol=1e-4)
def test_uniform(self):
tensor_shape = (9, 6, 7)
with self.cached_session():
self._runner(
initializers.RandomUniformV2(minval=-1, maxval=1, seed=124),
tensor_shape,
target_mean=0.,
target_max=1,
target_min=-1)
def test_normal(self):
tensor_shape = (8, 12, 99)
with self.cached_session():
self._runner(
initializers.RandomNormalV2(mean=0, stddev=1, seed=153),
tensor_shape,
target_mean=0.,
target_std=1)
def test_truncated_normal(self):
tensor_shape = (12, 99, 7)
with self.cached_session():
self._runner(
initializers.TruncatedNormalV2(mean=0, stddev=1, seed=126),
tensor_shape,
target_mean=0.,
target_max=2,
target_min=-2)
def test_constant(self):
tensor_shape = (5, 6, 4)
with self.cached_session():
self._runner(
initializers.ConstantV2(2.),
tensor_shape,
target_mean=2,
target_max=2,
target_min=2)
def test_lecun_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = _compute_fans(tensor_shape)
std = np.sqrt(1. / fan_in)
self._runner(
initializers.LecunUniformV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_glorot_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, fan_out = _compute_fans(tensor_shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._runner(
initializers.GlorotUniformV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_he_uniform(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = _compute_fans(tensor_shape)
std = np.sqrt(2. / fan_in)
self._runner(
initializers.HeUniformV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_lecun_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = _compute_fans(tensor_shape)
std = np.sqrt(1. / fan_in)
self._runner(
initializers.LecunNormalV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_glorot_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, fan_out = _compute_fans(tensor_shape)
std = np.sqrt(2. / (fan_in + fan_out))
self._runner(
initializers.GlorotNormalV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_he_normal(self):
tensor_shape = (5, 6, 4, 2)
with self.cached_session():
fan_in, _ = _compute_fans(tensor_shape)
std = np.sqrt(2. / fan_in)
self._runner(
initializers.HeNormalV2(seed=123),
tensor_shape,
target_mean=0.,
target_std=std)
def test_orthogonal(self):
tensor_shape = (20, 20)
with self.cached_session():
self._runner(
initializers.OrthogonalV2(seed=123), tensor_shape, target_mean=0.)
def test_identity(self):
with self.cached_session():
tensor_shape = (3, 4, 5)
with self.assertRaises(ValueError):
self._runner(
initializers.IdentityV2(),
tensor_shape,
target_mean=1. / tensor_shape[0],
target_max=1.)
tensor_shape = (3, 3)
self._runner(
initializers.IdentityV2(),
tensor_shape,
target_mean=1. / tensor_shape[0],
target_max=1.)
def test_zero(self):
tensor_shape = (4, 5)
with self.cached_session():
self._runner(
initializers.ZerosV2(), tensor_shape, target_mean=0., target_max=0.)
def test_one(self):
tensor_shape = (4, 5)
with self.cached_session():
self._runner(
initializers.OnesV2(), tensor_shape, target_mean=1., target_max=1.)
def test_default_random_uniform(self):
ru = initializers.get('uniform')
self.assertEqual(ru.minval, -0.05)
self.assertEqual(ru.maxval, 0.05)
def test_default_random_normal(self):
rn = initializers.get('normal')
self.assertEqual(rn.mean, 0.0)
self.assertEqual(rn.stddev, 0.05)
def test_default_truncated_normal(self):
tn = initializers.get('truncated_normal')
self.assertEqual(tn.mean, 0.0)
self.assertEqual(tn.stddev, 0.05)
def test_custom_initializer_saving(self):
def my_initializer(shape, dtype=None):
return array_ops.ones(shape, dtype=dtype)
inputs = input_layer.Input((10,))
outputs = core.Dense(1, kernel_initializer=my_initializer)(inputs)
model = models.Model(inputs, outputs)
model2 = model.from_config(
model.get_config(), custom_objects={'my_initializer': my_initializer})
self.assertEqual(model2.layers[1].kernel_initializer, my_initializer)
@testing_utils.run_v2_only
def test_load_external_variance_scaling_v2(self):
external_serialized_json = {
'class_name': 'VarianceScaling',
'config': {
'distribution': 'normal',
'mode': 'fan_avg',
'scale': 1.0,
'seed': None
}
}
initializer = initializers.deserialize(external_serialized_json)
self.assertEqual(initializer.distribution, 'truncated_normal')
def test_partition(self):
with self.cached_session():
partition_enabled_initializers = [
initializers.ZerosV2(),
initializers.OnesV2(),
initializers.RandomUniformV2(),
initializers.RandomNormalV2(),
initializers.TruncatedNormalV2(),
initializers.LecunUniformV2(),
initializers.GlorotUniformV2(),
initializers.HeUniformV2()
]
for initializer in partition_enabled_initializers:
got = initializer(
shape=(4, 2), partition_shape=(2, 2), partition_offset=(0, 0))
self.assertEqual(got.shape, (2, 2))
partition_forbidden_initializers = [
initializers.OrthogonalV2(),
initializers.IdentityV2()
]
for initializer in partition_forbidden_initializers:
with self.assertRaisesRegex(
ValueError,
"initializer doesn't support partition-related arguments"):
initializer(
shape=(4, 2), partition_shape=(2, 2), partition_offset=(0, 0))
if __name__ == '__main__':
test.main()
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.factories import GroupFactory
from machina.test.factories import PostFactory
from machina.test.factories import UserFactory
from machina.test.factories import create_category_forum
from machina.test.factories import create_forum
from machina.test.factories import create_topic
from machina.test.testcases import BaseClientTestCase
ForumProfile = get_model('forum_member', 'ForumProfile')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
assign_perm = get_class('forum_permission.shortcuts', 'assign_perm')
remove_perm = get_class('forum_permission.shortcuts', 'remove_perm')
class TestUserPostsView(BaseClientTestCase):
@pytest.fixture(autouse=True)
def setup(self):
# Add some users
self.u1 = UserFactory.create()
self.g1 = GroupFactory.create()
self.u1.groups.add(self.g1)
self.user.groups.add(self.g1)
# Permission handler
self.perm_handler = PermissionHandler()
self.top_level_cat_1 = create_category_forum()
self.forum_1 = create_forum(parent=self.top_level_cat_1)
self.forum_2 = create_forum(parent=self.top_level_cat_1)
self.forum_3 = create_forum(parent=self.top_level_cat_1)
self.topic_1 = create_topic(forum=self.forum_2, poster=self.u1)
self.topic_1_post_1 = PostFactory.create(topic=self.topic_1, poster=self.u1)
self.topic_1_post_2 = PostFactory.create(topic=self.topic_1, poster=self.user)
self.topic_2 = create_topic(forum=self.forum_1, poster=self.user)
self.topic_2_post_1 = PostFactory.create(topic=self.topic_2, poster=self.user)
self.topic_2_post_2 = PostFactory.create(topic=self.topic_2, poster=self.u1)
self.topic_3 = create_topic(forum=self.forum_2, poster=self.u1)
self.topic_3_post_1 = PostFactory.create(topic=self.topic_3, poster=self.u1)
self.topic_4 = create_topic(forum=self.forum_2, poster=self.user)
self.topic_4_post_1 = PostFactory.create(topic=self.topic_4, poster=self.user)
# Assign some permissions
assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
assign_perm('can_read_forum', self.g1, self.forum_1)
assign_perm('can_read_forum', self.g1, self.forum_2)
def test_browsing_works(self):
# Setup
correct_url = reverse('forum_member:user_posts', args=(self.user.pk, ))
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
def test_displays_only_posts_that_can_be_read_by_the_current_user(self):
# Setup
correct_url = reverse('forum_member:user_posts', args=(self.u1.pk, ))
remove_perm('can_read_forum', self.g1, self.forum_1)
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
assert list(response.context['posts']) == [self.topic_3_post_1, self.topic_1_post_1, ]
class TestForumProfileDetailView(BaseClientTestCase):
@pytest.fixture(autouse=True)
def setup(self):
# Add some users
self.u1 = UserFactory.create()
self.g1 = GroupFactory.create()
self.u1.groups.add(self.g1)
self.user.groups.add(self.g1)
# Permission handler
self.perm_handler = PermissionHandler()
self.top_level_cat_1 = create_category_forum()
self.forum_1 = create_forum(parent=self.top_level_cat_1)
self.forum_2 = create_forum(parent=self.top_level_cat_1)
self.forum_3 = create_forum(parent=self.top_level_cat_1)
self.topic_1 = create_topic(forum=self.forum_2, poster=self.u1)
self.topic_1_post_1 = PostFactory.create(topic=self.topic_1, poster=self.u1)
self.topic_1_post_2 = PostFactory.create(topic=self.topic_1, poster=self.user)
self.topic_2 = create_topic(forum=self.forum_1, poster=self.user)
self.topic_2_post_1 = PostFactory.create(topic=self.topic_2, poster=self.user)
self.topic_2_post_2 = PostFactory.create(topic=self.topic_2, poster=self.u1)
self.topic_3 = create_topic(forum=self.forum_2, poster=self.u1)
self.topic_3_post_1 = PostFactory.create(topic=self.topic_3, poster=self.u1)
self.topic_4 = create_topic(forum=self.forum_2, poster=self.user)
self.topic_4_post_1 = PostFactory.create(topic=self.topic_4, poster=self.user)
# Assign some permissions
assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
assign_perm('can_read_forum', self.g1, self.forum_1)
assign_perm('can_read_forum', self.g1, self.forum_2)
def test_browsing_works(self):
# Setup
correct_url = reverse('forum_member:profile', kwargs={'pk': self.user.pk})
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
def test_includes_the_topics_count_in_the_context(self):
# Setup
correct_url = reverse('forum_member:profile', kwargs={'pk': self.user.pk})
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
assert response.context['topics_count'] == 2
def test_includes_the_recent_posts_of_the_user_in_the_context(self):
# Setup
correct_url = reverse('forum_member:profile', kwargs={'pk': self.user.pk})
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
assert list(response.context['recent_posts']) == [
self.topic_4_post_1,
self.topic_2_post_1,
self.topic_1_post_2,
]
def test_recent_posts_are_determined_using_current_user_permissions(self):
# Setup
self.user.groups.clear()
assign_perm('can_read_forum', self.user, self.top_level_cat_1)
assign_perm('can_read_forum', self.user, self.forum_2)
correct_url = reverse('forum_member:profile', kwargs={'pk': self.u1.pk})
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
assert list(response.context['recent_posts']) == [
self.topic_3_post_1,
self.topic_1_post_1,
]
class TestForumProfileUpdateView(BaseClientTestCase):
def test_browsing_works(self):
# Setup
correct_url = reverse('forum_member:profile_update')
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
def test_cannot_be_accessed_by_unauthenticated_users(self):
# Setup
self.client.logout()
correct_url = reverse('forum_member:profile_update')
# Run
response = self.client.get(correct_url, follow=False)
# Check
assert response.status_code == 302
def test_can_update_forum_profile(self):
# Setup
correct_url = reverse('forum_member:profile_update')
# Run
with open(settings.MEDIA_ROOT + 'attachment.jpg', 'rb') as upload_file:
post_data = {
'signature': '**Test**',
'avatar': SimpleUploadedFile(upload_file.name, upload_file.read()),
}
response = self.client.post(correct_url, post_data, follow=False)
# Check
assert response.status_code == 302
profile = ForumProfile.objects.get(user=self.user)
assert profile.signature.raw == '**Test**'
assert profile.avatar.file is not None
class TestTopicSubscribeView(BaseClientTestCase):
@pytest.fixture(autouse=True)
def setup(self):
# Add some users
self.u1 = UserFactory.create()
self.g1 = GroupFactory.create()
self.u1.groups.add(self.g1)
self.user.groups.add(self.g1)
# Permission handler
self.perm_handler = PermissionHandler()
self.top_level_cat_1 = create_category_forum()
self.forum_1 = create_forum(parent=self.top_level_cat_1)
self.topic_1 = create_topic(forum=self.forum_1, poster=self.u1)
PostFactory.create(topic=self.topic_1, poster=self.u1)
PostFactory.create(topic=self.topic_1, poster=self.user)
# Assign some permissions
assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
assign_perm('can_read_forum', self.g1, self.forum_1)
def test_browsing_works(self):
# Setup
correct_url = reverse('forum_member:topic_subscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
def test_can_add_a_topic_to_the_user_subscription_list(self):
# Setup
correct_url = reverse('forum_member:topic_subscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.post(correct_url, follow=False)
# Check
assert response.status_code == 302
assert self.topic_1 in self.user.topic_subscriptions.all()
def test_cannot_be_browsed_by_anonymous_users(self):
# Setup
self.client.logout()
correct_url = reverse('forum_member:topic_subscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.get(correct_url, follow=False)
# Check
assert response.status_code == 302
def test_cannot_be_browsed_by_users_that_do_not_have_the_appropriate_permission(self):
# Setup
remove_perm('can_read_forum', self.g1, self.forum_1)
correct_url = reverse('forum_member:topic_subscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 403
def test_cannot_be_browsed_if_the_user_has_already_subscribed_to_the_topic(self):
# Setup
self.topic_1.subscribers.add(self.user)
correct_url = reverse('forum_member:topic_subscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 403
class TestTopicUnsubscribeView(BaseClientTestCase):
@pytest.fixture(autouse=True)
def setup(self):
# Add some users
self.u1 = UserFactory.create()
self.g1 = GroupFactory.create()
self.u1.groups.add(self.g1)
self.user.groups.add(self.g1)
# Permission handler
self.perm_handler = PermissionHandler()
self.top_level_cat_1 = create_category_forum()
self.forum_1 = create_forum(parent=self.top_level_cat_1)
self.topic_1 = create_topic(forum=self.forum_1, poster=self.u1)
PostFactory.create(topic=self.topic_1, poster=self.u1)
PostFactory.create(topic=self.topic_1, poster=self.user)
# Assign some permissions
assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
assign_perm('can_read_forum', self.g1, self.forum_1)
def test_browsing_works(self):
# Setup
self.topic_1.subscribers.add(self.user)
correct_url = reverse('forum_member:topic_unsubscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
def test_can_remove_a_topic_from_the_user_subscription_list(self):
# Setup
self.topic_1.subscribers.add(self.user)
correct_url = reverse('forum_member:topic_unsubscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.post(correct_url, follow=False)
# Check
assert response.status_code == 302
assert not self.user.topic_subscriptions.all()
def test_cannot_be_browsed_by_anonymous_users(self):
# Setup
self.client.logout()
correct_url = reverse('forum_member:topic_unsubscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.get(correct_url, follow=False)
# Check
assert response.status_code == 302
def test_cannot_be_browsed_by_users_that_do_not_have_the_appropriate_permission(self):
# Setup
remove_perm('can_read_forum', self.g1, self.forum_1)
correct_url = reverse('forum_member:topic_unsubscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 403
def test_cannot_be_browsed_if_the_user_has_not_subscribed_to_the_topic(self):
# Setup
correct_url = reverse('forum_member:topic_unsubscribe', args=(self.topic_1.pk, ))
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 403
class TestTopicSubscriptionListView(BaseClientTestCase):
@pytest.fixture(autouse=True)
def setup(self):
# Add some users
self.u1 = UserFactory.create()
self.g1 = GroupFactory.create()
self.u1.groups.add(self.g1)
self.user.groups.add(self.g1)
# Permission handler
self.perm_handler = PermissionHandler()
self.top_level_cat_1 = create_category_forum()
self.forum_1 = create_forum(parent=self.top_level_cat_1)
self.forum_2 = create_forum(parent=self.top_level_cat_1)
self.forum_3 = create_forum(parent=self.top_level_cat_1)
self.topic_1 = create_topic(forum=self.forum_2, poster=self.u1)
PostFactory.create(topic=self.topic_1, poster=self.u1)
PostFactory.create(topic=self.topic_1, poster=self.user)
self.topic_2 = create_topic(forum=self.forum_1, poster=self.user)
PostFactory.create(topic=self.topic_2, poster=self.user)
PostFactory.create(topic=self.topic_2, poster=self.u1)
self.topic_3 = create_topic(forum=self.forum_2, poster=self.u1)
PostFactory.create(topic=self.topic_3, poster=self.u1)
self.topic_4 = create_topic(forum=self.forum_2, poster=self.user)
PostFactory.create(topic=self.topic_4, poster=self.user)
# Assign some permissions
assign_perm('can_read_forum', self.g1, self.top_level_cat_1)
assign_perm('can_read_forum', self.g1, self.forum_1)
assign_perm('can_read_forum', self.g1, self.forum_2)
def test_browsing_works(self):
# Setup
correct_url = reverse('forum_member:user_subscriptions')
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
def test_cannot_be_browsed_by_anonymous_users(self):
# Setup
correct_url = reverse('forum_member:user_subscriptions')
self.client.logout()
# Run
response = self.client.get(correct_url, follow=False)
# Check
assert response.status_code == 302
def test_displays_only_topics_the_user_is_subscribed_to(self):
# Setup
self.user.topic_subscriptions.add(self.topic_2)
correct_url = reverse('forum_member:user_subscriptions')
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
assert list(response.context_data['topics']) == [self.topic_2, ]
| |
import asyncio
import pytest
from click.testing import CliRunner
pytest.importorskip("requests")
import os
from multiprocessing import cpu_count
from time import sleep
import requests
from dask.utils import tmpfile
import distributed.cli.dask_worker
from distributed import Client
from distributed.compatibility import LINUX, to_thread
from distributed.deploy.utils import nprocesses_nthreads
from distributed.metrics import time
from distributed.utils import parse_ports, sync
from distributed.utils_test import gen_cluster, popen, requires_ipv6
def test_nanny_worker_ports(loop):
with popen(["dask-scheduler", "--port", "9359", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:9359",
"--host",
"127.0.0.1",
"--worker-port",
"9684",
"--nanny-port",
"5273",
"--no-dashboard",
]
):
with Client("127.0.0.1:9359", loop=loop) as c:
start = time()
while True:
d = sync(c.loop, c.scheduler.identity)
if d["workers"]:
break
else:
assert time() - start < 60
sleep(0.1)
assert (
d["workers"]["tcp://127.0.0.1:9684"]["nanny"]
== "tcp://127.0.0.1:5273"
)
@pytest.mark.slow
def test_nanny_worker_port_range(loop):
with popen(["dask-scheduler", "--port", "9359", "--no-dashboard"]) as sched:
n_workers = 3
worker_port = "9684:9686"
nanny_port = "9688:9690"
with popen(
[
"dask-worker",
"127.0.0.1:9359",
"--nworkers",
f"{n_workers}",
"--host",
"127.0.0.1",
"--worker-port",
worker_port,
"--nanny-port",
nanny_port,
"--no-dashboard",
]
):
with Client("127.0.0.1:9359", loop=loop) as c:
start = time()
while len(c.scheduler_info()["workers"]) < n_workers:
sleep(0.1)
assert time() - start < 60
def get_port(dask_worker):
return dask_worker.port
expected_worker_ports = set(parse_ports(worker_port))
worker_ports = c.run(get_port)
assert set(worker_ports.values()) == expected_worker_ports
expected_nanny_ports = set(parse_ports(nanny_port))
nanny_ports = c.run(get_port, nanny=True)
assert set(nanny_ports.values()) == expected_nanny_ports
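# Note on the port specs above (following how parse_ports is used in this test):
# a "low:high" string expands to the inclusive range, so "9684:9686" yields exactly
# the three worker ports needed for --nworkers=3.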
def test_nanny_worker_port_range_too_many_workers_raises(loop):
with popen(["dask-scheduler", "--port", "9359", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:9359",
"--nworkers",
"3",
"--host",
"127.0.0.1",
"--worker-port",
"9684:9685",
"--nanny-port",
"9686:9687",
"--no-dashboard",
]
) as worker:
assert any(
b"Could not start" in worker.stderr.readline() for _ in range(100)
)
def test_memory_limit(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:8786",
"--memory-limit",
"2e3MB",
"--no-dashboard",
]
):
with Client("127.0.0.1:8786", loop=loop) as c:
while not c.nthreads():
sleep(0.1)
info = c.scheduler_info()
[d] = info["workers"].values()
assert isinstance(d["memory_limit"], int)
assert d["memory_limit"] == 2e9
def test_no_nanny(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", "--no-nanny", "--no-dashboard"]
) as worker:
assert any(b"Registered" in worker.stderr.readline() for i in range(15))
@pytest.mark.slow
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@gen_cluster(client=True, nthreads=[])
async def test_no_reconnect(c, s, nanny):
with popen(
[
"dask-worker",
s.address,
"--no-reconnect",
nanny,
"--no-dashboard",
]
) as worker:
# roundtrip works
assert await c.submit(lambda x: x + 1, 10) == 11
(comm,) = s.stream_comms.values()
comm.abort()
# worker terminates as soon as the connection is aborted
await to_thread(worker.communicate, timeout=5)
assert worker.returncode == 0
@pytest.mark.slow
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@gen_cluster(client=True, nthreads=[])
async def test_reconnect(c, s, nanny):
with popen(
[
"dask-worker",
s.address,
"--reconnect",
nanny,
"--no-dashboard",
]
) as worker:
# roundtrip works
assert await c.submit(lambda x: x + 1, 10) == 11
(comm,) = s.stream_comms.values()
comm.abort()
# roundtrip still works, which means the worker reconnected
assert await c.submit(lambda x: x + 1, 11) == 12
# closing the scheduler cleanly does terminate the worker
await s.close()
await to_thread(worker.communicate, timeout=5)
assert worker.returncode == 0
def test_resources(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"tcp://127.0.0.1:8786",
"--no-dashboard",
"--resources",
"A=1 B=2,C=3",
]
):
with Client("127.0.0.1:8786", loop=loop) as c:
while not c.scheduler_info()["workers"]:
sleep(0.1)
info = c.scheduler_info()
worker = list(info["workers"].values())[0]
assert worker["resources"] == {"A": 1, "B": 2, "C": 3}
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
def test_local_directory(loop, nanny):
with tmpfile() as fn:
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:8786",
nanny,
"--no-dashboard",
"--local-directory",
fn,
]
):
with Client("127.0.0.1:8786", loop=loop, timeout=10) as c:
start = time()
while not c.scheduler_info()["workers"]:
sleep(0.1)
assert time() < start + 8
info = c.scheduler_info()
worker = list(info["workers"].values())[0]
assert worker["local_directory"].startswith(fn)
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
def test_scheduler_file(loop, nanny):
with tmpfile() as fn:
with popen(["dask-scheduler", "--no-dashboard", "--scheduler-file", fn]):
with popen(
["dask-worker", "--scheduler-file", fn, nanny, "--no-dashboard"]
):
with Client(scheduler_file=fn, loop=loop) as c:
start = time()
while not c.scheduler_info()["workers"]:
sleep(0.1)
assert time() < start + 10
def test_scheduler_address_env(loop, monkeypatch):
monkeypatch.setenv("DASK_SCHEDULER_ADDRESS", "tcp://127.0.0.1:8786")
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(["dask-worker", "--no-dashboard"]):
with Client(os.environ["DASK_SCHEDULER_ADDRESS"], loop=loop) as c:
start = time()
while not c.scheduler_info()["workers"]:
sleep(0.1)
assert time() < start + 10
def test_nworkers_requires_nanny(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", "--nworkers=2", "--no-nanny"]
) as worker:
assert any(
b"Failed to launch worker" in worker.stderr.readline()
for i in range(15)
)
def test_nworkers_negative(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(["dask-worker", "127.0.0.1:8786", "--nworkers=-1"]):
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
c.wait_for_workers(cpu_count(), timeout="10 seconds")
def test_nworkers_auto(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(["dask-worker", "127.0.0.1:8786", "--nworkers=auto"]):
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
procs, _ = nprocesses_nthreads()
c.wait_for_workers(procs, timeout="10 seconds")
def test_nworkers_expands_name(loop):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(["dask-worker", "127.0.0.1:8786", "--nworkers", "2", "--name", "0"]):
with popen(["dask-worker", "127.0.0.1:8786", "--nworkers", "2"]):
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
start = time()
while len(c.scheduler_info()["workers"]) < 4:
sleep(0.2)
assert time() < start + 30
info = c.scheduler_info()
names = [d["name"] for d in info["workers"].values()]
foos = [n for n in names if n.startswith("0-")]
assert len(foos) == 2
assert len(set(names)) == 4
def test_worker_cli_nprocs_renamed_to_nworkers(loop):
n_workers = 2
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", f"--nprocs={n_workers}"]
) as worker:
assert any(
b"renamed to --nworkers" in worker.stderr.readline() for i in range(15)
)
with Client("tcp://127.0.0.1:8786", loop=loop) as c:
c.wait_for_workers(n_workers, timeout="30 seconds")
def test_worker_cli_nworkers_with_nprocs_is_an_error():
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", "--nprocs=2", "--nworkers=2"]
) as worker:
assert any(
b"Both --nprocs and --nworkers" in worker.stderr.readline()
for i in range(15)
)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@pytest.mark.parametrize(
"listen_address", ["tcp://0.0.0.0:39837", "tcp://127.0.0.2:39837"]
)
def test_contact_listen_address(loop, nanny, listen_address):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:8786",
nanny,
"--no-dashboard",
"--contact-address",
"tcp://127.0.0.2:39837",
"--listen-address",
listen_address,
]
):
with Client("127.0.0.1:8786") as client:
while not client.nthreads():
sleep(0.1)
info = client.scheduler_info()
assert "tcp://127.0.0.2:39837" in info["workers"]
# roundtrip works
assert client.submit(lambda x: x + 1, 10).result() == 11
def func(dask_worker):
return dask_worker.listener.listen_address
assert client.run(func) == {"tcp://127.0.0.2:39837": listen_address}
@requires_ipv6
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@pytest.mark.parametrize("listen_address", ["tcp://:39838", "tcp://[::1]:39838"])
def test_listen_address_ipv6(loop, nanny, listen_address):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
[
"dask-worker",
"127.0.0.1:8786",
nanny,
"--no-dashboard",
"--listen-address",
listen_address,
]
):
# IPv4 used by default for name of global listener; IPv6 used by default when
# listening only on IPv6.
bind_all = "[::1]" not in listen_address
expected_ip = "127.0.0.1" if bind_all else "[::1]"
expected_name = f"tcp://{expected_ip}:39838"
expected_listen = "tcp://0.0.0.0:39838" if bind_all else listen_address
with Client("127.0.0.1:8786") as client:
while not client.nthreads():
sleep(0.1)
info = client.scheduler_info()
assert expected_name in info["workers"]
assert client.submit(lambda x: x + 1, 10).result() == 11
def func(dask_worker):
return dask_worker.listener.listen_address
assert client.run(func) == {expected_name: expected_listen}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@pytest.mark.parametrize("host", ["127.0.0.2", "0.0.0.0"])
def test_respect_host_listen_address(loop, nanny, host):
with popen(["dask-scheduler", "--no-dashboard"]):
with popen(
["dask-worker", "127.0.0.1:8786", nanny, "--no-dashboard", "--host", host]
) as worker:
with Client("127.0.0.1:8786") as client:
while not client.nthreads():
sleep(0.1)
client.scheduler_info()
# roundtrip works
assert client.submit(lambda x: x + 1, 10).result() == 11
def func(dask_worker):
return dask_worker.listener.listen_address
listen_addresses = client.run(func)
assert all(host in v for v in listen_addresses.values())
def test_dashboard_non_standard_ports(loop):
pytest.importorskip("bokeh")
try:
import jupyter_server_proxy # noqa: F401
proxy_exists = True
except ImportError:
proxy_exists = False
with popen(["dask-scheduler", "--port", "3449"]):
with popen(
[
"dask-worker",
"tcp://127.0.0.1:3449",
"--dashboard-address",
":4833",
"--host",
"127.0.0.1",
]
):
with Client("127.0.0.1:3449", loop=loop) as c:
c.wait_for_workers(1)
pass
response = requests.get("http://127.0.0.1:4833/status")
assert response.ok
redirect_resp = requests.get("http://127.0.0.1:4833/main")
        assert redirect_resp.ok
# TEST PROXYING WORKS
if proxy_exists:
url = "http://127.0.0.1:8787/proxy/4833/127.0.0.1/status"
response = requests.get(url)
assert response.ok
with pytest.raises(Exception):
requests.get("http://localhost:4833/status/")
def test_version_option():
runner = CliRunner()
result = runner.invoke(distributed.cli.dask_worker.main, ["--version"])
assert result.exit_code == 0
@pytest.mark.slow
@pytest.mark.parametrize("no_nanny", [True, False])
def test_worker_timeout(no_nanny):
runner = CliRunner()
args = ["192.168.1.100:7777", "--death-timeout=1"]
if no_nanny:
args.append("--no-nanny")
result = runner.invoke(distributed.cli.dask_worker.main, args)
assert result.exit_code != 0
def test_bokeh_deprecation():
pytest.importorskip("bokeh")
runner = CliRunner()
with pytest.warns(UserWarning, match="dashboard"):
try:
runner.invoke(distributed.cli.dask_worker.main, ["--bokeh"])
except ValueError:
# didn't pass scheduler
pass
with pytest.warns(UserWarning, match="dashboard"):
try:
runner.invoke(distributed.cli.dask_worker.main, ["--no-bokeh"])
except ValueError:
# didn't pass scheduler
pass
@gen_cluster(nthreads=[])
async def test_integer_names(s):
with popen(["dask-worker", s.address, "--name", "123"]):
while not s.workers:
await asyncio.sleep(0.01)
[ws] = s.workers.values()
assert ws.name == 123
@pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
@gen_cluster(client=True, nthreads=[])
async def test_worker_class(c, s, tmp_path, nanny):
# Create module with custom worker class
WORKER_CLASS_TEXT = """
from distributed.worker import Worker
class MyWorker(Worker):
pass
"""
tmpdir = str(tmp_path)
tmpfile = str(tmp_path / "myworker.py")
with open(tmpfile, "w") as f:
f.write(WORKER_CLASS_TEXT)
# Put module on PYTHONPATH
env = os.environ.copy()
if "PYTHONPATH" in env:
env["PYTHONPATH"] = tmpdir + ":" + env["PYTHONPATH"]
else:
env["PYTHONPATH"] = tmpdir
with popen(
[
"dask-worker",
s.address,
nanny,
"--worker-class",
"myworker.MyWorker",
],
env=env,
):
await c.wait_for_workers(1)
def worker_type(dask_worker):
return type(dask_worker).__name__
worker_types = await c.run(worker_type)
assert all(name == "MyWorker" for name in worker_types.values())
@gen_cluster(nthreads=[], client=True)
async def test_preload_config(c, s):
# Ensure dask-worker pulls the preload from the Dask config if
# not specified via a command line option
preload_text = """
def dask_setup(worker):
worker.foo = 'setup'
"""
env = os.environ.copy()
env["DASK_DISTRIBUTED__WORKER__PRELOAD"] = preload_text
with popen(
[
"dask-worker",
s.address,
],
env=env,
):
await c.wait_for_workers(1)
[foo] = (await c.run(lambda dask_worker: dask_worker.foo)).values()
assert foo == "setup"
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for converting TensorFlow control flow op to Relay."""
import pytest
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError:
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
import numpy as np
from tvm import nd
from tvm import relay
from tvm.relay.frontend.tensorflow import from_tensorflow
def check_equal(graph, tf_out, input_map=None):
mod, params = from_tensorflow(graph.as_graph_def(add_shapes=True))
if input_map is not None:
params.update(input_map)
ex = relay.create_executor("vm", mod=mod)
relay_out = ex.evaluate()(**params)
if isinstance(relay_out, nd.NDArray):
np.testing.assert_allclose(tf_out, relay_out.asnumpy())
else:
if not isinstance(tf_out, (list, tuple)):
tf_out = [tf_out]
for x, y in zip(tf_out, [r.asnumpy() for r in relay_out]):
np.testing.assert_allclose(x, y)
def test_vanilla_loop():
graph = tf.Graph()
with graph.as_default():
i = tf.constant(0, name="while/constant")
def c(i):
return tf.less(i, 10)
def b(i):
return tf.add(i, 1)
r = tf.while_loop(c, b, [i])
with tf.Session() as sess:
tf_out = sess.run(r)
check_equal(graph, tf_out)
def test_callnode_loop_vars():
graph = tf.Graph()
with graph.as_default():
i = tf.add(tf.constant(0), 1)
def c(i):
return tf.less(i, 10)
def b(i):
return tf.add(i, 1)
r = tf.while_loop(c, b, [i])
with tf.Session() as sess:
tf_out = sess.run(r)
check_equal(graph, tf_out)
def test_loop_2_vars():
graph = tf.Graph()
with graph.as_default():
i0 = tf.constant(0)
j0 = tf.ones([2, 2])
def c(i, j):
return i < 10
def b(i, j):
return [tf.add(i, 1), j]
i1, i2 = tf.while_loop(c, b, loop_vars=[i0, j0])
i1 += tf.constant(1337)
with tf.Session() as sess:
tf_out = sess.run(i1)
check_equal(graph, tf_out)
def test_loop_3_vars():
graph = tf.Graph()
with graph.as_default():
i0 = tf.constant(1)
j0 = tf.constant(2)
k0 = tf.constant(4)
def c(i, j, k):
return i < 10
def b(i, j, k):
return [i + 1, j * k, k + i]
r = tf.while_loop(c, b, loop_vars=[i0, j0, k0])
with tf.Session() as sess:
tf_out = sess.run(r)
check_equal(graph, tf_out)
def test_loop_conditions():
graph = tf.Graph()
with graph.as_default():
i = tf.constant(1)
j = tf.constant(1)
k = tf.constant(5)
def c(i, j, k):
return tf.equal(
tf.not_equal(tf.less(i + j, 10), tf.less(j * k, 100)), tf.greater_equal(k, i + j)
)
def b(i, j, k):
return [i + j, j + k, k + 1]
r = tf.while_loop(c, b, loop_vars=[i, j, k])
with tf.Session() as sess:
tf_out = sess.run(r)
check_equal(graph, tf_out)
@pytest.mark.skip
def test_loop_bodies():
graph = tf.Graph()
with graph.as_default():
def body(x):
a = tf.constant(np.array([[5, 6], [7, 8]]), dtype=tf.int32)
b = tf.constant(np.array([[1, 2], [3, 4]]), dtype=tf.int32)
c = a + b
return tf.nn.relu(x + c)
def condition(x):
return tf.reduce_sum(x) < 100
x = tf.constant(0, shape=[2, 2])
r = tf.while_loop(condition, body, [x])
with tf.Session() as sess:
tf_out = sess.run(r)
check_equal(graph, tf_out)
def test_nested_loop():
graph = tf.Graph()
with graph.as_default():
def body(x):
def nest_body(c):
return tf.multiply(c, 2)
def cd(c):
return tf.less(c, 10)
c = tf.constant(2)
res = tf.while_loop(cd, nest_body, loop_vars=[c])
return tf.nn.relu(x + res)
def condition(x):
return tf.greater(x, 100)
x = tf.constant(3)
r = tf.while_loop(condition, body, loop_vars=[x])
with tf.Session() as sess:
tf_out = sess.run(r)
check_equal(graph, tf_out)
def test_vanilla_cond():
graph = tf.Graph()
with graph.as_default():
i = tf.constant(1)
j = tf.constant(4)
def f1():
return tf.multiply(1, 17)
def f2():
return tf.add(4, 23)
r = tf.cond(tf.less(i, j), f1, f2)
with tf.Session(graph=graph) as sess:
tf_out = sess.run(r)
check_equal(graph, tf_out)
def test_multiple_cond_vars():
graph = tf.Graph()
with graph.as_default():
x1 = tf.constant(7)
x2 = tf.constant(12)
z = tf.constant(20)
r = tf.cond(tf.less(tf.add(x1, x2), 10), lambda: tf.add(10, 2), lambda: tf.square(5))
with tf.Session() as sess:
tf_out = sess.run(r)
check_equal(graph, tf_out)
def test_cond_fn_parameters():
graph = tf.Graph()
with graph.as_default():
def fn1(x, y):
return tf.multiply(5, 6)
def fn2(x, y):
return tf.add(3, 4)
i = tf.constant(1)
j = tf.constant(2)
k = tf.constant(3)
r = tf.cond(tf.less(i, j), lambda: fn1(i, k), lambda: fn2(j, k))
with tf.Session() as sess:
tf_out = sess.run(r, feed_dict={i: 1, j: 2, k: 3})
check_equal(graph, tf_out)
def test_nested_cond():
graph = tf.Graph()
with graph.as_default():
def fn1(a, b):
def nest_fn1():
return tf.add(1, 2)
def nest_fn2():
return tf.subtract(10, 5)
res = tf.cond(tf.less(1, 2), nest_fn1, nest_fn2)
return tf.multiply(tf.add(87, res), 10)
def fn2(a, b):
return tf.add(10, 10)
x = tf.constant(5)
y = tf.constant(6)
z = tf.constant(7)
pred = tf.less(x, y)
r = tf.cond(pred, lambda: fn1(x, y), lambda: fn2(y, z))
with tf.Session() as sess:
tf_out = sess.run(r, feed_dict={x: 1, y: 2, z: 3, pred: True})
check_equal(graph, tf_out)
def test_loop_in_cond():
graph = tf.Graph()
with graph.as_default():
def fn1(a, b):
i = tf.constant(0)
def cd(i):
return tf.less(i, 10)
def bd(i):
return tf.add(i, 1)
res = tf.while_loop(cd, bd, [i])
return tf.multiply(tf.add(20, res), 10)
def fn2(a, b):
return tf.add(10, 20)
x = tf.constant(7)
y = tf.constant(20)
z = tf.constant(10)
pred = tf.less(x, y)
r = tf.cond(pred, lambda: fn1(x, y), lambda: fn2(y, z))
with tf.Session() as sess:
tf_out = sess.run(r, feed_dict={x: 1, y: 2, z: 3, pred: True})
check_equal(graph, tf_out)
def test_cond_in_loop():
graph = tf.Graph()
with graph.as_default():
def body(x):
x = tf.constant(7)
z = tf.constant(20)
res = tf.cond(tf.less(x, 10), lambda: tf.add(10, 20), lambda: tf.square(10))
return tf.multiply(res, x)
x = tf.constant(21)
def condition(x):
return tf.less(x, 100)
r = tf.while_loop(condition, body, loop_vars=[x])
with tf.Session() as sess:
tf_out = sess.run(r)
check_equal(graph, tf_out)
def test_vanilla_loop_bound():
graph = tf.Graph()
with graph.as_default():
dshape = (2, 10)
dtype = "float32"
dname = "data"
np_data = np.random.uniform(size=dshape).astype(dtype)
data = tf.placeholder(shape=dshape, dtype=dtype, name=dname)
x = tf.slice(data, [1, 4], [1, 4])
outer = x + 5.0
def body(x, y):
res = tf.cond(tf.less(y, 10), lambda: tf.add(10.0, 20.0), lambda: tf.square(10.0))
z = tf.constant(7)
res = tf.cond(tf.less(z, 10), lambda: res * 5, lambda: res + 10)
return tf.multiply(res, x * outer), y + 1
y = tf.constant(0)
def condition(x, y):
return tf.less(y, 20)
r = tf.while_loop(condition, body, loop_vars=[x, y])
with tf.Session() as sess:
tf_out = sess.run(r, feed_dict={"%s:0" % dname: np_data})
check_equal(graph, tf_out, {dname: np_data})
def test_nested_loop_bound():
graph = tf.Graph()
with graph.as_default():
dshape = (2, 10)
dtype = "float32"
dname = "data"
np_data = np.random.uniform(size=dshape).astype(dtype)
data = tf.placeholder(shape=dshape, dtype=dtype, name=dname)
x = tf.slice(data, [1, 4], [1, 4])
outer = x + 5.0
def body(x, y):
res = tf.cond(tf.less(y, 10), lambda: tf.add(10.0, 20.0), lambda: tf.square(10.0))
def nested_body(nx, ny):
return nx + 1, res + 2.0
def nested_cond(nx, ny):
return tf.less(nx, 15)
nx = tf.constant(0)
ny = tf.constant(0.0)
nested_res = tf.while_loop(nested_cond, nested_body, loop_vars=[nx, ny])
res = res + nested_res[1]
z = tf.constant(7)
res = tf.cond(tf.less(z, 10), lambda: res * 5, lambda: res + 10)
return tf.multiply(res, x * outer), y + 1
y = tf.constant(0)
def condition(x, y):
return tf.less(y, 20)
r = tf.while_loop(condition, body, loop_vars=[x, y])
with tf.Session() as sess:
tf_out = sess.run(r, feed_dict={"%s:0" % dname: np_data})
check_equal(graph, tf_out, {dname: np_data})
def test_switch():
graph = tf.Graph()
with graph.as_default():
data_np = np.random.uniform(0, 5, size=(2, 4, 5, 1)).astype("float32")
dname = "data"
flag_name = "flag"
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name=dname)
split = tf.split(data, 2, axis=0)
flag = tf.placeholder(shape={}, dtype=tf.bool, name=flag_name)
output_false, output_true = control_flow_ops.switch(split[1], flag)
with tf.Session() as sess:
tf_out = sess.run(output_false, feed_dict={data.name: data_np, flag.name: False})
check_equal(graph, tf_out, {dname: data_np, flag_name: False})
def test_loop_tuple_input():
graph = tf.Graph()
with graph.as_default():
data_np = np.random.uniform(0, 5, size=(2, 4, 5, 1)).astype("float32")
dname = "data"
data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name=dname)
split = tf.split(data, 2, axis=0)
def body(x, y):
return x + 2, y + 1
start = tf.constant(0)
def condition(x, y):
return tf.less(y, 20)
r = tf.while_loop(condition, body, loop_vars=[split[1], start])
with tf.Session() as sess:
tf_out = sess.run(r, feed_dict={data.name: data_np})
check_equal(graph, tf_out, {dname: data_np})
if __name__ == "__main__":
# tf.while_loop
test_vanilla_loop()
test_loop_2_vars()
test_loop_3_vars()
test_loop_conditions()
# TODO(@jroesch): Need to fix memory alloc to support closure
# test_loop_bodies()
test_callnode_loop_vars()
# tf.cond
test_vanilla_cond()
test_multiple_cond_vars()
test_cond_fn_parameters()
# nested cases
test_nested_loop()
test_nested_cond()
test_loop_in_cond()
test_cond_in_loop()
test_vanilla_loop_bound()
test_nested_loop_bound()
test_switch()
test_loop_tuple_input()
#! /usr/bin/env python
#
# example2_mpl.py -- Simple, configurable FITS viewer using a matplotlib
# QtAgg backend for Ginga and embedded in a Qt program.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
Usage:
example2_mpl.py [fits file]
You need Qt4 with python bindings (or pyside) installed to run this example.
"""
from __future__ import print_function
import sys, os
from ginga.qtw.QtHelp import QtGui, QtCore
from ginga import AstroImage
from matplotlib.figure import Figure
from ginga.mplw.ImageViewCanvasMpl import ImageViewCanvas
from ginga.mplw.FigureCanvasQt import FigureCanvas
from ginga.misc import log
from ginga import colors
class FitsViewer(QtGui.QMainWindow):
def __init__(self, logger):
super(FitsViewer, self).__init__()
self.logger = logger
self.drawcolors = colors.get_colors()
fig = Figure()
w = FigureCanvas(fig)
fi = ImageViewCanvas(logger)
fi.enable_autocuts('on')
fi.set_autocut_params('zscale')
fi.enable_autozoom('on')
fi.enable_draw(False)
fi.set_callback('drag-drop', self.drop_file)
fi.set_callback('none-move', self.motion)
fi.set_bg(0.2, 0.2, 0.2)
fi.ui_setActive(True)
self.fitsimage = fi
fi.set_figure(fig)
bd = fi.get_bindings()
bd.enable_all(True)
# canvas that we will draw on
DrawingCanvas = fi.getDrawClass('drawingcanvas')
canvas = DrawingCanvas()
canvas.enable_draw(True)
#canvas.enable_edit(True)
canvas.set_drawtype('rectangle', color='lightblue')
canvas.setSurface(fi)
self.canvas = canvas
# add canvas to view
fi.add(canvas)
canvas.ui_setActive(True)
w.resize(512, 512)
vbox = QtGui.QVBoxLayout()
vbox.setContentsMargins(QtCore.QMargins(2, 2, 2, 2))
vbox.setSpacing(1)
vbox.addWidget(w, stretch=1)
self.readout = QtGui.QLabel("")
vbox.addWidget(self.readout, stretch=0,
alignment=QtCore.Qt.AlignCenter)
hbox = QtGui.QHBoxLayout()
hbox.setContentsMargins(QtCore.QMargins(4, 2, 4, 2))
wdrawtype = QtGui.QComboBox()
self.drawtypes = fi.get_drawtypes()
for name in self.drawtypes:
wdrawtype.addItem(name)
index = self.drawtypes.index('rectangle')
wdrawtype.setCurrentIndex(index)
wdrawtype.activated.connect(self.set_drawparams)
self.wdrawtype = wdrawtype
wdrawcolor = QtGui.QComboBox()
for name in self.drawcolors:
wdrawcolor.addItem(name)
index = self.drawcolors.index('lightblue')
wdrawcolor.setCurrentIndex(index)
wdrawcolor.activated.connect(self.set_drawparams)
self.wdrawcolor = wdrawcolor
wfill = QtGui.QCheckBox("Fill")
wfill.stateChanged.connect(self.set_drawparams)
self.wfill = wfill
walpha = QtGui.QDoubleSpinBox()
walpha.setRange(0.0, 1.0)
walpha.setSingleStep(0.1)
walpha.setValue(1.0)
walpha.valueChanged.connect(self.set_drawparams)
self.walpha = walpha
wclear = QtGui.QPushButton("Clear Canvas")
wclear.clicked.connect(self.clear_canvas)
wopen = QtGui.QPushButton("Open File")
wopen.clicked.connect(self.open_file)
wquit = QtGui.QPushButton("Quit")
wquit.clicked.connect(self.close)
hbox.addStretch(1)
for w in (wopen, wdrawtype, wdrawcolor, wfill,
QtGui.QLabel('Alpha:'), walpha, wclear, wquit):
hbox.addWidget(w, stretch=0)
hw = QtGui.QWidget()
hw.setLayout(hbox)
vbox.addWidget(hw, stretch=0)
vw = QtGui.QWidget()
self.setCentralWidget(vw)
vw.setLayout(vbox)
def set_drawparams(self, kind):
index = self.wdrawtype.currentIndex()
kind = self.drawtypes[index]
index = self.wdrawcolor.currentIndex()
fill = (self.wfill.checkState() != 0)
alpha = self.walpha.value()
params = { 'color': self.drawcolors[index],
'alpha': alpha,
}
if kind in ('circle', 'rectangle', 'polygon', 'triangle',
'righttriangle', 'ellipse', 'square', 'box'):
params['fill'] = fill
params['fillalpha'] = alpha
self.canvas.set_drawtype(kind, **params)
def clear_canvas(self):
self.canvas.deleteAllObjects()
def load_file(self, filepath):
image = AstroImage.AstroImage(logger=self.logger)
image.load_file(filepath)
self.fitsimage.set_image(image)
self.setWindowTitle(filepath)
def open_file(self):
res = QtGui.QFileDialog.getOpenFileName(self, "Open FITS file",
".", "FITS files (*.fits)")
if isinstance(res, tuple):
fileName = res[0]
else:
fileName = str(res)
if len(fileName) != 0:
self.load_file(fileName)
def drop_file(self, fitsimage, paths):
fileName = paths[0]
#print(fileName)
self.load_file(fileName)
def motion(self, fitsimage, button, data_x, data_y):
# Get the value under the data coordinates
try:
#value = fitsimage.get_data(data_x, data_y)
# We report the value across the pixel, even though the coords
# change halfway across the pixel
value = fitsimage.get_data(int(data_x+0.5), int(data_y+0.5))
except Exception:
value = None
fits_x, fits_y = data_x + 1, data_y + 1
# Calculate WCS RA
try:
# NOTE: image function operates on DATA space coords
image = fitsimage.get_image()
if image is None:
# No image loaded
return
ra_txt, dec_txt = image.pixtoradec(fits_x, fits_y,
format='str', coords='fits')
except Exception as e:
self.logger.warning("Bad coordinate conversion: %s" % (
str(e)))
ra_txt = 'BAD WCS'
dec_txt = 'BAD WCS'
text = "RA: %s DEC: %s X: %.2f Y: %.2f Value: %s" % (
ra_txt, dec_txt, fits_x, fits_y, value)
self.readout.setText(text)
def main(options, args):
QtGui.QApplication.setGraphicsSystem('raster')
app = QtGui.QApplication(args)
app.connect(app, QtCore.SIGNAL('lastWindowClosed()'),
app, QtCore.SLOT('quit()'))
logger = log.get_logger(name="example2", options=options)
w = FitsViewer(logger)
w.resize(524, 540)
w.show()
app.setActiveWindow(w)
w.raise_()
w.activateWindow()
if len(args) > 0:
w.load_file(args[0])
app.exec_()
if __name__ == "__main__":
# Parse command line options with nifty optparse module
from optparse import OptionParser
usage = "usage: %prog [options] cmd [args]"
    optprs = OptionParser(usage=usage, version='%prog')
optprs.add_option("--debug", dest="debug", default=False, action="store_true",
help="Enter the pdb debugger on main()")
optprs.add_option("--log", dest="logfile", metavar="FILE",
help="Write logging output to FILE")
optprs.add_option("--loglevel", dest="loglevel", metavar="LEVEL",
type='int', default=None,
help="Set logging level to LEVEL")
optprs.add_option("--stderr", dest="logstderr", default=False,
action="store_true",
help="Copy logging also to stderr")
optprs.add_option("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
(options, args) = optprs.parse_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
# END
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,re
from waflib import Utils,Task,Errors,Logs,Node
from waflib.TaskGen import feature,before_method
re_bibunit=re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M)
def bibunitscan(self):
node=self.inputs[0]
nodes=[]
if not node:return nodes
code=node.read()
for match in re_bibunit.finditer(code):
path=match.group('file')
if path:
for k in('','.bib'):
Logs.debug('tex: trying %s%s',path,k)
fi=node.parent.find_resource(path+k)
if fi:
nodes.append(fi)
else:
Logs.debug('tex: could not find %s',path)
Logs.debug('tex: found the following bibunit files: %s',nodes)
return nodes
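# Illustrative note (file name invented, not part of waf itself): re_bibunit picks up
# bibunits-style references such as a LaTeX line "\putbib[chapter1]"; for that line
# match.group('file') is 'chapter1', and bibunitscan() then looks for 'chapter1' or
# 'chapter1.bib' next to the scanned file.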
exts_deps_tex=['','.ltx','.tex','.bib','.pdf','.png','.eps','.ps','.sty']
exts_tex=['.ltx','.tex']
re_tex=re.compile(r'\\(?P<type>usepackage|RequirePackage|include|bibliography([^\[\]{}]*)|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M)
g_bibtex_re=re.compile('bibdata',re.M)
g_glossaries_re=re.compile('\\@newglossary',re.M)
class tex(Task.Task):
bibtex_fun,_=Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}',shell=False)
bibtex_fun.__doc__="""
Execute the program **bibtex**
"""
makeindex_fun,_=Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}',shell=False)
makeindex_fun.__doc__="""
Execute the program **makeindex**
"""
makeglossaries_fun,_=Task.compile_fun('${MAKEGLOSSARIES} ${SRCFILE}',shell=False)
makeglossaries_fun.__doc__="""
Execute the program **makeglossaries**
"""
def exec_command(self,cmd,**kw):
if self.env.PROMPT_LATEX:
kw['stdout']=kw['stderr']=None
return super(tex,self).exec_command(cmd,**kw)
def scan_aux(self,node):
nodes=[node]
re_aux=re.compile(r'\\@input{(?P<file>[^{}]*)}',re.M)
def parse_node(node):
code=node.read()
for match in re_aux.finditer(code):
path=match.group('file')
found=node.parent.find_or_declare(path)
if found and found not in nodes:
Logs.debug('tex: found aux node %r',found)
nodes.append(found)
parse_node(found)
parse_node(node)
return nodes
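	# Illustrative note (file names invented): scan_aux() follows "\@input{...}" records,
	# so a main.aux containing "\@input{chapter1.aux}" adds chapter1.aux to the returned
	# node list and is itself scanned recursively for further \@input records.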
def scan(self):
node=self.inputs[0]
nodes=[]
names=[]
seen=[]
if not node:return(nodes,names)
def parse_node(node):
if node in seen:
return
seen.append(node)
code=node.read()
global re_tex
for match in re_tex.finditer(code):
multibib=match.group('type')
if multibib and multibib.startswith('bibliography'):
multibib=multibib[len('bibliography'):]
if multibib.startswith('style'):
continue
else:
multibib=None
for path in match.group('file').split(','):
if path:
add_name=True
found=None
for k in exts_deps_tex:
for up in self.texinputs_nodes:
Logs.debug('tex: trying %s%s',path,k)
found=up.find_resource(path+k)
if found:
break
for tsk in self.generator.tasks:
if not found or found in tsk.outputs:
break
else:
nodes.append(found)
add_name=False
for ext in exts_tex:
if found.name.endswith(ext):
parse_node(found)
break
if found and multibib and found.name.endswith('.bib'):
try:
self.multibibs.append(found)
except AttributeError:
self.multibibs=[found]
if add_name:
names.append(path)
parse_node(node)
for x in nodes:
x.parent.get_bld().mkdir()
Logs.debug("tex: found the following : %s and names %s",nodes,names)
return(nodes,names)
def check_status(self,msg,retcode):
if retcode!=0:
raise Errors.WafError('%r command exit status %r'%(msg,retcode))
def info(self,*k,**kw):
try:
info=self.generator.bld.conf.logger.info
except AttributeError:
info=Logs.info
info(*k,**kw)
def bibfile(self):
for aux_node in self.aux_nodes:
try:
ct=aux_node.read()
except EnvironmentError:
				Logs.error('Error reading %s',aux_node.abspath())
continue
if g_bibtex_re.findall(ct):
self.info('calling bibtex')
self.env.env={}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS':self.texinputs(),'BSTINPUTS':self.texinputs()})
self.env.SRCFILE=aux_node.name[:-4]
self.check_status('error when calling bibtex',self.bibtex_fun())
for node in getattr(self,'multibibs',[]):
self.env.env={}
self.env.env.update(os.environ)
self.env.env.update({'BIBINPUTS':self.texinputs(),'BSTINPUTS':self.texinputs()})
self.env.SRCFILE=node.name[:-4]
self.check_status('error when calling bibtex',self.bibtex_fun())
def bibunits(self):
try:
bibunits=bibunitscan(self)
except OSError:
Logs.error('error bibunitscan')
else:
if bibunits:
fn=['bu'+str(i)for i in range(1,len(bibunits)+1)]
if fn:
self.info('calling bibtex on bibunits')
for f in fn:
self.env.env={'BIBINPUTS':self.texinputs(),'BSTINPUTS':self.texinputs()}
self.env.SRCFILE=f
self.check_status('error when calling bibtex',self.bibtex_fun())
def makeindex(self):
self.idx_node=self.inputs[0].change_ext('.idx')
try:
idx_path=self.idx_node.abspath()
os.stat(idx_path)
except OSError:
self.info('index file %s absent, not calling makeindex',idx_path)
else:
self.info('calling makeindex')
self.env.SRCFILE=self.idx_node.name
self.env.env={}
self.check_status('error when calling makeindex %s'%idx_path,self.makeindex_fun())
def bibtopic(self):
p=self.inputs[0].parent.get_bld()
if os.path.exists(os.path.join(p.abspath(),'btaux.aux')):
self.aux_nodes+=p.ant_glob('*[0-9].aux')
def makeglossaries(self):
src_file=self.inputs[0].abspath()
base_file=os.path.basename(src_file)
base,_=os.path.splitext(base_file)
for aux_node in self.aux_nodes:
try:
ct=aux_node.read()
except EnvironmentError:
				Logs.error('Error reading %s',aux_node.abspath())
continue
if g_glossaries_re.findall(ct):
if not self.env.MAKEGLOSSARIES:
raise Errors.WafError("The program 'makeglossaries' is missing!")
Logs.warn('calling makeglossaries')
self.env.SRCFILE=base
self.check_status('error when calling makeglossaries %s'%base,self.makeglossaries_fun())
return
def texinputs(self):
return os.pathsep.join([k.abspath()for k in self.texinputs_nodes])+os.pathsep
def run(self):
env=self.env
if not env.PROMPT_LATEX:
env.append_value('LATEXFLAGS','-interaction=batchmode')
env.append_value('PDFLATEXFLAGS','-interaction=batchmode')
env.append_value('XELATEXFLAGS','-interaction=batchmode')
self.cwd=self.inputs[0].parent.get_bld()
self.info('first pass on %s',self.__class__.__name__)
cur_hash=self.hash_aux_nodes()
self.call_latex()
self.hash_aux_nodes()
self.bibtopic()
self.bibfile()
self.bibunits()
self.makeindex()
self.makeglossaries()
for i in range(10):
prev_hash=cur_hash
cur_hash=self.hash_aux_nodes()
if not cur_hash:
Logs.error('No aux.h to process')
if cur_hash and cur_hash==prev_hash:
break
self.info('calling %s',self.__class__.__name__)
self.call_latex()
def hash_aux_nodes(self):
try:
self.aux_nodes
except AttributeError:
try:
self.aux_nodes=self.scan_aux(self.inputs[0].change_ext('.aux'))
except IOError:
return None
return Utils.h_list([Utils.h_file(x.abspath())for x in self.aux_nodes])
def call_latex(self):
self.env.env={}
self.env.env.update(os.environ)
self.env.env.update({'TEXINPUTS':self.texinputs()})
self.env.SRCFILE=self.inputs[0].abspath()
self.check_status('error when calling latex',self.texfun())
class latex(tex):
texfun,vars=Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}',shell=False)
class pdflatex(tex):
texfun,vars=Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}',shell=False)
class xelatex(tex):
texfun,vars=Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}',shell=False)
class dvips(Task.Task):
run_str='${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}'
color='BLUE'
after=['latex','pdflatex','xelatex']
class dvipdf(Task.Task):
run_str='${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}'
color='BLUE'
after=['latex','pdflatex','xelatex']
class pdf2ps(Task.Task):
run_str='${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}'
color='BLUE'
after=['latex','pdflatex','xelatex']
@feature('tex')
@before_method('process_source')
def apply_tex(self):
if not getattr(self,'type',None)in('latex','pdflatex','xelatex'):
self.type='pdflatex'
outs=Utils.to_list(getattr(self,'outs',[]))
try:
self.generator.bld.conf
except AttributeError:
default_prompt=False
else:
default_prompt=True
self.env.PROMPT_LATEX=getattr(self,'prompt',default_prompt)
deps_lst=[]
if getattr(self,'deps',None):
deps=self.to_list(self.deps)
for dep in deps:
if isinstance(dep,str):
n=self.path.find_resource(dep)
if not n:
self.bld.fatal('Could not find %r for %r'%(dep,self))
if not n in deps_lst:
deps_lst.append(n)
elif isinstance(dep,Node.Node):
deps_lst.append(dep)
for node in self.to_nodes(self.source):
if self.type=='latex':
task=self.create_task('latex',node,node.change_ext('.dvi'))
elif self.type=='pdflatex':
task=self.create_task('pdflatex',node,node.change_ext('.pdf'))
elif self.type=='xelatex':
task=self.create_task('xelatex',node,node.change_ext('.pdf'))
task.env=self.env
if deps_lst:
for n in deps_lst:
if not n in task.dep_nodes:
task.dep_nodes.append(n)
if hasattr(self,'texinputs_nodes'):
task.texinputs_nodes=self.texinputs_nodes
else:
task.texinputs_nodes=[node.parent,node.parent.get_bld(),self.path,self.path.get_bld()]
lst=os.environ.get('TEXINPUTS','')
if self.env.TEXINPUTS:
lst+=os.pathsep+self.env.TEXINPUTS
if lst:
lst=lst.split(os.pathsep)
for x in lst:
if x:
if os.path.isabs(x):
p=self.bld.root.find_node(x)
if p:
task.texinputs_nodes.append(p)
else:
Logs.error('Invalid TEXINPUTS folder %s',x)
else:
Logs.error('Cannot resolve relative paths in TEXINPUTS %s',x)
if self.type=='latex':
if'ps'in outs:
tsk=self.create_task('dvips',task.outputs,node.change_ext('.ps'))
tsk.env.env=dict(os.environ)
if'pdf'in outs:
tsk=self.create_task('dvipdf',task.outputs,node.change_ext('.pdf'))
tsk.env.env=dict(os.environ)
elif self.type=='pdflatex':
if'ps'in outs:
self.create_task('pdf2ps',task.outputs,node.change_ext('.ps'))
self.source=[]
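# A wscript sketch (file names and option values invented) of how this feature is
# typically driven; 'outs' may request 'ps' and/or 'pdf' conversions and 'prompt'
# toggles LaTeX's interactive mode:
#   def build(bld):
#       bld(features='tex', type='pdflatex', source='report.tex', outs='pdf', prompt=0)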
def configure(self):
v=self.env
for p in'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps makeglossaries'.split():
try:
self.find_program(p,var=p.upper())
except self.errors.ConfigurationError:
pass
v.DVIPSFLAGS='-Ppdf'
import sublime
import sublime_plugin
import subprocess
import os
from stat import *
sublime_version = 2
if not sublime.version() or int(sublime.version()) > 3000:
sublime_version = 3
if sublime.platform() == 'windows':
import ctypes
from ctypes import c_int32, c_uint32, c_void_p, c_wchar_p, POINTER
class CHOOSECOLOR(ctypes.Structure):
_fields_ = [('lStructSize', c_uint32),
('hwndOwner', c_void_p),
('hInstance', c_void_p),
('rgbResult', c_uint32),
('lpCustColors', POINTER(c_uint32)),
('Flags', c_uint32),
('lCustData', c_void_p),
('lpfnHook', c_void_p),
('lpTemplateName', c_wchar_p)]
class POINT(ctypes.Structure):
_fields_ = [('x', c_int32),
('y', c_int32)]
CustomColorArray = c_uint32 * 16
CC_SOLIDCOLOR = 0x80
CC_RGBINIT = 0x01
CC_FULLOPEN = 0x02
ChooseColorW = ctypes.windll.Comdlg32.ChooseColorW
ChooseColorW.argtypes = [POINTER(CHOOSECOLOR)]
ChooseColorW.restype = c_int32
GetDC = ctypes.windll.User32.GetDC
GetDC.argtypes = [c_void_p]
GetDC.restype = c_void_p
ReleaseDC = ctypes.windll.User32.ReleaseDC
ReleaseDC.argtypes = [c_void_p, c_void_p] # hwnd, hdc
ReleaseDC.restype = c_int32
GetCursorPos = ctypes.windll.User32.GetCursorPos
GetCursorPos.argtypes = [POINTER(POINT)] # POINT
GetCursorPos.restype = c_int32
GetPixel = ctypes.windll.Gdi32.GetPixel
GetPixel.argtypes = [c_void_p, c_int32, c_int32] # hdc, x, y
GetPixel.restype = c_uint32 # colorref
def get_pixel():
hdc = GetDC(0)
pos = POINT()
GetCursorPos(ctypes.byref(pos))
val = GetPixel(hdc, pos.x, pos.y)
ReleaseDC(0, hdc)
return val
def to_custom_color_array(custom_colors):
cc = CustomColorArray()
for i in range(16):
cc[i] = int(custom_colors[i])
return cc
def from_custom_color_array(custom_colors):
cc = [0] * 16
for i in range(16):
cc[i] = str(custom_colors[i])
return cc
def bgr_to_hexstr(bgr, byte_table=list(['{0:02X}'.format(b) for b in range(256)])):
# 0x00BBGGRR
b = byte_table[(bgr >> 16) & 0xff]
g = byte_table[(bgr >> 8) & 0xff]
r = byte_table[(bgr) & 0xff]
return (r + g + b)
def hexstr_to_bgr(hexstr):
if len(hexstr) == 3:
hexstr = hexstr[0] + hexstr[0] + hexstr[1] + hexstr[1] + hexstr[2] + hexstr[2]
r = int(hexstr[0:2], 16)
g = int(hexstr[2:4], 16)
b = int(hexstr[4:6], 16)
return (b << 16) | (g << 8) | r
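    # Worked example (values chosen for illustration): a Windows COLORREF packs the
    # colour as 0x00BBGGRR, so hexstr_to_bgr("FF8000") returns 0x0080FF (blue 0x00,
    # green 0x80, red 0xFF) and bgr_to_hexstr(0x0080FF) gives back "FF8000"; a
    # 3-digit string such as "f80" is first expanded to "ff8800".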
def win_pick(window, starting_color):
paste = None
start_color = None
if starting_color is not None:
start_color = hexstr_to_bgr(starting_color[1:])
s = sublime.load_settings("ColorPicker.sublime-settings")
custom_colors = s.get("custom_colors", ['0'] * 16)
if len(custom_colors) < 16:
custom_colors = ['0'] * 16
s.set('custom_colors', custom_colors)
cc = CHOOSECOLOR()
ctypes.memset(ctypes.byref(cc), 0, ctypes.sizeof(cc))
cc.lStructSize = ctypes.sizeof(cc)
if sublime_version == 2:
cc.hwndOwner = window.hwnd()
else:
# Temporary fix for Sublime Text 3 - For some reason the hwnd crashes it
# Of course, clicking out of the colour picker and into Sublime will make
# Sublime not respond, but as soon as you exit the colour picker it's ok
cc.hwndOwner = None
cc.Flags = CC_SOLIDCOLOR | CC_FULLOPEN | CC_RGBINIT
cc.rgbResult = c_uint32(start_color) if not paste and start_color else get_pixel()
cc.lpCustColors = to_custom_color_array(custom_colors)
if ChooseColorW(ctypes.byref(cc)):
color = bgr_to_hexstr(cc.rgbResult)
else:
color = None
return color
class ColorPicker(object):
# SVG Colors spec: http://www.w3.org/TR/css3-color/#svg-color
SVGColors = {
"aliceblue": "F0F8FF",
"antiquewhite": "FAEBD7",
"aqua": "00FFFF",
"aquamarine": "7FFFD4",
"azure": "F0FFFF",
"beige": "F5F5DC",
"bisque": "FFE4C4",
"black": "000000",
"blanchedalmond": "FFEBCD",
"blue": "0000FF",
"blueviolet": "8A2BE2",
"brown": "A52A2A",
"burlywood": "DEB887",
"cadetblue": "5F9EA0",
"chartreuse": "7FFF00",
"chocolate": "D2691E",
"coral": "FF7F50",
"cornflowerblue": "6495ED",
"cornsilk": "FFF8DC",
"crimson": "DC143C",
"cyan": "00FFFF",
"darkblue": "00008B",
"darkcyan": "008B8B",
"darkgoldenrod": "B8860B",
"darkgray": "A9A9A9",
"darkgreen": "006400",
"darkgrey": "A9A9A9",
"darkkhaki": "BDB76B",
"darkmagenta": "8B008B",
"darkolivegreen": "556B2F",
"darkorange": "FF8C00",
"darkorchid": "9932CC",
"darkred": "8B0000",
"darksalmon": "E9967A",
"darkseagreen": "8FBC8F",
"darkslateblue": "483D8B",
"darkslategray": "2F4F4F",
"darkslategrey": "2F4F4F",
"darkturquoise": "00CED1",
"darkviolet": "9400D3",
"deeppink": "FF1493",
"deepskyblue": "00BFFF",
"dimgray": "696969",
"dimgrey": "696969",
"dodgerblue": "1E90FF",
"firebrick": "B22222",
"floralwhite": "FFFAF0",
"forestgreen": "228B22",
"fuchsia": "FF00FF",
"gainsboro": "DCDCDC",
"ghostwhite": "F8F8FF",
"gold": "FFD700",
"goldenrod": "DAA520",
"gray": "808080",
"green": "008000",
"greenyellow": "ADFF2F",
"grey": "808080",
"honeydew": "F0FFF0",
"hotpink": "FF69B4",
"indianred": "CD5C5C",
"indigo": "4B0082",
"ivory": "FFFFF0",
"khaki": "F0E68C",
"lavender": "E6E6FA",
"lavenderblush": "FFF0F5",
"lawngreen": "7CFC00",
"lemonchiffon": "FFFACD",
"lightblue": "ADD8E6",
"lightcoral": "F08080",
"lightcyan": "E0FFFF",
"lightgoldenrodyellow": "FAFAD2",
"lightgray": "D3D3D3",
"lightgreen": "90EE90",
"lightgrey": "D3D3D3",
"lightpink": "FFB6C1",
"lightsalmon": "FFA07A",
"lightseagreen": "20B2AA",
"lightskyblue": "87CEFA",
"lightslategray": "778899",
"lightslategrey": "778899",
"lightsteelblue": "B0C4DE",
"lightyellow": "FFFFE0",
"lime": "00FF00",
"limegreen": "32CD32",
"linen": "FAF0E6",
"magenta": "FF00FF",
"maroon": "800000",
"mediumaquamarine": "66CDAA",
"mediumblue": "0000CD",
"mediumorchid": "BA55D3",
"mediumpurple": "9370DB",
"mediumseagreen": "3CB371",
"mediumslateblue": "7B68EE",
"mediumspringgreen": "00FA9A",
"mediumturquoise": "48D1CC",
"mediumvioletred": "C71585",
"midnightblue": "191970",
"mintcream": "F5FFFA",
"mistyrose": "FFE4E1",
"moccasin": "FFE4B5",
"navajowhite": "FFDEAD",
"navy": "000080",
"oldlace": "FDF5E6",
"olive": "808000",
"olivedrab": "6B8E23",
"orange": "FFA500",
"orangered": "FF4500",
"orchid": "DA70D6",
"palegoldenrod": "EEE8AA",
"palegreen": "98FB98",
"paleturquoise": "AFEEEE",
"palevioletred": "DB7093",
"papayawhip": "FFEFD5",
"peachpuff": "FFDAB9",
"peru": "CD853F",
"pink": "FFC0CB",
"plum": "DDA0DD",
"powderblue": "B0E0E6",
"purple": "800080",
"red": "FF0000",
"rosybrown": "BC8F8F",
"royalblue": "4169E1",
"saddlebrown": "8B4513",
"salmon": "FA8072",
"sandybrown": "F4A460",
"seagreen": "2E8B57",
"seashell": "FFF5EE",
"sienna": "A0522D",
"silver": "C0C0C0",
"skyblue": "87CEEB",
"slateblue": "6A5ACD",
"slategray": "708090",
"slategrey": "708090",
"snow": "FFFAFA",
"springgreen": "00FF7F",
"steelblue": "4682B4",
"tan": "D2B48C",
"teal": "008080",
"thistle": "D8BFD8",
"tomato": "FF6347",
"turquoise": "40E0D0",
"violet": "EE82EE",
"wheat": "F5DEB3",
"white": "FFFFFF",
"whitesmoke": "F5F5F5",
"yellow": "FFFF00",
"yellowgreen": "9ACD32"
}
def pick(self, window, starting_color=None):
start_color = None
start_color_osx = None
if starting_color is not None:
svg_color_hex = self.SVGColors.get(starting_color, None)
if svg_color_hex is not None:
starting_color = svg_color_hex
if self.is_valid_hex_color(starting_color):
start_color = "#" + starting_color
start_color_osx = starting_color
if sublime.platform() == 'windows':
color = win_pick(window, start_color)
elif sublime.platform() == 'osx':
args = [os.path.join(sublime.packages_path(), binpath)]
if start_color_osx:
args.append('-startColor')
args.append(start_color_osx)
else:
args = [os.path.join(sublime.packages_path(), binpath)]
if start_color:
args.append(start_color)
if sublime.platform() != 'windows':
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
color = proc.communicate()[0].strip()
if color:
if sublime.platform() != 'windows' or sublime_version == 2:
color = color.decode('utf-8')
return color
def is_valid_hex_color(self, s):
if len(s) not in (3, 6):
return False
try:
return 0 <= int(s, 16) <= 0xffffff
except ValueError:
return False
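    # Illustrative checks (inputs invented): "1e90ff" and "f80" pass (6- or 3-digit
    # hex without the leading '#'), while "#1e90ff", "12345" and "gg0000" are rejected.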
class ColorPickApiGetColorCommand(sublime_plugin.WindowCommand):
def run(self, settings, default_color=None):
if default_color is not None and default_color.startswith('#'):
default_color = default_color[1:]
color = ColorPicker().pick(self.window, default_color)
s = sublime.load_settings(settings)
s.set('color_pick_return', '#' + color if color else None)
class ColorPickApiIsAvailableCommand(sublime_plugin.ApplicationCommand):
def run(self, settings):
s = sublime.load_settings(settings)
s.set('color_pick_return', True)
class ColorPickCommand(sublime_plugin.TextCommand):
def run(self, edit):
sel = self.view.sel()
selected = None
# get the currently selected color - if any
if len(sel) > 0:
selected = self.view.substr(self.view.word(sel[0])).strip()
if selected.startswith('#'):
selected = selected[1:]
cp = ColorPicker()
color = cp.pick(self.view.window(), selected)
        if not color:
            # user cancelled the picker; leave the buffer untouched
            return
        # Determine user preference for case of letters (default upper)
s = sublime.load_settings("ColorPicker.sublime-settings")
upper_case = s.get("color_upper_case", True)
if upper_case:
color = color.upper()
else:
color = color.lower()
# replace all regions with color
for region in sel:
word = self.view.word(region)
# if the selected word is a valid color, replace it
if cp.is_valid_hex_color(self.view.substr(word)):
# include '#' if present
if self.view.substr(word.a - 1) == '#':
word = sublime.Region(word.a - 1, word.b)
# replace
self.view.replace(edit, word, '#' + color)
# otherwise just replace the selected region
else:
self.view.replace(edit, region, '#' + color)
libdir = os.path.join('ColorPicker', 'lib')
if sublime.platform() == 'osx':
binpath = os.path.join(libdir, 'osx_colorpicker')
else:
binpath = os.path.join(libdir, 'linux_colorpicker.py')
def plugin_loaded():
if sublime.platform() == 'osx' or sublime.platform() == 'linux':
binfile = os.path.join(sublime.packages_path(), binpath)
if not os.access(binfile, os.X_OK):
os.chmod(binfile, 0o755)
if sublime_version == 2:
plugin_loaded()
"""HTTP Client using pyCurl."""
from collections import deque
from functools import partial
from io import BytesIO
from time import time
from kombu.asynchronous.hub import READ, WRITE, get_event_loop
from kombu.exceptions import HttpError
from kombu.utils.encoding import bytes_to_str
from .base import BaseClient
try:
import pycurl # noqa
except ImportError: # pragma: no cover
pycurl = Curl = METH_TO_CURL = None # noqa
else:
from pycurl import Curl # noqa
METH_TO_CURL = { # noqa
'GET': pycurl.HTTPGET,
'POST': pycurl.POST,
'PUT': pycurl.UPLOAD,
'HEAD': pycurl.NOBODY,
}
__all__ = ('CurlClient',)
DEFAULT_USER_AGENT = 'Mozilla/5.0 (compatible; pycurl)'
EXTRA_METHODS = frozenset(['DELETE', 'OPTIONS', 'PATCH'])
class CurlClient(BaseClient):
"""Curl HTTP Client."""
Curl = Curl
def __init__(self, hub=None, max_clients=10):
if pycurl is None:
raise ImportError('The curl client requires the pycurl library.')
hub = hub or get_event_loop()
super().__init__(hub)
self.max_clients = max_clients
self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
self._curls = [self.Curl() for i in range(max_clients)]
self._free_list = self._curls[:]
self._pending = deque()
self._fds = {}
self._socket_action = self._multi.socket_action
self._timeout_check_tref = self.hub.call_repeatedly(
1.0, self._timeout_check,
)
# pycurl 7.29.0 workaround
dummy_curl_handle = pycurl.Curl()
self._multi.add_handle(dummy_curl_handle)
self._multi.remove_handle(dummy_curl_handle)
def close(self):
self._timeout_check_tref.cancel()
for _curl in self._curls:
_curl.close()
self._multi.close()
def add_request(self, request):
self._pending.append(request)
self._process_queue()
self._set_timeout(0)
return request
# the next two methods are used for linux/epoll workaround:
# we temporarily remove all curl fds from hub, so curl cannot
# close a fd which is still inside epoll
def _pop_from_hub(self):
for fd in self._fds:
self.hub.remove(fd)
def _push_to_hub(self):
for fd, events in self._fds.items():
if events & READ:
self.hub.add_reader(fd, self.on_readable, fd)
if events & WRITE:
self.hub.add_writer(fd, self.on_writable, fd)
def _handle_socket(self, event, fd, multi, data, _pycurl=pycurl):
if event == _pycurl.POLL_REMOVE:
if fd in self._fds:
self._fds.pop(fd, None)
else:
if event == _pycurl.POLL_IN:
self._fds[fd] = READ
elif event == _pycurl.POLL_OUT:
self._fds[fd] = WRITE
elif event == _pycurl.POLL_INOUT:
self._fds[fd] = READ | WRITE
def _set_timeout(self, msecs):
pass # TODO
def _timeout_check(self, _pycurl=pycurl):
self._pop_from_hub()
try:
while 1:
try:
ret, _ = self._multi.socket_all()
except pycurl.error as exc:
ret = exc.args[0]
if ret != _pycurl.E_CALL_MULTI_PERFORM:
break
finally:
self._push_to_hub()
self._process_pending_requests()
def on_readable(self, fd, _pycurl=pycurl):
return self._on_event(fd, _pycurl.CSELECT_IN)
def on_writable(self, fd, _pycurl=pycurl):
return self._on_event(fd, _pycurl.CSELECT_OUT)
def _on_event(self, fd, event, _pycurl=pycurl):
self._pop_from_hub()
try:
while 1:
try:
ret, _ = self._socket_action(fd, event)
except pycurl.error as exc:
ret = exc.args[0]
if ret != _pycurl.E_CALL_MULTI_PERFORM:
break
finally:
self._push_to_hub()
self._process_pending_requests()
def _process_pending_requests(self):
while 1:
q, succeeded, failed = self._multi.info_read()
for curl in succeeded:
self._process(curl)
for curl, errno, reason in failed:
self._process(curl, errno, reason)
if q == 0:
break
self._process_queue()
def _process_queue(self):
while 1:
started = 0
while self._free_list and self._pending:
started += 1
curl = self._free_list.pop()
request = self._pending.popleft()
headers = self.Headers()
buf = BytesIO()
curl.info = {
'headers': headers,
'buffer': buf,
'request': request,
'curl_start_time': time(),
}
self._setup_request(curl, request, buf, headers)
self._multi.add_handle(curl)
if not started:
break
def _process(self, curl, errno=None, reason=None, _pycurl=pycurl):
info, curl.info = curl.info, None
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info['buffer']
if errno:
code = 599
error = HttpError(code, reason)
error.errno = errno
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(_pycurl.HTTP_CODE)
effective_url = curl.getinfo(_pycurl.EFFECTIVE_URL)
buffer.seek(0)
# try:
request = info['request']
request.on_ready(self.Response(
request=request, code=code, headers=info['headers'],
buffer=buffer, effective_url=effective_url, error=error,
))
def _setup_request(self, curl, request, buffer, headers, _pycurl=pycurl):
setopt = curl.setopt
setopt(_pycurl.URL, bytes_to_str(request.url))
# see tornado curl client
request.headers.setdefault('Expect', '')
request.headers.setdefault('Pragma', '')
setopt(
_pycurl.HTTPHEADER,
['{}: {}'.format(*h) for h in request.headers.items()],
)
setopt(
_pycurl.HEADERFUNCTION,
partial(request.on_header or self.on_header, request.headers),
)
setopt(
_pycurl.WRITEFUNCTION, request.on_stream or buffer.write,
)
setopt(
_pycurl.FOLLOWLOCATION, request.follow_redirects,
)
setopt(
_pycurl.USERAGENT,
bytes_to_str(request.user_agent or DEFAULT_USER_AGENT),
)
if request.network_interface:
setopt(_pycurl.INTERFACE, request.network_interface)
setopt(
_pycurl.ENCODING, 'gzip,deflate' if request.use_gzip else 'none',
)
if request.proxy_host:
if not request.proxy_port:
raise ValueError('Request with proxy_host but no proxy_port')
setopt(_pycurl.PROXY, request.proxy_host)
setopt(_pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
setopt(_pycurl.PROXYUSERPWD, '{}:{}'.format(
request.proxy_username, request.proxy_password or ''))
else:
setopt(_pycurl.PROXY, '')
curl.unsetopt(_pycurl.PROXYUSERPWD)
setopt(_pycurl.SSL_VERIFYPEER, 1 if request.validate_cert else 0)
setopt(_pycurl.SSL_VERIFYHOST, 2 if request.validate_cert else 0)
if request.ca_certs is not None:
setopt(_pycurl.CAINFO, request.ca_certs)
setopt(_pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
for meth in METH_TO_CURL.values():
setopt(meth, False)
try:
meth = METH_TO_CURL[request.method]
except KeyError:
curl.setopt(_pycurl.CUSTOMREQUEST, request.method)
else:
curl.unsetopt(_pycurl.CUSTOMREQUEST)
setopt(meth, True)
if request.method in ('POST', 'PUT'):
body = request.body.encode('utf-8') if request.body else bytes()
reqbuffer = BytesIO(body)
setopt(_pycurl.READFUNCTION, reqbuffer.read)
if request.method == 'POST':
def ioctl(cmd):
if cmd == _pycurl.IOCMD_RESTARTREAD:
reqbuffer.seek(0)
setopt(_pycurl.IOCTLFUNCTION, ioctl)
setopt(_pycurl.POSTFIELDSIZE, len(body))
else:
setopt(_pycurl.INFILESIZE, len(body))
elif request.method == 'GET':
assert not request.body
if request.auth_username is not None:
auth_mode = {
'basic': _pycurl.HTTPAUTH_BASIC,
'digest': _pycurl.HTTPAUTH_DIGEST
}[request.auth_mode or 'basic']
setopt(_pycurl.HTTPAUTH, auth_mode)
userpwd = '{}:{}'.format(
request.auth_username, request.auth_password or '',
)
setopt(_pycurl.USERPWD, userpwd)
else:
curl.unsetopt(_pycurl.USERPWD)
if request.client_cert is not None:
setopt(_pycurl.SSLCERT, request.client_cert)
if request.client_key is not None:
setopt(_pycurl.SSLKEY, request.client_key)
if request.on_prepare is not None:
request.on_prepare(curl)
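# Rough usage sketch (assumes a Request object with an on_ready callback from
# kombu.asynchronous.http; details not shown in this module): create a CurlClient
# bound to the event loop hub, call add_request(request) for each URL, and let the
# hub run; when curl finishes, request.on_ready() receives a Response built from
# the collected headers and body buffer.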
import datetime
from typing import Callable, Optional
import pytz
from necrobot.match.matchgsheetinfo import MatchGSheetInfo
from necrobot.match.matchinfo import MatchInfo
# from necrobot.match.matchracedata import MatchRaceData
from necrobot.race.raceinfo import RaceInfo
from necrobot.user import userlib
from necrobot.user.necrouser import NecroUser
from necrobot.util import console
from necrobot.util.decorators import commits
class Match(object):
def __init__(
self,
commit_fn,
racer_1_id,
racer_2_id,
match_id=None,
suggested_time=None,
r1_confirmed=False,
r2_confirmed=False,
r1_unconfirmed=False,
r2_unconfirmed=False,
match_info=MatchInfo(),
cawmentator_id=None,
channel_id=None,
gsheet_info=None,
finish_time=None,
autogenned=False,
league_tag=None
):
"""Create a `Match` object. There should be no need to call this directly; use `matchutil.make_match` instead,
since this needs to interact with the database.
Parameters
----------
commit_fn: Callable
            Function for committing to the database.
racer_1_id: int
The DB user ID of the first racer.
racer_2_id: int
The DB user ID of the second racer.
match_id: int
The DB unique ID of this match.
suggested_time: datetime.datetime
The time the match is suggested for. If no tzinfo, UTC is assumed.
r1_confirmed: bool
Whether the first racer has confirmed the match time.
r2_confirmed: bool
Whether the second racer has confirmed the match time.
r1_unconfirmed: bool
Whether the first racer wishes to unconfirm the match time.
r2_unconfirmed: bool
Whether the second racer wishes to unconfirm the match time.
match_info: MatchInfo
The type of match.
cawmentator_id: int
The DB unique ID of the cawmentator for this match.
channel_id: int
The discord.ID of the channel for this match, if any.
gsheet_info: MatchGSheetInfo
If this match was created from a GSheet, the worksheet and row it was created from.
finish_time: datetime.datetime
The time the match finished at. If no tzinfo, UTC is assumed.
league_tag: str
            The tag for the league this is a match in, or None if there is no such league.
"""
self._match_id = match_id # type: int
self._league_tag = league_tag # type: Optional[str]
# Racers in the match
self._racer_1_id = racer_1_id # type: int
self._racer_1 = None # type: Optional[NecroUser]
self._racer_2_id = racer_2_id # type: int
self._racer_2 = None # type: Optional[NecroUser]
# Scheduling data
self._suggested_time = None # type: Optional[datetime.datetime]
self._finish_time = None # type: Optional[datetime.datetime]
self._set_suggested_time(suggested_time)
self._set_finish_time(finish_time)
self._confirmed_by_r1 = r1_confirmed # type: bool
self._confirmed_by_r2 = r2_confirmed # type: bool
self._r1_wishes_to_unconfirm = r1_unconfirmed # type: bool
self._r2_wishes_to_unconfirm = r2_unconfirmed # type: bool
# Format and race data
self._match_info = MatchInfo.copy(match_info) # type: MatchInfo
# Other
        self._cawmentator_id = int(cawmentator_id) if cawmentator_id is not None else None # type: Optional[int]
        self._channel_id = channel_id # type: Optional[int]
        self._gsheet_info = gsheet_info # type: Optional[MatchGSheetInfo]
self._autogenned = autogenned # type: bool
# Commit function
self._commit = commit_fn # type: Callable[[], None]
def __repr__(self):
        return 'Match: <ID={mid}>, <ChannelName={cname}>'.format(mid=self.match_id, cname=self.matchroom_name)
def __eq__(self, other):
return self.match_id == other.match_id
def __str__(self):
return self.matchroom_name
async def initialize(self):
self._racer_1 = await userlib.get_user(user_id=self._racer_1_id)
self._racer_2 = await userlib.get_user(user_id=self._racer_2_id)
if self._racer_1 is None or self._racer_2 is None:
raise RuntimeError('Attempted to make a Match object with an unregistered racer.')
@property
def format_str(self) -> str:
"""Get a string describing the match format."""
return self.match_info.format_str
@property
def ranked(self):
return self._match_info.ranked
@property
def is_registered(self) -> bool:
return self._match_id is not None
@property
def match_id(self) -> int:
return self._match_id
@property
def racers(self) -> list:
return [self.racer_1, self.racer_2]
@property
def racer_1(self) -> NecroUser:
return self._racer_1
@property
def racer_2(self) -> NecroUser:
return self._racer_2
@property
def suggested_time(self) -> datetime.datetime:
return self._suggested_time
@property
def finish_time(self) -> datetime.datetime:
return self._finish_time
@property
def confirmed_by_r1(self) -> bool:
return self._confirmed_by_r1
@property
def confirmed_by_r2(self) -> bool:
return self._confirmed_by_r2
@property
def r1_wishes_to_unconfirm(self) -> bool:
return self._r1_wishes_to_unconfirm
@property
def r2_wishes_to_unconfirm(self) -> bool:
return self._r2_wishes_to_unconfirm
@property
def has_suggested_time(self) -> bool:
return self.suggested_time is not None
@property
def is_scheduled(self) -> bool:
return self.has_suggested_time and self.confirmed_by_r1 and self.confirmed_by_r2
@property
def is_best_of(self) -> int:
return self._match_info.is_best_of
@property
def number_of_races(self) -> int:
return self._match_info.max_races
@property
def race_info(self) -> RaceInfo:
return self._match_info.race_info
@property
def match_info(self) -> MatchInfo:
return self._match_info
@property
def cawmentator_id(self) -> int:
return self._cawmentator_id
@property
def channel_id(self) -> int:
return self._channel_id
@property
def sheet_id(self) -> int:
return self._gsheet_info.wks_id if self._gsheet_info is not None else None
@property
def sheet_row(self) -> int:
return self._gsheet_info.row if self._gsheet_info is not None else None
@property
def autogenned(self) -> bool:
return self._autogenned
@property
def league_tag(self) -> str:
return self._league_tag
@property
def matchroom_name(self) -> str:
"""Get a name for a channel for this match."""
racer_names = []
for racer in self.racers:
racer_matchroom_name = racer.matchroom_name
if racer_matchroom_name is not None:
racer_names.append(racer_matchroom_name)
if len(racer_names) == 2:
racer_names.sort()
return '{0}-{1}-{2}-{3}'.format(racer_names[0], racer_names[1], self.league_tag, self.match_id)
else:
return self.race_info.raceroom_name
@property
    def time_until_match(self) -> Optional[datetime.timedelta]:
return (self.suggested_time - pytz.utc.localize(datetime.datetime.utcnow())) if self.is_scheduled else None
@property
    def discord_rel_timestamp(self) -> Optional[str]:
return f'<t:{int(self.suggested_time.timestamp())}:R>' if self.is_scheduled else None
async def commit(self) -> None:
"""Write the match to the database."""
await self._commit(self)
    async def get_cawmentator(self) -> Optional[NecroUser]:
if self._cawmentator_id is None:
return None
return await userlib.get_user(user_id=self._cawmentator_id)
def racing_in_match(self, user) -> bool:
"""
Parameters
----------
user: NecroUser
Returns
-------
bool
True if the user is in the match.
"""
return user == self.racer_1 or user == self.racer_2
# Whether the match has been confirmed by the racer
def is_confirmed_by(self, racer: NecroUser) -> bool:
"""
Parameters
----------
racer: NecroUser
Returns
-------
bool
Whether the Match has been confirmed by racer.
"""
if racer == self.racer_1:
return self._confirmed_by_r1
elif racer == self.racer_2:
return self._confirmed_by_r2
else:
return False
def set_match_id(self, match_id: int) -> None:
"""Sets the match ID. There should be no need to call this yourself."""
self._match_id = match_id
@commits
def set_finish_time(self, time: datetime.datetime) -> None:
"""Sets the finishing time for the match.
Parameters
----------
time: datetime.datetime
            The time the match finished at. If no tzinfo, UTC is assumed.
"""
self._set_finish_time(time)
@commits
def suggest_time(self, time: datetime.datetime) -> None:
"""Unconfirms all previous times and suggests a new time for the match.
Parameters
----------
time: datetime.datetime
The time to suggest for the match.
"""
self.force_unconfirm()
self._set_suggested_time(time)
@commits
def confirm_time(self, racer: NecroUser) -> None:
"""Confirms the current suggested time by the given racer. (The match is scheduled after
both racers have confirmed.)
Parameters
----------
racer: NecroUser
"""
if racer == self.racer_1:
self._confirmed_by_r1 = True
elif racer == self.racer_2:
self._confirmed_by_r2 = True
@commits
def unconfirm_time(self, racer: NecroUser) -> None:
"""Attempts to unconfirm the current suggested time by the given racer. This deletes the
suggested time if either the match is not already scheduled or the other racer has also
indicated a desire to unconfirm.
Parameters
----------
racer: NecroUser
"""
if racer == self.racer_1:
if (not self._confirmed_by_r2) or self._r2_wishes_to_unconfirm:
self.force_unconfirm()
else:
self._r1_wishes_to_unconfirm = True
elif racer == self.racer_2:
if (not self._confirmed_by_r1) or self._r1_wishes_to_unconfirm:
self.force_unconfirm()
else:
self._r2_wishes_to_unconfirm = True
@commits
def force_confirm(self) -> None:
"""Forces all racers to confirm the suggested time."""
if self._suggested_time is None:
console.warning('Tried to force_confirm a Match with no suggested time.')
return
self._confirmed_by_r1 = True
self._confirmed_by_r2 = True
self._r1_wishes_to_unconfirm = False
self._r2_wishes_to_unconfirm = False
@commits
def force_unconfirm(self) -> None:
"""Unconfirms and deletes any current suggested time."""
self._confirmed_by_r1 = False
self._confirmed_by_r2 = False
self._r1_wishes_to_unconfirm = False
self._r2_wishes_to_unconfirm = False
self._suggested_time = None
@commits
def set_repeat(self, number: int) -> None:
"""Sets the match type to be a repeat-X.
Parameters
----------
number: int
The number of races to be played in the match.
"""
self._match_info.is_best_of = False
self._match_info.max_races = number
@commits
def set_best_of(self, number: int) -> None:
"""Sets the match type to be a best-of-X.
Parameters
----------
number: int
The maximum number of races to be played (the match will be a best-of-number).
"""
self._match_info.is_best_of = True
self._match_info.max_races = number
@commits
def set_race_info(self, race_info: RaceInfo) -> None:
"""Sets the type of races to be done in the match.
Parameters
----------
race_info: RaceInfo
The new match RaceInfo.
"""
self._match_info.race_info = race_info
@commits
    def set_cawmentator_id(self, cawmentator_id: Optional[int]) -> None:
"""Sets a cawmentator for the match. Using cawmentator_id = None will remove cawmentary.
Parameters
----------
cawmentator_id: Optional[int]
The user ID of the cawmentator.
"""
        self._cawmentator_id = int(cawmentator_id) if cawmentator_id is not None else None
@commits
    def set_channel_id(self, channel_id: Optional[int]) -> None:
"""Sets a channel ID for the match.
Parameters
----------
channel_id: Optional[int]
A discord.Channel ID
"""
        self._channel_id = int(channel_id) if channel_id is not None else None
@commits
def set_league_tag(self, league_tag: Optional[str]):
"""Sets a league tag for the match.
Parameters
----------
league_tag: Optional[str]
A league's tag
"""
self._league_tag = league_tag
@commits
def raw_update(self, **kwargs):
if 'suggested_time' in kwargs:
self._set_suggested_time(kwargs['suggested_time'])
if 'r1_confirmed' in kwargs:
self._confirmed_by_r1 = kwargs['r1_confirmed']
if 'r2_confirmed' in kwargs:
self._confirmed_by_r2 = kwargs['r2_confirmed']
if 'r1_unconfirmed' in kwargs:
self._r1_wishes_to_unconfirm = kwargs['r1_unconfirmed']
if 'r2_unconfirmed' in kwargs:
self._r2_wishes_to_unconfirm = kwargs['r2_unconfirmed']
if 'match_info' in kwargs:
self._match_info = kwargs['match_info']
if 'cawmentator_id' in kwargs:
self._cawmentator_id = kwargs['cawmentator_id']
if 'channel_id' in kwargs:
self._channel_id = kwargs['channel_id']
if 'gsheet_info' in kwargs:
self._gsheet_info = kwargs['gsheet_info']
if 'finish_time' in kwargs:
self._finish_time = kwargs['finish_time']
if 'league_tag' in kwargs:
self._league_tag = kwargs['league_tag']
    def _set_suggested_time(self, time: Optional[datetime.datetime]) -> None:
if time is None:
self._suggested_time = None
return
if time.tzinfo is None:
time = pytz.utc.localize(time)
self._suggested_time = time.astimezone(pytz.utc)
    def _set_finish_time(self, time: Optional[datetime.datetime]) -> None:
if time is None:
self._finish_time = None
return
if time.tzinfo is None:
time = pytz.utc.localize(time)
self._finish_time = time.astimezone(pytz.utc)
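# A minimal usage sketch of the scheduling flow above (the `match` object and
# the datetime value are hypothetical):
#
#   match.suggest_time(datetime.datetime(2021, 5, 1, 18, 0))  # naive -> UTC
#   match.confirm_time(match.racer_1)
#   match.confirm_time(match.racer_2)
#   assert match.is_scheduled
#   match.unconfirm_time(match.racer_1)  # only flags r1's wish to unconfirm
#   match.unconfirm_time(match.racer_2)  # both agree, so the time is deleted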
| |
"""Support for ONVIF Cameras with FFmpeg as decoder."""
import asyncio
from haffmpeg.camera import CameraMjpeg
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
from onvif.exceptions import ONVIFError
import voluptuous as vol
from homeassistant.components.camera import SUPPORT_STREAM, Camera
from homeassistant.components.ffmpeg import CONF_EXTRA_ARGUMENTS, DATA_FFMPEG
from homeassistant.const import HTTP_BASIC_AUTHENTICATION
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from .base import ONVIFBaseEntity
from .const import (
ABSOLUTE_MOVE,
ATTR_CONTINUOUS_DURATION,
ATTR_DISTANCE,
ATTR_MOVE_MODE,
ATTR_PAN,
ATTR_PRESET,
ATTR_SPEED,
ATTR_TILT,
ATTR_ZOOM,
CONF_RTSP_TRANSPORT,
CONF_SNAPSHOT_AUTH,
CONTINUOUS_MOVE,
DIR_DOWN,
DIR_LEFT,
DIR_RIGHT,
DIR_UP,
DOMAIN,
GOTOPRESET_MOVE,
LOGGER,
RELATIVE_MOVE,
SERVICE_PTZ,
STOP_MOVE,
ZOOM_IN,
ZOOM_OUT,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ONVIF camera video stream."""
platform = entity_platform.current_platform.get()
# Create PTZ service
platform.async_register_entity_service(
SERVICE_PTZ,
{
vol.Optional(ATTR_PAN): vol.In([DIR_LEFT, DIR_RIGHT]),
vol.Optional(ATTR_TILT): vol.In([DIR_UP, DIR_DOWN]),
vol.Optional(ATTR_ZOOM): vol.In([ZOOM_OUT, ZOOM_IN]),
vol.Optional(ATTR_DISTANCE, default=0.1): cv.small_float,
vol.Optional(ATTR_SPEED, default=0.5): cv.small_float,
vol.Optional(ATTR_MOVE_MODE, default=RELATIVE_MOVE): vol.In(
[
CONTINUOUS_MOVE,
RELATIVE_MOVE,
ABSOLUTE_MOVE,
GOTOPRESET_MOVE,
STOP_MOVE,
]
),
vol.Optional(ATTR_CONTINUOUS_DURATION, default=0.5): cv.small_float,
vol.Optional(ATTR_PRESET, default="0"): cv.string,
},
"async_perform_ptz",
)
device = hass.data[DOMAIN][config_entry.unique_id]
async_add_entities(
[ONVIFCameraEntity(device, profile) for profile in device.profiles]
)
return True
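# Illustrative PTZ service call from Home Assistant (the entity ID and field
# values below are assumptions; valid values come from the schema registered
# above):
#
#   service: onvif.ptz
#   target:
#     entity_id: camera.front_yard
#   data:
#     pan: LEFT
#     tilt: UP
#     distance: 0.1
#     speed: 0.5
#     move_mode: RelativeMove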
class ONVIFCameraEntity(ONVIFBaseEntity, Camera):
"""Representation of an ONVIF camera."""
def __init__(self, device, profile):
"""Initialize ONVIF camera entity."""
ONVIFBaseEntity.__init__(self, device, profile)
Camera.__init__(self)
self.stream_options[CONF_RTSP_TRANSPORT] = device.config_entry.options.get(
CONF_RTSP_TRANSPORT
)
self._basic_auth = (
device.config_entry.data.get(CONF_SNAPSHOT_AUTH)
== HTTP_BASIC_AUTHENTICATION
)
self._stream_uri = None
@property
def supported_features(self) -> int:
"""Return supported features."""
return SUPPORT_STREAM
@property
def name(self) -> str:
"""Return the name of this camera."""
return f"{self.device.name} - {self.profile.name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
if self.profile.index:
return f"{self.device.info.mac or self.device.info.serial_number}_{self.profile.index}"
return self.device.info.mac or self.device.info.serial_number
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self.device.max_resolution == self.profile.video.resolution.width
async def stream_source(self):
"""Return the stream source."""
return self._stream_uri
async def async_camera_image(self):
"""Return a still image response from the camera."""
image = None
if self.device.capabilities.snapshot:
try:
image = await self.device.device.get_snapshot(
self.profile.token, self._basic_auth
)
except ONVIFError as err:
LOGGER.error(
"Fetch snapshot image failed from %s, falling back to FFmpeg; %s",
self.device.name,
err,
)
if image is None:
ffmpeg = ImageFrame(self.hass.data[DATA_FFMPEG].binary)
image = await asyncio.shield(
ffmpeg.get_image(
self._stream_uri,
output_format=IMAGE_JPEG,
extra_cmd=self.device.config_entry.options.get(
CONF_EXTRA_ARGUMENTS
),
)
)
return image
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
LOGGER.debug("Handling mjpeg stream from camera '%s'", self.device.name)
ffmpeg_manager = self.hass.data[DATA_FFMPEG]
stream = CameraMjpeg(ffmpeg_manager.binary)
await stream.open_camera(
self._stream_uri,
extra_cmd=self.device.config_entry.options.get(CONF_EXTRA_ARGUMENTS),
)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass,
request,
stream_reader,
ffmpeg_manager.ffmpeg_stream_content_type,
)
finally:
await stream.close()
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
uri_no_auth = await self.device.async_get_stream_uri(self.profile)
self._stream_uri = uri_no_auth.replace(
"rtsp://", f"rtsp://{self.device.username}:{self.device.password}@", 1
)
async def async_perform_ptz(
self,
distance,
speed,
move_mode,
continuous_duration,
preset,
pan=None,
tilt=None,
zoom=None,
) -> None:
"""Perform a PTZ action on the camera."""
await self.device.async_perform_ptz(
self.profile,
distance,
speed,
move_mode,
continuous_duration,
preset,
pan,
tilt,
zoom,
)
| |
"""
Author: Maneesh D <maneeshd77@gmail.com>
Doubly-Linked-List
"""
from __future__ import absolute_import
from sys import getsizeof
class Node:
"""
Node in a Doubly-Linked-List
"""
def __init__(self, data, left=None, right=None):
"""
Create a Node
"""
self.data = data
self.left = left
self.right = right
def __str__(self):
"""
String representation of the Node object.
"""
return "[{0}]".format(self.data)
def __repr__(self):
"""
Printable representation of the Node object.
"""
return "%s(%r, %r, %r)" % (self.__class__, self.data, self.left, self.right)
def __len__(self):
"""
Length of a Node. Always returns 1
"""
return 1
def __sizeof__(self):
"""
Overriding to get the size of node data
"""
return getsizeof(self.data)
def __eq__(self, other):
"""
        Overriding the `equals` operation
"""
return self.data == other.data
def __ge__(self, other):
"""
        Overriding the `greater than or equals` operation
"""
return self.data >= other.data
def __le__(self, other):
"""
        Overriding the `less than or equals` operation
"""
return self.data <= other.data
def __lt__(self, other):
"""
        Overriding the `less than` operation
"""
return self.data < other.data
def __gt__(self, other):
"""
Overriding the `greater than` operation
"""
return self.data > other.data
def get_memory_footprint(self):
"""
Total amount of memory used by a Node in Bytes
"""
return self.__sizeof__() + getsizeof(self)
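# Quick sketch of the Node semantics defined above: rich comparisons delegate
# to the wrapped data, and len() of any node is always 1.
#
#   Node(3) < Node(5)        # True
#   Node("a") == Node("a")   # True
#   len(Node([1, 2, 3]))     # 1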
class DoublyLinkedList:
"""
Doubly-Linked-List
"""
def __init__(self, head=None, tail=None):
"""
Create a Doubly-Linked-List
"""
self.head = head
self.tail = tail
def __str__(self):
"""
String representation of linked list.
O(n)
"""
nodes = list()
if self.head and self.tail:
cur_node = self.head
while cur_node:
nodes.append(str(cur_node))
                cur_node = cur_node.right
return "HEAD <=> {0} <=> TAIL".format(" <=> ".join(nodes))
else:
return "HEAD <=> None <=> TAIL"
def __repr__(self):
"""
        Printable representation of the DoublyLinkedList object.
O(1)
"""
return "%s(%r)" % (self.__class__, self.head)
def __len__(self):
"""
Number of nodes in the linked list
O(n)
"""
num_of_nodes = 0
cur_node = self.head
while cur_node:
num_of_nodes += 1
            cur_node = cur_node.right
return num_of_nodes
def __sizeof__(self):
"""
Total size of the linked list
O(n)
"""
size = 0
cur_node = self.head
while cur_node:
size += cur_node.get_memory_footprint()
            cur_node = cur_node.right
return size
def __getitem__(self, search_key):
"""
        Search for the first occurrence of a node with `data` matching `search_key`.
Returns the `Node` if found else returns `None`
Ex: linked_list[1]
O(n)
"""
cur_node = self.head
while cur_node and cur_node.data != search_key:
            cur_node = cur_node.right
# cur will be None if it hits the end of list or list is empty
return cur_node
    def prepend(self, data):
        """
        Insert data at the beginning of the linked list.
        O(1)
        """
        node = Node(data, left=None, right=self.head)
        if self.head:
            self.head.left = node
        else:
            self.tail = node
        self.head = node
    def append(self, data):
        """
        Insert data at the end of the linked list.
        O(1) via the tail pointer.
        """
        node = Node(data, left=self.tail, right=None)
        if self.tail:
            self.tail.right = node
        else:
            self.head = node
        self.tail = node
def search(self, search_key):
"""
Alias/Wrapper for __getitem__
"""
return self.__getitem__(search_key)
    def remove(self, search_key):
        """
        Remove the first occurrence of a node with `data` matching `search_key`.
        O(n)
        """
        node = self.__getitem__(search_key)
        if node is None:
            return
        if node.left:
            node.left.right = node.right
        else:
            self.head = node.right
        if node.right:
            node.right.left = node.left
        else:
            self.tail = node.left
    def reverse(self):
        """
        Reverse the linked list
        O(n)
        """
        cur_node = self.head
        while cur_node:
            cur_node.left, cur_node.right = cur_node.right, cur_node.left
            cur_node = cur_node.left
        self.head, self.tail = self.tail, self.head
def get_memory_footprint(self):
"""
Amount of memory used by the linked list in Bytes
O(n) + O(1)
"""
return self.__sizeof__() + getsizeof(self)
if __name__ == "__main__":
print("Doubly-Linked-List")
print("------------------")
# Create the linked list
print("\nCreating an empty linked list...")
linked_list = DoublyLinkedList()
print(linked_list)
print("")
| |
#!/usr/bin/env python
"""
CLI to mirror all files in a package from one conda channel to another
"""
import os
from argparse import ArgumentParser
from pprint import pformat
import sys
import subprocess
import tempfile
import logging
import traceback
import binstar_client
import slacker
logger = logging.getLogger('mirror.py')
def Popen(cmd, *args, **kwargs):
"""Returns stdout and stderr
Parameters
----------
cmd : list
List of strings to be sent to subprocess.Popen
Returns
-------
    stdout : str
    stderr : str
    returncode : int
    """
# capture the output with subprocess.Popen
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
*args, **kwargs)
    except OSError as exc:
        # subprocess.Popen raises OSError (e.g. executable not found) rather
        # than CalledProcessError; bail out early since `proc` was never set.
        logger.error(exc)
        return '', str(exc), 1
stdout, stderr = proc.communicate()
if stdout:
stdout = stdout.decode()
if stderr:
stderr = stderr.decode()
return stdout, stderr, proc.returncode
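# Usage sketch for the helper above: it returns the decoded output streams
# plus the process exit code.
#
#   stdout, stderr, returncode = Popen(['anaconda', '--version'])
#   if returncode != 0:
#       logger.error(stderr)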
def cli():
p = ArgumentParser("Mirror packages from one channel to a different "
"channel for a given anaconda.org site with an "
"anaconda token. Note: will also work with "
"the BINSTAR_TOKEN environmental variable set or if "
"you have logged in to anaconda via the `anaconda "
"login` command built in to anaconda-client")
p.add_argument(
'packages',
nargs='*',
help="List of package names to mirror from one channel to another"
)
p.add_argument(
'--all',
action='store_true',
help=("Supercedes `packages` argument. Mirror *all* of the packages "
"from `from-owner` to `to-owner`."),
default=False
)
p.add_argument(
'--list',
action='store_true',
help='List all the packages on --from-user and then exit'
)
p.add_argument(
'--from-owner',
nargs='?',
help=("anaconda user to mirror packages from. Also acceptable to "
"pass in user/channel. channel will default to main unless "
"explicitly provided")
)
p.add_argument(
'--from-domain',
nargs='?',
help="anaconda api domain to mirror from. Only relevant if you are "
"not using anaconda.org",
default="https://api.anaconda.org"
)
p.add_argument(
'--from-token',
nargs='?',
help=("anaconda token used to authenticate you to the given anaconda "
"site. Required for uploading unless you are logged in (via "
"`anaconda login`)"),
)
p.add_argument(
'--from-disable-verify',
action='store_false',
help=('ssl verify connection to the `from_site`'),
default=True
)
p.add_argument(
'--to-owner',
nargs='?',
help=("anaconda user to mirror packages to. Also acceptable to "
"pass in user/channel. channel will default to main unless "
"explicitly provided")
)
p.add_argument(
'--to-domain',
nargs='?',
help="anaconda api domain to mirror to. Only relevant if you are "
"not using anaconda.org",
default="https://api.anaconda.org"
)
p.add_argument(
'--to-token',
nargs='?',
help=("anaconda token used to authenticate you to the given anaconda "
"site. Required for uploading unless you are logged in (via "
"`anaconda login`)"),
)
p.add_argument(
'--to-disable-verify',
action='store_false',
        help=('ssl verify connection to the `to_site`'),
default=True
)
p.add_argument(
'--dry-run',
action='store_true',
help=("Figure out which packages would be copied, print it out and "
"then exit")
)
p.add_argument(
'--platform',
nargs="*",
action="store",
help=("Only copy packages for the listed platforms. Options are "
"'osx-32', 'osx-64', 'linux-32', 'linux-64', 'win-32' and "
"'win-64'. Defaults to 'linux-64'"),
default=["linux-64"]
)
p.add_argument(
'--log',
nargs="?",
action="store",
help="File to log to",
)
p.add_argument(
'--slack-token',
action='store',
nargs='?',
help=("Slack authentication token"),
)
p.add_argument(
'--slack-channel',
action='store',
nargs='?',
help=("Slack channel to post to"),
default="bob-the-builder",
)
args = p.parse_args()
args.to_channel = 'main'
args.from_channel = 'main'
# init some logging
if args.log:
stream = logging.StreamHandler()
filehandler = logging.FileHandler(args.log, mode='a')
stream.setLevel(logging.INFO)
filehandler.setLevel(logging.INFO)
logger.addHandler(stream)
logger.addHandler(filehandler)
logger.setLevel(logging.INFO)
logger.info("Logging to {}".format(args.log))
# set up slack integration
slack_token = args.slack_token
slack_channel = args.slack_channel
slack_api = slacker.Slacker(slack_token)
try:
ret = slack_api.auth.test()
except slacker.Error as e:
slack_api = None
if slack_token is None:
logger.info('No slack token provided. Not sending messages to '
'slack')
else:
            logger.error('slack_token {} does not grant access to the {} channel'
''.format(slack_token, slack_channel))
logger.error(traceback.format_exc())
logger.error(e)
else:
logger.info("Slack authentication successful.")
logger.info("Authenticating as the %s user", ret.body['user'])
logger.info("Authenticating to the %s team", ret.body['team'])
logger.info("\nSummary")
logger.info("-------")
logger.info("Mirroring from {} at {}".format(args.from_owner,
args.from_domain))
logger.info("Mirroring to {} at {}".format(args.to_owner, args.to_domain))
logger.info("\nPlatforms")
logger.info("---------")
logger.info(pformat(args.platform))
logger.info("\nPackages list")
logger.info("-------------")
if args.all:
logger.info("**all packages**")
else:
logger.info(pformat(args.packages))
try:
args.from_owner, args.from_channel = args.from_owner.split('/')
except ValueError:
# no extra channel information was passed
pass
try:
args.to_owner, args.to_channel = args.to_owner.split('/')
except ValueError:
# no extra channel information was passed
pass
from_cli = binstar_client.Binstar(token=args.from_token,
domain=args.from_domain,
verify=args.from_disable_verify)
to_cli = binstar_client.Binstar(token=args.to_token,
domain=args.to_domain,
verify=args.to_disable_verify)
# Get the package metadata from the specified anaconda channel
from_packages = from_cli.show_channel(args.from_channel,
args.from_owner)
if ',' in args.platform[0]:
pt = args.platform[0].split(',')
else:
pt = args.platform
from_files = {f['basename']: f for f in from_packages['files']
if f['attrs']['subdir'] in pt}
if args.list:
# print out the list of all files on the source channel and exit
logger.info("\nComplete files list on {} at {}:".format(
args.from_owner, args.from_domain))
logger.info("-------------------")
logger.info(pformat(sorted(list(from_files.keys()))))
sys.exit(0)
# Find the packages on the source channel that match the packages specified
# on the command line
if args.all:
matched = list(from_files.keys())
else:
matched = [f for f in from_files.keys()
for p in args.packages if p in f]
# and print them out
logger.info("\nFiles that exist on {} at {}:".format(args.from_owner,
args.from_domain))
logger.info(pformat(sorted(matched)))
# get the package metadata on the target channel
to_packages = to_cli.show_channel(args.to_channel, args.to_owner)
to_files = {f['basename']: f for f in to_packages['files']}
# figure out which packages already exist
already_exist = [f for f in matched if f in to_files.keys()]
logger.info("\nFiles that already exist on {} at {}:".format(
args.to_owner, args.to_domain))
logger.info(pformat(sorted(already_exist)))
# figure out which of these packages actually need to be copied
to_copy = [f for f in matched if f not in to_files.keys()]
# print out the packages that need to be copied
logger.info("\nFiles to be uploaded to {} at {}:".format(args.to_owner,
args.to_domain))
logger.info(pformat(sorted(to_copy)))
if args.dry_run:
# don't upload anything. Print out why we are quitting and then quit
logger.info("\nExiting because --dry-run flag is set")
sys.exit(0)
download_dir = tempfile.TemporaryDirectory(prefix='mirror')
upload_cmd = ['anaconda',
'--site', args.to_domain,
'-t', args.to_token,
'upload',
'-u', args.to_owner]
for copy_filename in to_copy:
# get the full metadata
md = from_files[copy_filename]
(login, package_name,
version, platform, filename) = md['full_name'].split('/')
destination = os.path.join(download_dir.name, filename)
logger.info("Downloading {} to {}".format(md['basename'],
destination))
ret = from_cli.download(
login, package_name, md['version'], md['basename'])
with open(destination, 'wb') as f:
f.write(ret.content)
assert os.stat(destination).st_size == md['size']
message = '{} to {} at {}\n'.format(filename,
args.to_owner,
args.to_domain)
logger.info('Uploading {}'.format(message))
stdout, stderr, returncode = Popen(upload_cmd + [destination])
if returncode == 0 and slack_api:
slack_api.chat.post_message(
slack_channel, "Mirrored {}".format(message))
else:
message = "Upload failed for " + message
message += "\n" + "stderr from {}".format(upload_cmd)
message += "\n" + pformat(stderr)
logger.error(message)
#if slack_api:
# message = message + ('\n\nCheck the log at {} on {}'
# ''.format(args.log, os.uname()))
# slack_api.chat.post_message(slack_channel, message)
#sys.exit(1)
logger.info("Script complete.")
if __name__ == "__main__":
cli()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Commands for:
- building and publishing virtual environments
- sync'ing application code
- managing (e.g. start, stop, etc.) any service
on hosts.
Here's an example of updating all running hosts attached to a load-balancer:
.. code:: bash
    fab hosts:lb=ipypi-test-i code_sync
Here's one where we just echo which hosts have been selected by our criteria:
.. code:: bash
    fab hosts:env=test who
Here's one where we say exactly which host(s) to target and disable their
service(s):
.. code:: bash
    fab -H i-2b6d1a58.internalpypi.io,ipypi-test-10-1-1-01.internalpypi.io,10.1.1.56 svc_disable
"""
from __future__ import unicode_literals
__version__ = '0.1.0'
import os
import time
import boto.ec2.elb
from fabric import api
# statics
class Context(dict):
def __getattr__(self, item):
return self[item]
ctx = Context(
app_name=None,
STARTUP_DELAY_SECS=5,
WAIT_DEFAULT_TIMEOUT_SECS=60,
WAIT_POLL_FREQ_SECS=5,
HEALTH_FILE='/var/lib/app/health',
DOMAIN='example.com',
WORKERS=[],
AWS_ENVIRONMENT_TAG='ChefEnvironment',
AWS_DISABLED_TAG='Disabled',
AWS_API_SUBNET_IDS=[],
AWS_WORKER_SUBNET_IDS=[],
AWS_VPC_ID='vpc-asd',
AWS_GROUP_ID=None,
AWS_ACCESS_KEY_ID=os.environ.get('AWS_ACCESS_KEY_ID', None),
AWS_SECRET_ACCESS_KEY=os.environ.get('AWS_SECRET_ACCESS_KEY', None),
S3_BUCKET='company.debs',
S3_ENDPOINT='s3-us-west-1.amazonaws.com',
)
# environment
api.env.user = os.environ.get('USER')
api.env.region_name = 'us-east-1'
api.env.instances = None
api.env.lbs = None
# common tasks
@api.task
def hosts(env=None, lb=None, subnets=None):
"""
Selects hosts to target.
:param env: The environment from which hosts should be *included*. All by
default. Should be one of 'prod', 'test', 'stage', 'dev'.
:param lb: The load-balancer whose attached hosts should be *included*.
"""
populate_lbs()
if lb:
lb = resolve_lb(lb)
tags = {}
if env:
tags['tag:' + ctx.AWS_ENVIRONMENT_TAG] = env
    populate_instances(tags=tags, lb=lb, subnets=subnets)
# HACK: dns resolution does not seem to be working for all instances
#api.env.hosts = [i.id + '.' + ctx.DOMAIN for i in api.env.instances]
api.env.hosts = [
inst.interfaces[0].private_ip_address for inst in api.env.instances
]
for instance in api.env.instances:
print instance, instance.tags
@api.task
def who():
"""
Echos hosts that will be targeted by commands.
"""
pass
@api.task
def code_sync(branch='release', commit='HEAD', clear_cached='t'):
clear_cached = parse_flag(clear_cached)
with api.cd('~/' + ctx.app_name):
api.run('git fetch')
api.run('git checkout ' + branch)
        if commit != 'HEAD':
swallow = api.run('git pull')
result = api.run(
'git branch --contains {} | grep {} | wc -l'.format(
commit, branch,
)
)
if int(result.strip()) == 0:
raise ValueError(
'Commit "{}" is not a part of "{}" branch!'.format(
commit, branch
)
)
api.run('git checkout ' + commit)
if clear_cached:
with api.settings(shell='bash -i -c'):
api.run("find -type f -regex '.+\.pyc' -exec rm -rf {} \;")
@api.task
def code_stat():
with api.cd('~/{name}'.format(name=ctx.app_name)):
api.run('echo `git rev-parse --abbrev-ref HEAD`:`git rev-parse '
'--verify HEAD`')
@api.task
@api.parallel
def shells():
"""
Ghetto detects whether any shell(s) are running.
"""
with api.settings(shell='bash -i -c'):
api.run('[ -z `pgrep -f "^python.*shell$" -u deploy` ]')
@api.task
def migrate_db():
with api.cd('~/' + ctx.app_name):
with api.settings(shell='bash -i -c'):
api.run('./scripts/migrate-db upgrade')
# service tasks
@api.task
def svc_hosts(env=None, lb=None):
    hosts(env=env, lb=lb, subnets=ctx.AWS_API_SUBNET_IDS)
@api.task
def svc_start(skip_enable='f', wait='t'):
"""
Starts the service.
:param skip_enable: Flag indicating whether to skip enabling the host.
:param wait: Flag indicating whether to wait for host to roll into its lbs.
"""
    api.run('service {} start; sleep {}'.format(
        ctx.app_name, ctx.STARTUP_DELAY_SECS
    ))
skip_enable = parse_flag(skip_enable)
if not skip_enable:
svc_enable()
wait_in_lbs(parse_wait(wait))
@api.task
def svc_stop(skip_disable='f', wait='t'):
"""
Stops the service.
:param skip_disable: Flag indicating whether to skip disabling the host.
:param wait: Flag indicating whether to wait for host to fall out of its
load-balancers.
"""
skip_disable = parse_flag(skip_disable)
if not skip_disable:
svc_disable()
wait_out_lbs(parse_wait(wait))
@api.task
def svc_reload():
"""
Reloads the service.
"""
api.run('service {} reload'.format(ctx.app_name))
@api.task
def svc_restart():
"""
Hard restarts the service.
"""
svc_disable()
api.run('service {} restart; sleep {}'.format(
ctx.app_name, ctx.STARTUP_DELAY_SECS
))
svc_enable()
@api.task
def svc_up(branch='release', commit='HEAD', restart='f'):
"""
Checks out code and reload or restarts the service.
:param branch: Branch to checkout. Defaults to "release".
:param commit: Commit hash within the branch to sync to, defaults to "HEAD".
:param restart: Flag indicating whether the service should be restarted or
just reloaded (the default).
"""
restart = parse_flag(restart)
code_sync(branch, commit)
# TODO: enable this
#migrate_db()
if restart:
svc_restart()
else:
svc_reload()
svc_stat()
@api.task
def svc_stat():
"""
Prints service status.
"""
code_stat()
api.run('service {} status'.format(ctx.app_name))
    api.run('curl 127.0.0.1:5000/health')
@api.task
def svc_enable(wait='t'):
"""
    Enables the service for traffic.
:param wait: Flag indicating whether to wait for host to roll into its
load-balancers.
"""
api.run('echo -n "finding your center" > {0}'.format(ctx.HEALTH_FILE))
wait_in_lbs(parse_wait(wait))
@api.task
def svc_disable(wait='t'):
"""
Disables service from serving traffic.
:param wait: Flag indicating whether to wait for host to fall out of its
load-balancers.
"""
wait = parse_wait(wait)
api.run('[ ! -f {0} ] || rm {0}'.format(ctx.HEALTH_FILE))
wait_out_lbs(wait)
# worker helpers
@api.task
def wrk_hosts(env=None, lb=None):
    hosts(env=env, lb=lb, subnets=ctx.AWS_WORKER_SUBNET_IDS)
@api.task
def wrk_up(branch='release', commit='HEAD'):
"""
Checks out code and restarts all workers.
:param branch: Branch to checkout. Defaults to "release".
:param commit: Commit hash within the branch to sync to, defaults to "HEAD".
"""
code_sync(branch, commit)
wrk_restart()
wrk_stat()
@api.task
def wrk_stat(*workers):
"""
Prints status about the requested workers, or all if none are specified.
"""
code_stat()
for name in workers or ctx.WORKERS:
api.run('supervisorctl status {}; sleep 1'.format(name))
@api.task
def wrk_start(*workers):
"""
Starts the requested workers, or all if none are specified.
"""
for name in workers or ctx.WORKERS:
api.run('supervisorctl start {}; sleep 1'.format(name))
@api.task
def wrk_stop(*workers):
"""
Stops the requested workers, or all if none are specified.
"""
for name in workers or ctx.WORKERS:
api.run('supervisorctl stop {}; sleep 1'.format(name))
@api.task
def wrk_restart(*workers):
"""
Restarts the requested workers, or all if none are specified.
"""
for name in workers or ctx.WORKERS:
        api.run('supervisorctl restart {}; sleep 1'.format(name))
# package tasks
@api.task
def pkg_build(version, branch='release', commit='HEAD', publish=False):
"""
Builds and downloads a deb of app_name (w/o the virtualenv).
:param version: Release version (e.g. 1.0.0).
:param branch: git branch from which to package. Defaults to 'release'.
:param commit: git commit commit from which to package. Defaults to 'HEAD'.
"""
code_sync(branch=branch, commit=commit)
if commit == 'HEAD':
with api.cd('~/' + ctx.app_name):
commit = api.run('git rev-parse HEAD')
with api.cd('~'):
api.run(
'[ ! -f {app_name}_1.{version}_all.deb ] || '
'rm -f {app_name}_1.{version}_all.deb'
.format(app_name=ctx.app_name, version=version)
)
rv = api.run(
'fpm -s dir -t deb -n {package_name} -v {version} '
'-a all -x "*.git" -x "*.pyc" '
'--description "{app_name} @ {branch}:{commit}" '
'--deb-user={user} '
'--deb-group={user} '
'~/{package_name}'
.format(
app_name=ctx.app_name,
package_name=ctx.app_name,
version=version,
user=api.env.user,
branch=branch,
commit=commit,
)
)
file_name = rv.split('"')[-2]
if publish:
pkg_publish(file_name)
@api.task
def pkg_build_venv(version, branch='release', commit='HEAD', publish=False):
"""
Builds and downloads a deb of app_name virtualenv (w/o the lib).
:param version: Release version (e.g. 1.0.0).
:param branch: git branch from which to package. Defaults to 'release'.
:param commit: git commit commit from which to package. Defaults to 'HEAD'.
"""
code_sync(commit=commit, branch=branch)
if commit == 'HEAD':
with api.cd('~/' + ctx.app_name):
commit = api.run('git rev-parse HEAD')
with api.cd('~'):
api.run(
'[ ! -f {app_name}-venv_{version}_amd64.deb ] || '
'rm -f {app_name}-venv_{version}_amd64.deb'
.format(app_name=ctx.app_name, version=version)
)
rv = api.run(
'fpm -s python -t deb -n {app} -v {version} '
'--description "{app_name} virtual environment @ {branch}:{commit}" '
'--deb-user={user} '
'--deb-group={user} '
'-s dir ~/.virtualenvs/{venv} '
.format(
app_name=ctx.app_name,
app=ctx.app_name + '_venv',
venv=ctx.app_name,
version=version,
user=api.env.user,
branch=branch,
commit=commit,
)
)
file_name = rv.split('"')[-2]
if publish:
pkg_publish(file_name)
@api.task
def pkg_publish(file_name):
"""
Uploads a deb package to the s3 bucket backing our apt repo. Note that:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
must both be set in your environment *and* have write permissions to the
s3 bucket.
:param file_name: Name of built deb file to publish.
"""
if ctx.AWS_ACCESS_KEY_ID is None:
raise Exception('Your environment is missing AWS_ACCESS_KEY_ID')
if ctx.AWS_SECRET_ACCESS_KEY is None:
raise Exception('Your environment is missing AWS_SECRET_ACCESS_KEY')
with api.cd('~'):
api.run(
'deb-s3 publish {file_name} '
'--bucket={s3_bucket} '
'--access-key-id={s3_access_key} '
'--secret-access-key={s3_secret_key} '
'--endpoint={s3_endpoint} '
'--visibility=private '
'--arch={arch}'
.format(
file_name=file_name,
s3_bucket=ctx.S3_BUCKET,
s3_access_key=ctx.AWS_ACCESS_KEY_ID,
s3_secret_key=ctx.AWS_SECRET_ACCESS_KEY,
s3_endpoint=ctx.S3_ENDPOINT,
arch='amd64',
)
)
# generic helpers
def parse_flag(flag):
    if isinstance(flag, bool):
        return flag
    if flag.lower() in ('1', 't', 'true'):
        return True
    if flag.lower() in ('0', 'f', 'false'):
        return False
    raise ValueError('Invalid flag value "{}"'.format(flag))
def parse_wait(raw):
try:
return int(raw)
except (ValueError, TypeError):
flag = parse_flag(raw)
if flag:
return ctx.WAIT_DEFAULT_TIMEOUT_SECS
return 0
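# Expected semantics of the two parsers above:
#
#   parse_flag('t')   -> True
#   parse_flag('0')   -> False
#   parse_wait('30')  -> 30
#   parse_wait('t')   -> ctx.WAIT_DEFAULT_TIMEOUT_SECS (i.e. 60)
#   parse_wait('f')   -> 0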
# aws helpers
def populate_instances(
tags=None,
lb=None,
exclude_disabled=True,
        subnets=None,
):
def local_filter(instance):
        if subnets and instance.subnet_id not in subnets:
return False
        if (exclude_disabled and
                instance.tags.get(ctx.AWS_DISABLED_TAG, None) is not None):
            return False
if lb:
return any(instance.id == i.id for i in lb.instances)
return True
if api.env.instances:
return api.env.instances
remote_filter = {
'vpc-id': ctx.AWS_VPC_ID,
'instance-state-name': 'running',
}
if ctx.AWS_GROUP_ID:
remote_filter['instance.group-id'] = ctx.AWS_GROUP_ID
if tags:
remote_filter.update(tags)
cxn = boto.ec2.connect_to_region(api.env.region_name)
instances = [
instance.instances[0]
for instance in cxn.get_all_instances(filters=remote_filter)
if local_filter(instance.instances[0])
]
api.env.instances = instances
return api.env.instances
def populate_lbs():
if api.env.lbs is not None:
return api.env.lbs
cxn = boto.ec2.elb.connect_to_region(api.env.region_name)
api.env.lbs = [
lb for lb in cxn.get_all_load_balancers()
if lb.instances is not None
]
return api.env.lbs
def resolve_lb(hint):
return resolve_lbs(hint)[0]
def resolve_lbs(*hints):
mapping = dict((lb.name, lb) for lb in api.env.lbs)
lbs = []
for hint in hints:
if hint in mapping:
lbs.append(mapping[hint])
continue
raise ValueError('Unknown load balancer "{}"'.format(hint))
return lbs
def instance_lbs(instance):
return [
lb for lb in api.env.lbs
if any(instance.id == i.id for i in lb.instances)
]
def current_instance():
populate_instances()
populate_lbs()
host_string = api.env.host_string
for i in api.env.instances:
if 'Name' in i.tags and i.tags['Name'].startswith(host_string):
break
if i.private_ip_address.startswith(host_string):
break
if i.private_ip_address.replace('.', '-') in host_string:
break
else:
i = None
return i
def wait_in_lbs(timeout):
def in_service(states):
return (
            bool(states) and
states[0].state == 'InService'
)
wait_xx_lbs(timeout, in_service)
def wait_out_lbs(timeout):
def out_of_service(states):
return (
not states or
states[0].state == 'OutOfService'
)
wait_xx_lbs(timeout, out_of_service)
def wait_xx_lbs(timeout, health):
instance = current_instance()
if instance is None:
return
lbs = instance_lbs(instance)
et = time.time() + timeout
while True:
lbs = [
lb for lb in lbs
if not health(lb.get_instance_health([instance.id]))
]
if not lbs:
break
if time.time() > et:
raise Exception(
'Timed out after {} sec(s) waiting on host "{}" '
'health for lb(s) {}'.format(
timeout,
api.env.host_string,
', '.join((lb.name for lb in lbs))
)
)
print '[%s] local: waiting %s sec(s) for lb(s) %s' % (
api.env.host_string, ctx.WAIT_POLL_FREQ_SECS, ', '.join(
(lb.name for lb in lbs)
),
)
time.sleep(ctx.WAIT_POLL_FREQ_SECS)
| |
"""Implementation of JSONEncoder
"""
import re
from __pypy__.builders import StringBuilder
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(b'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
#ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
INFINITY = float('inf')
def raw_encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return ESCAPE.sub(replace, s)
encode_basestring = lambda s: '"' + raw_encode_basestring(s) + '"'
def raw_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u{0:04x}'.format(n)
#return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
return ESCAPE_ASCII.sub(replace, s)
encode_basestring_ascii = lambda s: '"' + raw_encode_basestring_ascii(s) + '"'
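# Sketch of the two escaping flavors above, with results shown as raw JSON
# text:
#
#   encode_basestring('say "hi"\n')   -> "say \"hi\"\n"
#   encode_basestring_ascii('café')   -> "caf\u00e9"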
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str | string |
+-------------------+---------------+
| int, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, *, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is true, the output is guaranteed to be str
objects with all incoming non-ASCII characters escaped. If
ensure_ascii is false, the output can contain non-ASCII characters.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be an (item_separator, key_separator)
tuple. The default is (', ', ': ') if *indent* is ``None`` and
(',', ': ') otherwise. To get the most compact JSON representation,
you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
if ensure_ascii:
self.__encoder = raw_encode_basestring_ascii
else:
self.__encoder = raw_encode_basestring
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
elif indent is not None:
self.item_separator = ','
if default is not None:
self.default = default
if indent is not None and not isinstance(indent, str):
self.indent_str = ' ' * indent
else:
self.indent_str = indent
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
"""
raise TypeError("Object of type '%s' is not JSON serializable" %
o.__class__.__name__)
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> from json.encoder import JSONEncoder
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
if self.check_circular:
markers = {}
else:
markers = None
builder = StringBuilder()
self.__encode(o, markers, builder, 0)
return builder.build()
def __emit_indent(self, builder, _current_indent_level):
if self.indent is not None:
_current_indent_level += 1
newline_indent = '\n' + self.indent_str * _current_indent_level
separator = self.item_separator + newline_indent
builder.append(newline_indent)
else:
separator = self.item_separator
return separator, _current_indent_level
def __emit_unindent(self, builder, _current_indent_level):
if self.indent is not None:
builder.append('\n')
builder.append(self.indent_str * (_current_indent_level - 1))
def __encode(self, o, markers, builder, _current_indent_level):
if isinstance(o, str):
builder.append('"')
builder.append(self.__encoder(o))
builder.append('"')
elif o is None:
builder.append('null')
elif o is True:
builder.append('true')
elif o is False:
builder.append('false')
elif isinstance(o, int):
# Subclasses of int/float may override __str__, but we still
# want to encode them as integers/floats in JSON. One example
# within the standard library is IntEnum.
builder.append(int.__str__(o))
elif isinstance(o, float):
builder.append(self.__floatstr(o))
elif isinstance(o, (list, tuple)):
if not o:
builder.append('[]')
return
self.__encode_list(o, markers, builder, _current_indent_level)
elif isinstance(o, dict):
if not o:
builder.append('{}')
return
self.__encode_dict(o, markers, builder, _current_indent_level)
else:
self.__mark_markers(markers, o)
res = self.default(o)
self.__encode(res, markers, builder, _current_indent_level)
self.__remove_markers(markers, o)
return res
def __encode_list(self, l, markers, builder, _current_indent_level):
self.__mark_markers(markers, l)
builder.append('[')
first = True
separator, _current_indent_level = self.__emit_indent(builder,
_current_indent_level)
for elem in l:
if first:
first = False
else:
builder.append(separator)
self.__encode(elem, markers, builder, _current_indent_level)
del elem # XXX grumble
self.__emit_unindent(builder, _current_indent_level)
builder.append(']')
self.__remove_markers(markers, l)
def __encode_dict(self, d, markers, builder, _current_indent_level):
self.__mark_markers(markers, d)
first = True
builder.append('{')
separator, _current_indent_level = self.__emit_indent(builder,
_current_indent_level)
if self.sort_keys:
items = sorted(d.items(), key=lambda kv: kv[0])
else:
items = d.items()
        for key, v in items:
            if isinstance(key, str):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = self.__floatstr(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif isinstance(key, int):
                # see comment for int in __encode
                key = int.__str__(key)
            elif self.skipkeys:
                continue
            else:
                raise TypeError("key " + repr(key) + " is not a string")
            # Emit the separator only after the key survives the checks above;
            # otherwise a skipped first key would leave a dangling separator.
            if first:
                first = False
            else:
                builder.append(separator)
            builder.append('"')
            builder.append(self.__encoder(key))
            builder.append('"')
builder.append(self.key_separator)
self.__encode(v, markers, builder, _current_indent_level)
del key
del v # XXX grumble
self.__emit_unindent(builder, _current_indent_level)
builder.append('}')
self.__remove_markers(markers, d)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
return self.__iterencode(o, markers, 0)
def __floatstr(self, o):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = 'NaN'
elif o == INFINITY:
text = 'Infinity'
elif o == -INFINITY:
text = '-Infinity'
else:
return float.__repr__(o)
if not self.allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
def __mark_markers(self, markers, o):
if markers is not None:
if id(o) in markers:
raise ValueError("Circular reference detected")
markers[id(o)] = None
def __remove_markers(self, markers, o):
if markers is not None:
del markers[id(o)]
def __iterencode_list(self, lst, markers, _current_indent_level):
if not lst:
yield '[]'
return
self.__mark_markers(markers, lst)
buf = '['
if self.indent is not None:
_current_indent_level += 1
newline_indent = '\n' + self.indent_str * _current_indent_level
separator = self.item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = self.item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, str):
yield buf + '"' + self.__encoder(value) + '"'
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, int):
# see comment for int in __encode
yield buf + int.__str__(value)
elif isinstance(value, float):
yield buf + self.__floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = self.__iterencode_list(value, markers,
_current_indent_level)
elif isinstance(value, dict):
chunks = self.__iterencode_dict(value, markers,
_current_indent_level)
else:
chunks = self.__iterencode(value, markers,
_current_indent_level)
yield from chunks
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + self.indent_str * _current_indent_level
yield ']'
self.__remove_markers(markers, lst)
def __iterencode_dict(self, dct, markers, _current_indent_level):
if not dct:
yield '{}'
return
self.__mark_markers(markers, dct)
yield '{'
if self.indent is not None:
_current_indent_level += 1
newline_indent = '\n' + self.indent_str * _current_indent_level
item_separator = self.item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = self.item_separator
first = True
if self.sort_keys:
items = sorted(dct.items(), key=lambda kv: kv[0])
else:
items = dct.items()
for key, value in items:
if isinstance(key, str):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = self.__floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, int):
# see comment for int in __encode
key = int.__str__(key)
elif self.skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield '"' + self.__encoder(key) + '"'
yield self.key_separator
if isinstance(value, str):
yield '"' + self.__encoder(value) + '"'
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, int):
yield int.__str__(value)
elif isinstance(value, float):
yield self.__floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = self.__iterencode_list(value, markers,
_current_indent_level)
elif isinstance(value, dict):
chunks = self.__iterencode_dict(value, markers,
_current_indent_level)
else:
chunks = self.__iterencode(value, markers,
_current_indent_level)
yield from chunks
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + self.indent_str * _current_indent_level
yield '}'
self.__remove_markers(markers, dct)
def __iterencode(self, o, markers, _current_indent_level):
if isinstance(o, str):
yield '"' + self.__encoder(o) + '"'
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, int):
yield int.__str__(o)
elif isinstance(o, float):
yield self.__floatstr(o)
elif isinstance(o, (list, tuple)):
yield from self.__iterencode_list(o, markers, _current_indent_level)
elif isinstance(o, dict):
yield from self.__iterencode_dict(o, markers, _current_indent_level)
else:
self.__mark_markers(markers, o)
obj = self.default(o)
yield from self.__iterencode(obj, markers, _current_indent_level)
self.__remove_markers(markers, o)
# overwrite some helpers here with more efficient versions
try:
from _pypyjson import raw_encode_basestring_ascii
except ImportError:
pass
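# Minimal smoke test of the encoder above (illustrative only; this module
# targets PyPy, since StringBuilder comes from __pypy__):
#
#   enc = JSONEncoder(indent=2, sort_keys=True)
#   enc.encode({'b': [1, 2.5, None], 'a': True})
#   -> '{\n  "a": true,\n  "b": [\n    1,\n    2.5,\n    null\n  ]\n}'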
| |
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for filtering (via class. accuracy) the Federated EMNIST dataset."""
import csv
import functools
import os.path
import tensorflow as tf
import tensorflow_federated as tff
from gans.experiments.emnist import emnist_data_utils
BASE_URL = 'https://storage.googleapis.com/tff-experiments-public/'
CSVS_BASE_PATH = 'gans/csvs/'
@functools.lru_cache(maxsize=1)
def get_unfiltered_client_data_for_training(batch_size):
r"""Returns `tff.simulation.datasets.ClientData` of unfiltered Federated EMNIST data.
The data returned will neither be filtered by user nor by example, so training
can take place with all users and all examples for each user.
Args:
batch_size: Batch size of output dataset. If None, don't batch.
Returns:
    A `tff.simulation.datasets.ClientData` of real images of numbers/letters.
    The data has not been filtered.
"""
return get_filtered_client_data_for_training(None, None, batch_size)
@functools.lru_cache(maxsize=1)
def get_filtered_by_user_client_data_for_training(invert_imagery_probability,
accuracy_threshold,
batch_size,
cache_dir=None):
r"""Returns `tff.simulation.datasets.ClientData` of filtered Federated EMNIST data.
Input data gets filtered on a per-user basis; users get selected via the
`accuracy_threshold` criterion, and then training can take place with all
examples from only the selected users.
Args:
    invert_imagery_probability: The probability that a user's image data has
      pixel intensity inverted. E.g., `0p1` corresponds to 0.1, or a 10%
      probability that a user's data is flipped. Note that to save time in
experiment execution, this is precomputed via the ./filter_users.py
script, and the selection here controls which file to read from.
accuracy_threshold: Indicates the classification threshold by which a user
      is included in the training population. E.g., `lt0p882` means any user
      whose data cumulatively classifies with <0.882 accuracy would be used for
      training; `gt0p939` means any user whose data cumulatively classifies
with >0.939 accuracy would be used for training. To save time in
experiment execution, this assignment is precomputed via the
./filter_users.py script, and the flag selection here is to indicate which
file to read from.
batch_size: Batch size of output dataset. If None, don't batch.
cache_dir: (Optional) base directory to cache the downloaded files. If None,
caches in Keras' default cache directory.
Returns:
    A `tff.simulation.datasets.ClientData` of real images of numbers/letters.
    The data has been filtered by user classification accuracy as per the
    input arguments.
"""
path_to_data = os.path.join(CSVS_BASE_PATH,
'inv_prob_{}'.format(invert_imagery_probability),
'filter_by_user',
'acc_{}'.format(accuracy_threshold))
try:
filename = 'client_ids.csv'
path_to_read_inversions_csv = tf.keras.utils.get_file(
fname=filename,
cache_subdir=path_to_data,
cache_dir=cache_dir,
origin=os.path.join(BASE_URL, path_to_data, filename))
except Exception:
msg = ('A URL fetch failure was encountered when trying to retrieve '
'filter-by-user generated csv file with invert_imagery_probability '
'`{}` and accuracy_threshold `{}`. Please run the ./filter_users.py '
'script to generate the missing data, and use the `cache_dir` '
'argument to this method to specify the location of the generated '
'data csv file.'.format(invert_imagery_probability,
accuracy_threshold))
raise ValueError(msg)
return get_filtered_client_data_for_training(path_to_read_inversions_csv,
None, batch_size)
@functools.lru_cache(maxsize=1)
def get_filtered_by_example_client_data_for_training(invert_imagery_probability,
min_num_examples,
example_class_selection,
batch_size,
cache_dir=None):
r"""Returns `tff.simulation.datasets.ClientData` of filtered Federated EMNIST data.
Input data gets filtered on a per-example basis. Any user meeting the
`min_num_examples` criterion is included. The examples are limited to those
that classified according to the `example_class_selection` criterion.
Args:
    invert_imagery_probability: The probability that a user's image data has
      pixel intensity inverted. E.g., `0p1` corresponds to 0.1, or a 10%
      probability that a user's data is flipped. Note that to save time in
      experiment execution, this is precomputed via the ./filter_examples.py
      script, and the selection here controls which file to read from.
min_num_examples: Indicates the minimum number of examples that are either
correct or incorrect (as set by the `example_class_selection` argument) in
      a client's local dataset for that client to be considered as part of the
      training sub-population. To save time in experiment execution, this
assignment is precomputed via the ./filter_examples.py script, and the
flag selection here is to indicate which file to read from.
    example_class_selection: Indicates whether to train on a client's correct
or incorrect examples. To save time in experiment execution, this
assignment is precomputed via the ./filter_examples.py script, and the
flag selection here is to indicate which file to read from.
batch_size: Batch size of output dataset. If None, don't batch.
cache_dir: (Optional) base directory to cache the downloaded files. If None,
caches in Keras' default cache directory.
Returns:
    A `tff.simulation.datasets.ClientData` of real images of numbers/letters.
    The data has been filtered as per the input arguments (either not filtered,
    filtered by user classification accuracy, or filtered by example
    classification correctness).
"""
path_to_data = os.path.join(CSVS_BASE_PATH,
'inv_prob_{}'.format(invert_imagery_probability),
'filter_by_example',
'min_num_examples_{}'.format(min_num_examples),
'{}'.format(example_class_selection))
try:
filename = 'client_ids.csv'
path_to_read_inversions_csv = tf.keras.utils.get_file(
fname=filename,
cache_subdir=path_to_data,
cache_dir=cache_dir,
origin=os.path.join(BASE_URL, path_to_data, filename))
filename = 'example_indices_map.csv'
path_to_read_example_indices_csv = tf.keras.utils.get_file(
fname=filename,
cache_subdir=path_to_data,
cache_dir=cache_dir,
origin=os.path.join(BASE_URL, path_to_data, filename))
except Exception:
msg = ('A URL fetch failure was encountered when trying to retrieve '
'filter-by-example generated csv files with '
'invert_imagery_probability `{}`, min_num_examples `{}`, and '
'example_class_selection `{}`. Please run the ./filter_examples.py '
'script to generate the missing data, and use the `cache_dir` '
'argument to this method to specify the location of the generated '
'data csv files.'.format(invert_imagery_probability,
min_num_examples, example_class_selection))
raise ValueError(msg)
return get_filtered_client_data_for_training(
path_to_read_inversions_csv, path_to_read_example_indices_csv, batch_size)
def get_filtered_client_data_for_training(path_to_read_inversions_csv,
path_to_read_example_indices_csv,
batch_size):
"""Form ClientData using paths to pixel inversion, example selection data."""
raw_client_data = emnist_data_utils.create_real_images_tff_client_data(
'train')
client_ids = raw_client_data.client_ids
selected_client_ids_inversion_map = None
client_ids_example_indices_map = None
# If filter-by-user or filter-by-example, load the csv data into maps, and
# update the client IDs to just the users that will be part of training.
if path_to_read_inversions_csv is not None:
selected_client_ids_inversion_map, client_ids_example_indices_map = (
_get_client_ids_inversion_and_example_indices_maps(
path_to_read_inversions_csv, path_to_read_example_indices_csv))
client_ids = list(selected_client_ids_inversion_map.keys())
def _get_dataset(client_id):
"""Retrieve/preprocess a tf.data.Dataset for a given client_id."""
raw_ds = raw_client_data.serializable_dataset_fn(client_id)
invert_imagery = False
if selected_client_ids_inversion_map:
invert_imagery = selected_client_ids_inversion_map[client_id]
# If filter-by-example, do it here.
if client_ids_example_indices_map:
raw_ds = _filter_by_example(raw_ds, client_ids_example_indices_map,
client_id)
return emnist_data_utils.preprocess_img_dataset(
raw_ds,
invert_imagery=invert_imagery,
include_label=False,
batch_size=batch_size,
shuffle=True,
repeat=False)
return tff.simulation.datasets.ClientData.from_clients_and_tf_fn(
client_ids, _get_dataset)
def _filter_by_example(raw_ds, client_ids_example_indices_map, client_id):
"""Form a tf.data.Dataset from the examples in the map for the client_id."""
example_indices = client_ids_example_indices_map[client_id]
# B/c the csv stores the list as a string, we need to do some slightly
# kludgey conversion from a string to a list. (We strip off the first and
# last characters in the string, which are [ and ], and then split on
# commas as delimiters, to recover the original list of ints.)
example_indices = [int(s) for s in example_indices[1:-1].split(',')]
# Get the elements (OrderedDicts) in the raw data which are at the indices
# indicated by the list above. This creates a dictionary of lists such that
# the i-th element in each list corresponds to the i-th example. This allows
# us to use the serializable Dataset.from_tensor_slices.
elements = tf.nest.map_structure(lambda spec: [], raw_ds.element_spec)
index = 0
for element in raw_ds:
if index in example_indices:
tf.nest.map_structure(lambda es, e: es.append(e), elements, element)
index += 1
return tf.data.Dataset.from_tensor_slices(elements)
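# An alternative to the manual strip/split above -- a minimal sketch using the
# standard library's ast.literal_eval, which evaluates the stringified list
# directly (this assumes the csv cell really is a Python-style list literal):
#
#     import ast
#     example_indices = ast.literal_eval(client_ids_example_indices_map[client_id])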
def _get_client_ids_inversion_and_example_indices_maps(
path_to_read_inversions_csv, path_to_read_example_indices_csv):
"""Return paths to csv files storing maps indicating the data to train on."""
if path_to_read_inversions_csv is None:
raise ValueError(
'No path provided to the CSV file that stores map from client ids to '
'image inversion data.')
# Load (from CSV file) the specific client IDs that the GAN will train on, and
# whether or not the images on that client are inverted.
selected_client_ids_inversion_map = {}
with tf.io.gfile.GFile(path_to_read_inversions_csv, 'r') as csvfile:
csvreader = csv.reader(csvfile)
for [key, val] in csvreader:
selected_client_ids_inversion_map[key] = (val == 'True')
# If specified (via CSV file), the specific examples on each client ID that
# the GAN will be trained on.
client_ids_example_indices_map = None
if path_to_read_example_indices_csv:
client_ids_example_indices_map = {}
with tf.io.gfile.GFile(path_to_read_example_indices_csv, 'r') as csvfile:
csvreader = csv.reader(csvfile)
for [key, val] in csvreader:
client_ids_example_indices_map[key] = val
set_1 = set(client_ids_example_indices_map.keys())
set_2 = set(selected_client_ids_inversion_map.keys())
symmetric_diff = set_1 ^ set_2
if symmetric_diff:
raise ValueError(
'The CSV files at path_to_read_inversions_csv and '
'path_to_read_example_indices_csv contain different keys.')
return selected_client_ids_inversion_map, client_ids_example_indices_map
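# A minimal usage sketch for the loader above (argument values are purely
# illustrative and must correspond to csv files generated beforehand by the
# ./filter_examples.py script):
#
#     client_data = get_filtered_by_example_client_data_for_training(
#         invert_imagery_probability='0p1',
#         min_num_examples=5,
#         example_class_selection='incorrect',
#         batch_size=32)
#     dataset = client_data.create_tf_dataset_for_client(
#         client_data.client_ids[0])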
| |
import random
import matplotlib
import matplotlib.pyplot as plt
import time
#lower_bust = 31.235
#higher_profit = 63.208
lower_bust = 19.00
higher_profit = 69.00
# back to 1,000
sampleSize = 1000
startingFunds = 10000
wagerSize = 100
wagerCount = 100
def rollDice():
roll = random.randint(1,100)
if roll == 100:
return False
elif roll <= 50:
return False
else:
return True
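# Note on the odds implied above: rolls 51-99 win (49 outcomes) while rolls
# 1-50 and 100 lose (51 outcomes), so the bettor wins with probability 49/100
# -- the 2% house edge this simulation is built around.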
def multiple_bettor(funds,initial_wager,wager_count):#,color):
global multiple_busts
global multiple_profits
value = funds
wager = initial_wager
wX = []
vY = []
currentWager = 1
previousWager = 'win'
previousWagerAmount = initial_wager
while currentWager <= wager_count:
if previousWager == 'win':
if rollDice():
value += wager
wX.append(currentWager)
vY.append(value)
else:
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
multiple_busts += 1
break
elif previousWager == 'loss':
if rollDice():
wager = previousWagerAmount * random_multiple
if (value - wager) <= 0:
wager = value
value += wager
wager = initial_wager
previousWager = 'win'
wX.append(currentWager)
vY.append(value)
else:
wager = previousWagerAmount * random_multiple
if (value - wager) <= 0:
wager = value
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
multiple_busts += 1
break
currentWager += 1
#plt.plot(wX,vY)
if value > funds:
multiple_profits+=1
def multiple_bettor2(funds,initial_wager,wager_count,multiple):#,color):
global multiple_busts
global multiple_profits
value = funds
wager = initial_wager
wX = []
vY = []
currentWager = 1
previousWager = 'win'
previousWagerAmount = initial_wager
while currentWager <= wager_count:
if previousWager == 'win':
if rollDice():
value += wager
wX.append(currentWager)
vY.append(value)
else:
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
multiple_busts += 1
break
elif previousWager == 'loss':
if rollDice():
wager = previousWagerAmount * multiple  # use the multiple argument, not the global
if (value - wager) <= 0:
wager = value
value += wager
wager = initial_wager
previousWager = 'win'
wX.append(currentWager)
vY.append(value)
else:
wager = previousWagerAmount * multiple  # use the multiple argument, not a hard-coded 2
if (value - wager) <= 0:
wager = value
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
multiple_busts += 1
break
currentWager += 1
#plt.plot(wX,vY)
if value > funds:
multiple_profits+=1
def doubler_bettor(funds,initial_wager,wager_count,color):
global doubler_busts
global doubler_profits
value = funds
wager = initial_wager
wX = []
vY = []
currentWager = 1
previousWager = 'win'
previousWagerAmount = initial_wager
while currentWager <= wager_count:
if previousWager == 'win':
if rollDice():
value += wager
wX.append(currentWager)
vY.append(value)
else:
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
doubler_busts += 1
break
elif previousWager == 'loss':
if rollDice():
wager = previousWagerAmount * 2
if (value - wager) < 0:
wager = value
value += wager
wager = initial_wager
previousWager = 'win'
wX.append(currentWager)
vY.append(value)
else:
wager = previousWagerAmount * 2
if (value - wager) < 0:
wager = value
value -= wager
previousWager = 'loss'
previousWagerAmount = wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
doubler_busts += 1
break
currentWager += 1
#plt.plot(wX,vY,color)
if value > funds:
doubler_profits+=1
def simple_bettor(funds,initial_wager,wager_count,color):
global simple_busts
global simple_profits
value = funds
wager = initial_wager
wX = []
vY = []
currentWager = 1
while currentWager <= wager_count:
if rollDice():
value += wager
wX.append(currentWager)
vY.append(value)
else:
value -= wager
wX.append(currentWager)
vY.append(value)
if value <= 0:
simple_busts += 1
break
currentWager += 1
plt.plot(wX,vY,color)
if value > funds:
simple_profits+=1
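# A minimal usage sketch for the bettors above (each one mutates its
# module-level bust/profit counters, so those must be initialized first):
#
#     simple_busts = simple_profits = 0.0
#     simple_bettor(startingFunds, wagerSize, wagerCount, 'k')
#     plt.show()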
x = 0
#Doubler Bettor Bust Chances: 84.1457... so anything less than this... aaaand
#Doubler Bettor Profit Chances: 15.6355 ... aaaand better than this.
while x < 10000:
multiple_busts = 0.0
multiple_profits = 0.0
multipleSampSize = 100000
currentSample = 1
random_multiple = random.uniform(0.6,2.0)
while currentSample <= multipleSampSize:
multiple_bettor(startingFunds,wagerSize,wagerCount)
currentSample += 1
if ((multiple_busts/multipleSampSize)*100.00 < lower_bust) and ((multiple_profits/multipleSampSize)*100.00 > higher_profit):
print('#################################################')
print('found a winner, the multiple was:', random_multiple)
print('Lower Bust Rate Than:', lower_bust)
print('Higher profit rate than:', higher_profit)
print('Bust Rate:', (multiple_busts/multipleSampSize)*100.00)
print('Profit Rate:', (multiple_profits/multipleSampSize)*100.00)
print('#################################################')
time.sleep(5)
#plt.show()
else:
# Quiet branch; re-enable these prints to see the rates for losing multiples.
# print('####################################')
# print('To beat:')
# print('Lower Bust Rate Than:', lower_bust)
# print('Higher profit rate than:', higher_profit)
# print('Bust Rate:', (multiple_busts/multipleSampSize)*100.00)
# print('Profit Rate:', (multiple_profits/multipleSampSize)*100.00)
# print('####################################')
pass
#clears the figure
#plt.clf()
x+=1
| |
# orm/unitofwork.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The internals for the unit of work system.
The session's flush() process passes objects to a contextual object
here, which assembles flush tasks based on mappers and their properties,
organizes them in order of dependency, and executes.
"""
from .. import util, event
from ..util import topological
from . import attributes, persistence, util as orm_util
def track_cascade_events(descriptor, prop):
"""Establish event listeners on object attributes which handle
cascade-on-set/append.
"""
key = prop.key
def append(state, item, initiator):
# process "save_update" cascade rules for when
# an instance is appended to the list of another instance
if item is None:
return
sess = state.session
if sess:
if sess._warn_on_events:
sess._flush_warning("collection append")
prop = state.manager.mapper._props[key]
item_state = attributes.instance_state(item)
if prop._cascade.save_update and \
(prop.cascade_backrefs or key == initiator.key) and \
not sess._contains_state(item_state):
sess._save_or_update_state(item_state)
return item
def remove(state, item, initiator):
if item is None:
return
sess = state.session
if sess:
prop = state.manager.mapper._props[key]
if sess._warn_on_events:
sess._flush_warning(
"collection remove"
if prop.uselist
else "related attribute delete")
# expunge pending orphans
item_state = attributes.instance_state(item)
if prop._cascade.delete_orphan and \
item_state in sess._new and \
prop.mapper._is_orphan(item_state):
sess.expunge(item)
def set_(state, newvalue, oldvalue, initiator):
# process "save_update" cascade rules for when an instance
# is attached to another instance
if oldvalue is newvalue:
return newvalue
sess = state.session
if sess:
if sess._warn_on_events:
sess._flush_warning("related attribute set")
prop = state.manager.mapper._props[key]
if newvalue is not None:
newvalue_state = attributes.instance_state(newvalue)
if prop._cascade.save_update and \
(prop.cascade_backrefs or key == initiator.key) and \
not sess._contains_state(newvalue_state):
sess._save_or_update_state(newvalue_state)
if oldvalue is not None and \
oldvalue is not attributes.NEVER_SET and \
oldvalue is not attributes.PASSIVE_NO_RESULT and \
prop._cascade.delete_orphan:
# possible to reach here with attributes.NEVER_SET ?
oldvalue_state = attributes.instance_state(oldvalue)
if oldvalue_state in sess._new and \
prop.mapper._is_orphan(oldvalue_state):
sess.expunge(oldvalue)
return newvalue
event.listen(descriptor, 'append', append, raw=True, retval=True)
event.listen(descriptor, 'remove', remove, raw=True, retval=True)
event.listen(descriptor, 'set', set_, raw=True, retval=True)
class UOWTransaction(object):
def __init__(self, session):
self.session = session
# dictionary used by external actors to
# store arbitrary state information.
self.attributes = {}
# dictionary of mappers to sets of
# DependencyProcessors, which are also
# set to be part of the sorted flush actions,
# which have that mapper as a parent.
self.deps = util.defaultdict(set)
# dictionary of mappers to sets of InstanceState
# items pending for flush which have that mapper
# as a parent.
self.mappers = util.defaultdict(set)
# a dictionary of Preprocess objects, which gather
# additional states impacted by the flush
# and determine if a flush action is needed
self.presort_actions = {}
# dictionary of PostSortRec objects, each
# one issues work during the flush within
# a certain ordering.
self.postsort_actions = {}
# a set of 2-tuples, each containing two
# PostSortRec objects where the second
# is dependent on the first being executed
# first
self.dependencies = set()
# dictionary of InstanceState-> (isdelete, listonly)
# tuples, indicating if this state is to be deleted
# or insert/updated, or just refreshed
self.states = {}
# tracks InstanceStates which will be receiving
# a "post update" call. Keys are mappers,
# values are a set of states and a set of the
# columns which should be included in the update.
self.post_update_states = util.defaultdict(lambda: (set(), set()))
@property
def has_work(self):
return bool(self.states)
def is_deleted(self, state):
"""return true if the given state is marked as deleted
within this uowtransaction."""
return state in self.states and self.states[state][0]
def memo(self, key, callable_):
if key in self.attributes:
return self.attributes[key]
else:
self.attributes[key] = ret = callable_()
return ret
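# memo() acts as a tiny per-flush cache: the first caller for a key pays the
# cost of callable_(), and later callers get the stored result back.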
def remove_state_actions(self, state):
"""remove pending actions for a state from the uowtransaction."""
isdelete = self.states[state][0]
self.states[state] = (isdelete, True)
def get_attribute_history(self, state, key,
passive=attributes.PASSIVE_NO_INITIALIZE):
"""facade to attributes.get_state_history(), including
caching of results."""
hashkey = ("history", state, key)
# cache the objects, not the states; the strong reference here
# prevents newly loaded objects from being dereferenced during the
# flush process
if hashkey in self.attributes:
history, state_history, cached_passive = self.attributes[hashkey]
# if the cached lookup was "passive" and now
# we want non-passive, do a non-passive lookup and re-cache
if not cached_passive & attributes.SQL_OK \
and passive & attributes.SQL_OK:
impl = state.manager[key].impl
history = impl.get_history(state, state.dict,
attributes.PASSIVE_OFF |
attributes.LOAD_AGAINST_COMMITTED)
if history and impl.uses_objects:
state_history = history.as_state()
else:
state_history = history
self.attributes[hashkey] = (history, state_history, passive)
else:
impl = state.manager[key].impl
# TODO: store the history as (state, object) tuples
# so we don't have to keep converting here
history = impl.get_history(state, state.dict, passive |
attributes.LOAD_AGAINST_COMMITTED)
if history and impl.uses_objects:
state_history = history.as_state()
else:
state_history = history
self.attributes[hashkey] = (history, state_history,
passive)
return state_history
def has_dep(self, processor):
return (processor, True) in self.presort_actions
def register_preprocessor(self, processor, fromparent):
key = (processor, fromparent)
if key not in self.presort_actions:
self.presort_actions[key] = Preprocess(processor, fromparent)
def register_object(self, state, isdelete=False,
listonly=False, cancel_delete=False,
operation=None, prop=None):
if not self.session._contains_state(state):
if not state.deleted and operation is not None:
util.warn("Object of type %s not in session, %s operation "
"along '%s' will not proceed" %
(orm_util.state_class_str(state), operation, prop))
return False
if state not in self.states:
mapper = state.manager.mapper
if mapper not in self.mappers:
self._per_mapper_flush_actions(mapper)
self.mappers[mapper].add(state)
self.states[state] = (isdelete, listonly)
else:
if not listonly and (isdelete or cancel_delete):
self.states[state] = (isdelete, False)
return True
def issue_post_update(self, state, post_update_cols):
mapper = state.manager.mapper.base_mapper
states, cols = self.post_update_states[mapper]
states.add(state)
cols.update(post_update_cols)
def _per_mapper_flush_actions(self, mapper):
saves = SaveUpdateAll(self, mapper.base_mapper)
deletes = DeleteAll(self, mapper.base_mapper)
self.dependencies.add((saves, deletes))
for dep in mapper._dependency_processors:
dep.per_property_preprocessors(self)
for prop in mapper.relationships:
if prop.viewonly:
continue
dep = prop._dependency_processor
dep.per_property_preprocessors(self)
@util.memoized_property
def _mapper_for_dep(self):
"""return a dynamic mapping of (Mapper, DependencyProcessor) to
True or False, indicating if the DependencyProcessor operates
on objects of that Mapper.
The result is stored in the dictionary persistently once
calculated.
"""
return util.PopulateDict(
lambda tup: tup[0]._props.get(tup[1].key) is tup[1].prop
)
def filter_states_for_dep(self, dep, states):
"""Filter the given list of InstanceStates to those relevant to the
given DependencyProcessor.
"""
mapper_for_dep = self._mapper_for_dep
return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]]
def states_for_mapper_hierarchy(self, mapper, isdelete, listonly):
checktup = (isdelete, listonly)
for mapper in mapper.base_mapper.self_and_descendants:
for state in self.mappers[mapper]:
if self.states[state] == checktup:
yield state
def _generate_actions(self):
"""Generate the full, unsorted collection of PostSortRecs as
well as dependency pairs for this UOWTransaction.
"""
# execute presort_actions, until all states
# have been processed. a presort_action might
# add new states to the uow.
while True:
ret = False
for action in list(self.presort_actions.values()):
if action.execute(self):
ret = True
if not ret:
break
# see if the graph of mapper dependencies has cycles.
self.cycles = cycles = topological.find_cycles(
self.dependencies,
list(self.postsort_actions.values()))
if cycles:
# if yes, break the per-mapper actions into
# per-state actions
convert = dict(
(rec, set(rec.per_state_flush_actions(self)))
for rec in cycles
)
# rewrite the existing dependencies to point to
# the per-state actions for those per-mapper actions
# that were broken up.
for edge in list(self.dependencies):
if None in edge or \
edge[0].disabled or edge[1].disabled or \
cycles.issuperset(edge):
self.dependencies.remove(edge)
elif edge[0] in cycles:
self.dependencies.remove(edge)
for dep in convert[edge[0]]:
self.dependencies.add((dep, edge[1]))
elif edge[1] in cycles:
self.dependencies.remove(edge)
for dep in convert[edge[1]]:
self.dependencies.add((edge[0], dep))
return set([a for a in self.postsort_actions.values()
if not a.disabled
]
).difference(cycles)
def execute(self):
postsort_actions = self._generate_actions()
#sort = topological.sort(self.dependencies, postsort_actions)
#print "--------------"
#print "\ndependencies:", self.dependencies
#print "\ncycles:", self.cycles
#print "\nsort:", list(sort)
#print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions)
# execute
if self.cycles:
for set_ in topological.sort_as_subsets(
self.dependencies,
postsort_actions):
while set_:
n = set_.pop()
n.execute_aggregate(self, set_)
else:
for rec in topological.sort(
self.dependencies,
postsort_actions):
rec.execute(self)
def finalize_flush_changes(self):
"""mark processed objects as clean / deleted after a successful
flush().
this method is called within the flush() method after the
execute() method has succeeded and the transaction has been committed.
"""
states = set(self.states)
isdel = set(
s for (s, (isdelete, listonly)) in self.states.items()
if isdelete
)
other = states.difference(isdel)
self.session._remove_newly_deleted(isdel)
self.session._register_newly_persistent(other)
class IterateMappersMixin(object):
def _mappers(self, uow):
if self.fromparent:
return iter(
m for m in
self.dependency_processor.parent.self_and_descendants
if uow._mapper_for_dep[(m, self.dependency_processor)]
)
else:
return self.dependency_processor.mapper.self_and_descendants
class Preprocess(IterateMappersMixin):
def __init__(self, dependency_processor, fromparent):
self.dependency_processor = dependency_processor
self.fromparent = fromparent
self.processed = set()
self.setup_flush_actions = False
def execute(self, uow):
delete_states = set()
save_states = set()
for mapper in self._mappers(uow):
for state in uow.mappers[mapper].difference(self.processed):
(isdelete, listonly) = uow.states[state]
if not listonly:
if isdelete:
delete_states.add(state)
else:
save_states.add(state)
if delete_states:
self.dependency_processor.presort_deletes(uow, delete_states)
self.processed.update(delete_states)
if save_states:
self.dependency_processor.presort_saves(uow, save_states)
self.processed.update(save_states)
if (delete_states or save_states):
if not self.setup_flush_actions and (
self.dependency_processor.\
prop_has_changes(uow, delete_states, True) or
self.dependency_processor.\
prop_has_changes(uow, save_states, False)
):
self.dependency_processor.per_property_flush_actions(uow)
self.setup_flush_actions = True
return True
else:
return False
class PostSortRec(object):
disabled = False
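# __new__ below interns instances: the (class, *args) key is looked up in
# uow.postsort_actions, so constructing the "same" action twice returns the
# one shared record and duplicate flush work is deduplicated.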
def __new__(cls, uow, *args):
key = (cls, ) + args
if key in uow.postsort_actions:
return uow.postsort_actions[key]
else:
uow.postsort_actions[key] = \
ret = \
object.__new__(cls)
return ret
def execute_aggregate(self, uow, recs):
self.execute(uow)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
",".join(str(x) for x in self.__dict__.values())
)
class ProcessAll(IterateMappersMixin, PostSortRec):
def __init__(self, uow, dependency_processor, delete, fromparent):
self.dependency_processor = dependency_processor
self.delete = delete
self.fromparent = fromparent
uow.deps[dependency_processor.parent.base_mapper].\
add(dependency_processor)
def execute(self, uow):
states = self._elements(uow)
if self.delete:
self.dependency_processor.process_deletes(uow, states)
else:
self.dependency_processor.process_saves(uow, states)
def per_state_flush_actions(self, uow):
# this is handled by SaveUpdateAll and DeleteAll,
# since a ProcessAll should unconditionally be pulled
# into per-state if either the parent/child mappers
# are part of a cycle
return iter([])
def __repr__(self):
return "%s(%s, delete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
self.delete
)
def _elements(self, uow):
for mapper in self._mappers(uow):
for state in uow.mappers[mapper]:
(isdelete, listonly) = uow.states[state]
if isdelete == self.delete and not listonly:
yield state
class IssuePostUpdate(PostSortRec):
def __init__(self, uow, mapper, isdelete):
self.mapper = mapper
self.isdelete = isdelete
def execute(self, uow):
states, cols = uow.post_update_states[self.mapper]
states = [s for s in states if uow.states[s][0] == self.isdelete]
persistence.post_update(self.mapper, states, uow, cols)
class SaveUpdateAll(PostSortRec):
def __init__(self, uow, mapper):
self.mapper = mapper
assert mapper is mapper.base_mapper
def execute(self, uow):
persistence.save_obj(self.mapper,
uow.states_for_mapper_hierarchy(self.mapper, False, False),
uow
)
def per_state_flush_actions(self, uow):
states = list(uow.states_for_mapper_hierarchy(
self.mapper, False, False))
base_mapper = self.mapper.base_mapper
delete_all = DeleteAll(uow, base_mapper)
for state in states:
# keep saves before deletes -
# this ensures 'row switch' operations work
action = SaveUpdateState(uow, state, base_mapper)
uow.dependencies.add((action, delete_all))
yield action
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
dep.per_state_flush_actions(uow, states_for_prop, False)
class DeleteAll(PostSortRec):
def __init__(self, uow, mapper):
self.mapper = mapper
assert mapper is mapper.base_mapper
def execute(self, uow):
persistence.delete_obj(self.mapper,
uow.states_for_mapper_hierarchy(self.mapper, True, False),
uow
)
def per_state_flush_actions(self, uow):
states = list(uow.states_for_mapper_hierarchy(
self.mapper, True, False))
base_mapper = self.mapper.base_mapper
save_all = SaveUpdateAll(uow, base_mapper)
for state in states:
# keep saves before deletes -
# this ensures 'row switch' operations work
action = DeleteState(uow, state, base_mapper)
uow.dependencies.add((save_all, action))
yield action
for dep in uow.deps[self.mapper]:
states_for_prop = uow.filter_states_for_dep(dep, states)
dep.per_state_flush_actions(uow, states_for_prop, True)
class ProcessState(PostSortRec):
def __init__(self, uow, dependency_processor, delete, state):
self.dependency_processor = dependency_processor
self.delete = delete
self.state = state
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
dependency_processor = self.dependency_processor
delete = self.delete
our_recs = [r for r in recs
if r.__class__ is cls_ and
r.dependency_processor is dependency_processor and
r.delete is delete]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
if delete:
dependency_processor.process_deletes(uow, states)
else:
dependency_processor.process_saves(uow, states)
def __repr__(self):
return "%s(%s, %s, delete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
orm_util.state_str(self.state),
self.delete
)
class SaveUpdateState(PostSortRec):
def __init__(self, uow, state, mapper):
self.state = state
self.mapper = mapper
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
our_recs = [r for r in recs
if r.__class__ is cls_ and
r.mapper is mapper]
recs.difference_update(our_recs)
persistence.save_obj(mapper,
[self.state] +
[r.state for r in our_recs],
uow)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
orm_util.state_str(self.state)
)
class DeleteState(PostSortRec):
def __init__(self, uow, state, mapper):
self.state = state
self.mapper = mapper
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
mapper = self.mapper
our_recs = [r for r in recs
if r.__class__ is cls_ and
r.mapper is mapper]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
persistence.delete_obj(mapper,
[s for s in states if uow.states[s][0]],
uow)
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
orm_util.state_str(self.state)
)
| |
#!/usr/bin/python
# Parses out 5 fields from eE formatted AIS data (MMSI, Lat, Lon, Date and Message Type), attaches a Unique ID based
# on the source file and the row number, an indication as to whether or not there was any problem in parsing the 5 AIS
# fields and, finally, the original CSV row. The output is tab delimited, and any characters with special meaning in
# the Postgres \copy command (i.e. \) are escaped. The data output for the CSV row is restricted to those fields
# appropriate for the message type, and excludes any fields not cited in the AIS definition for the indicated type.
# The fields are pipe-delimited rather than the original comma separated, double quote enclosed, format. Rows for
# which the message type is not available or incorrect are output with all field values. Note that vessel types
# are expected to be in text equivalent form as the result of the NMEA parsing script (0_gpsd_ais_NM4_parsing.py) used.
from glob import glob
import sys
import os
import re
# Stack Overflow-sourced short functions for testing whether or not string values are numeric, modified from the
# source to account for multiple number types.
# Type-value ranges for PG: http://www.postgresql.org/docs/9.1/static/datatype-numeric.html
def is_float(s):
try:
float(s)
return True
except ValueError:
return False
def is_integer(s):
try:
inttest = int(s)
#Ignores possibility of -2147483648, considered acceptable compromise for performance, value
#shouldn't exist in AIS data.
if abs(inttest) < 2147483648:
return True
else:
return False
except ValueError:
return False
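# Examples of the intended behaviour (chosen to mirror the malformed-exponent
# handling further below):
#   is_float('123.4')       -> True
#   is_float('1.0E2.0')     -> False (malformed exponent, repaired later on)
#   is_integer('316001245') -> True
#   is_integer('4e9')       -> False (not an integer literal)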
# Usage string for the script.
USAGE_STRING = ("Usage: split_ONC_AIS_msg_type.py datafile_date outputfilename inputfilename \n\n"
"Parses out 5 basic fields (MMSI, Type, Lat, Lon, Date) from Terrestrial AIS data obtained from ONC's "
"online dmas.uvic.ca data service and pre-parsed / formatted, generates a unique line ID. Also tests that the basic fields "
"parse properly. Inserts the 7 generated fields (5 + ID, Flag as result of parse test) along with the original line, all in a "
"tab delimited output file. Uses OV (ONC, Venus) designation along with the datafile date to aid in generating the "
"appropriate unique ID, based on line number within the file. Designed to handle only messages 1,2,3,5,18\n")
# Array of message types with positional information.
POSITIONAL_MESSAGE_TYPES = [1, 2, 3, 18]
# If fewer than four arguments (including the script name) are provided, display a usage message.
if (len(sys.argv) < 4):
# Adjust to print function / python3 CH 20180107 (Add parens)
print(USAGE_STRING)
quit()
# Retrieve the datafile date (as a component of the unique_id to be generated)
datafile_date = sys.argv[1]
# retrieve the output filename.
out_filename = sys.argv[2]
# Check the output file for existence before running.
if os.path.exists(out_filename):
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Error, output file exists: (" + out_filename + ") aborting.")
quit()
# Open the output file.
try:
out_records = open(out_filename, 'w')
except IOError:
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Error opening output file: " + out_filename + "\n")
quit()
# Retrieve the input filename.
in_filename = sys.argv[3]
# Check the input file for existence before running.
if( not os.path.exists(in_filename)):
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Error, input file does not exist: (" + in_filename + ") aborting.")
quit()
# Do not write a header line for the output file; a header gets in the way of \copy.
# out_records.write("Unq_ID\tMMSI\tLongitude\tLatitude\tDate\tMsgType\tParseError\tAIS_CSV\n")
print("Processing: " + in_filename)
with open(in_filename,'r') as in_vessel_records:
# Calculate the length of the input filename string.
in_filename_len = len(in_filename)
# Calculate a unique ID prefix value. Prepend the OV (ONC, Venus) designation, and include the provided
# datafile date (presumed to indicate the time span of the incoming datafile e.g. yyyy or yyyymm or yyyymmdd).
unq_ID_prefix = "OV" + datafile_date + "_"
#CCCCC
# Adjust to print function / python3 CH 20180107 (Add parens)
print("unq_ID_prefix: " + unq_ID_prefix)
# Reset a counter into the input file.
in_line_counter = 0
for line in in_vessel_records:
# Split the input line on pipe characters (output from pre-parsing step)
tokenizedline = line.strip().split('|')
# Initialize a flag indicating whether or not the base fields from record were found to be parseable.
parse_error_flag = False
# Obtain the string containing the message type.
str_msg_type = tokenizedline[1]
# Attempt to obtain the message type as an integer from the second token returned by the split operation.
# Initialize to None first, so a failed parse below cannot leave input_msg_type undefined.
input_msg_type = None
try:
input_msg_type = int(str_msg_type)
#If the message type is not in the expected set (1,2,3,5,18,27), update the value to null and set the parse error flag for the row.
if(not input_msg_type in (1,2,3,5,18,27)):
#CCC Debug
print("Message type parse error.(" + unq_ID_prefix + str(in_line_counter) + ": " + str_msg_type + ")\n")
# Had to create raw string to avoid Unicode error in Python 3 CH20180107 str_msg_type = "\N"
str_msg_type = r"\N"
parse_error_flag = True
# If the value for message type cannot be parsed into an integer, set the value to null and set the parse error flag for the row.
except ValueError:
#CCC Debug
print("Message type parse error.(" + unq_ID_prefix + str(in_line_counter) + ": " + str_msg_type + ")")
# Had to create raw string to avoid Unicode error in Python 3 CH20180107 str_msg_type = "\N"
str_msg_type = r"\N"
parse_error_flag = True
# If the message type suggests that longitude and latitude fields should be present, verify that the values are actually coordinates.
if(input_msg_type in POSITIONAL_MESSAGE_TYPES):
longitude_string = tokenizedline[8]
latitude_string = tokenizedline[9]
# If either of the coordinates is not parseable as a floating point number, check to see if it's just an
# improperly formatted exponent (e.g. "1.0E2.0" -- the trailing .0 is superfluous and wrong {unless the
# system supports fractional powers of 10}, which python doesn't) -- either fix the value, or set it to
# null.
if (not(is_float(longitude_string))):
suffix_search = re.search(r'([-]{0,1}[0-9]+[\.]{0,1}E[+-]{0,1}[0-9]+)(\.[0-9]+)\Z',longitude_string)
if(suffix_search is None):
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Longitude parse error.(" + unq_ID_prefix + str(in_line_counter) + ": " + longitude_string + ")")
# Had to create raw string to avoid Unicode error in Python 3 longitude_string = "\N"
longitude_string = r"\N"
parse_error_flag = True
else:
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Translating: " + longitude_string + " to: " + suffix_search.group(1))
longitude_string = suffix_search.group(1)
if (not(is_float(latitude_string))):
suffix_search = re.search(r'([-]{0,1}[0-9]+[\.]{0,1}E[+-]{0,1}[0-9]+)(\.[0-9]+)\Z',latitude_string)
if(suffix_search is None):
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Latitude parse error.(" + unq_ID_prefix + str(in_line_counter) + ": " + latitude_string + ")")
# Had to create raw string to avoid Unicode error in Python 3 CH20180107 latitude_string = "\N"
latitude_string = r"\N"
parse_error_flag = True
else:
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Translating: " + latitude_string + " to: " + suffix_search.group(1))
latitude_string = suffix_search.group(1)
# Attempt to set coordinate values for all non-positional message types to \N
else:
# Had to create raw strings to avoid Unicode error in Python 3 CH 20180107
longitude_string = r"\N"
latitude_string = r"\N"
# If the date value is not of the expected length, or if it is, but has unexpected non-numeric components, set the parse error
#flag and insert a null in place of the date.
raw_date_string = tokenizedline[0]
# Expected format: YYYYMMDDThhmmss.000Z
if(len(raw_date_string) != 20):
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Date string parse error.(" + unq_ID_prefix + str(in_line_counter) + ": " + raw_date_string + ")")
parse_error_flag = True
# Had to create raw string to avoid Unicode error in Python 3 CH 20180107 parsed_date_string = "\N"
parsed_date_string = r"\N"
elif (not(is_integer(raw_date_string[0:8])) or not(is_integer(raw_date_string[9:15]))):
#CCC Debug
print("Date string parse error.(" + unq_ID_prefix + str(in_line_counter) + ": " + raw_date_string + ")")
parse_error_flag = True
# Had to create raw string to avoid Unicode error in Python 3 CH 20180107 parsed_date_string = "\N"
parsed_date_string = r"\N"
# If the date is ok, construct a Postgres-acceptable timestamp from the date_string value.
#e.g 20141001T000005 -> 2014-10-01 00:00:05
else:
parsed_date_string = raw_date_string[0:4] + "-" + raw_date_string[4:6] + "-" + raw_date_string[6:8] + " " + raw_date_string[9:11] + ":" + raw_date_string[11:13] + ":" + raw_date_string[13:15]
# If the MMSI is non numeric, set the parse error flag and insert a null in place of the MMSI.
MMSI_string = tokenizedline[3]
if(not(is_integer(MMSI_string))):
#CCC Debug
print("MMSI parse error.(" + unq_ID_prefix + str(in_line_counter) + ": " + MMSI_string + ")")
parse_error_flag = True
# Had to create raw string to avoid Unicode error in Python 3 CH 20180107 MMSI_string = "\N"
MMSI_string = r"\N"
# Output tokenized raw fields according to the message type observed, escape any backslashes in the input line.
#1_2_3
if(input_msg_type in (1, 2, 3)):
"""Expecting
MMSI 3
Message_ID 1
Repeat_indicator 2
Time 0
Millisecond -
Region -
Country -
Base_station -
Online_data -
Group_code -
Sequence_ID -
Channel -
Data_length -
Navigational_status 4
ROT 5
SOG 6
Accuracy 7
Longitude 8
Latitude 9
COG 10
Heading 11
Maneuver 13
RAIM_flag 14
Communication_state 15
UTC_second 12
spare -
"""
if(len(tokenizedline) < 16):
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Parse error, invalid number of tokens in input line.\n Line: " + str(in_line_counter) + " - " + line.strip())
parse_error_flag = True
PG_safe_line = line.strip().replace("\\","\\\\")
# Had to create raw strings to avoid Unicode error in Python 3 CH 20180107 out_records.write(unq_ID_prefix + str(in_line_counter) + "\t" + "\N" + "\t" + "\N" + "\t" + "\N" + "\t" + "\N" + "\t" + "\N" + "\t" + str(int(parse_error_flag)) + "\t" + PG_safe_line.strip() + "\n")
out_records.write(unq_ID_prefix + str(in_line_counter) + "\t" + r"\N" + "\t" + r"\N" + "\t" + r"\N" + "\t" + r"\N" + "\t" + r"\N" + "\t" + str(int(parse_error_flag)) + "\t" + PG_safe_line.strip() + "\n")
continue
else:
# Original eE data PG_safe_line = (tokenizedline[0] + "|" + tokenizedline[1] + "|" + tokenizedline[2] + "|" + tokenizedline[3] + "|" + tokenizedline[4] + "|" + tokenizedline[5] + "|" + tokenizedline[6] + "|" + tokenizedline[7] + "|" + tokenizedline[8] + "|" + tokenizedline[9] + "|" + tokenizedline[10] + "|" + tokenizedline[11] + "|" + tokenizedline[12] + "|" + tokenizedline[24] + "|" + tokenizedline[25] + "|" + tokenizedline[26] + "|" + tokenizedline[27] + "|" + tokenizedline[28] + "|" + tokenizedline[29] + "|" + tokenizedline[30] + "|" + tokenizedline[31] + "|" + tokenizedline[33] + "|" + tokenizedline[34] + "|" + tokenizedline[36] + "|" + tokenizedline[42] + "|" + tokenizedline[135] + "\n").replace("\\","\\\\")
PG_safe_line = (tokenizedline[3] + "|" + tokenizedline[1] + "|" + tokenizedline[2] + "|" + tokenizedline[0] + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + tokenizedline[4] + "|" + tokenizedline[5] + "|" + tokenizedline[6] + "|" + tokenizedline[7] + "|" + tokenizedline[8] + "|" + tokenizedline[9] + "|" + tokenizedline[10] + "|" + tokenizedline[11] + "|" + tokenizedline[13] + "|" + tokenizedline[14] + "|" + tokenizedline[15] + "|" + tokenizedline[12] + "|" + "" + "\n").replace("\\","\\\\")
#5
elif(input_msg_type == 5):
"""
Expecting:
MMSI 3
Message_ID 1
Repeat_indicator 2
Time 0
Millisecond -
Region -
Country -
Base_station -
Online_data -
Group_code -
Sequence_ID -
Channel -
Data_length -
Vessel_Name 7
Call_sign 6
IMO 5
Ship_Type 8
Dimension_to_Bow 9
Dimension_to_stern 10
Dimension_to_port 11
Dimension_to_starboard 12
Draught 18
Destination 19
AIS_version 4
Fixing_device 13
Transmission_control -
ETA_month 14
ETA_day 15
ETA_hour 16
ETA_minute 17
Sequence -
Data_terminal 20
Mode -
spare -
spare2 -
"""
if(len(tokenizedline) < 21):
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Parse error, invalid number of tokens in input line.\n Line: " + str(in_line_counter) + " - " + line.strip())
parse_error_flag = True
PG_safe_line = line.strip().replace("\\","\\\\")
# Had to create raw strings to avoid Unicode error in Python 3 CH 20180107 out_records.write(unq_ID_prefix + str(in_line_counter) + "\t" + "\N" + "\t" + "\N" + "\t" + "\N" + "\t" + "\N" + "\t" + "\N" + "\t" + str(int(parse_error_flag)) + "\t" + PG_safe_line.strip() + "\n")
out_records.write(unq_ID_prefix + str(in_line_counter) + "\t" + r"\N" + "\t" + r"\N" + "\t" + r"\N" + "\t" + r"\N" + "\t" + r"\N" + "\t" + str(int(parse_error_flag)) + "\t" + PG_safe_line.strip() + "\n")
continue
else:
# Original eE data PG_safe_line = (tokenizedline[0] + "|" + tokenizedline[1] + "|" + tokenizedline[2] + "|" + tokenizedline[3] + "|" + tokenizedline[4] + "|" + tokenizedline[5] + "|" + tokenizedline[6] + "|" + tokenizedline[7] + "|" + tokenizedline[8] + "|" + tokenizedline[9] + "|" + tokenizedline[10] + "|" + tokenizedline[11] + "|" + tokenizedline[12] + "|" + tokenizedline[13] + "|" + tokenizedline[14] + "|" + tokenizedline[15] + "|" + tokenizedline[16] + "|" + tokenizedline[17] + "|" + tokenizedline[18] + "|" + tokenizedline[19] + "|" + tokenizedline[20] + "|" + tokenizedline[21] + "|" + tokenizedline[22] + "|" + tokenizedline[23] + "|" + tokenizedline[43] + "|" + tokenizedline[44] + "|" + tokenizedline[45] + "|" + tokenizedline[46] + "|" + tokenizedline[47] + "|" + tokenizedline[48] + "|" + tokenizedline[49] + "|" + tokenizedline[65] + "|" + tokenizedline[67] + "|" + tokenizedline[135] + "|" + tokenizedline[136] + "\n").replace("\\","\\\\")
PG_safe_line = (tokenizedline[3] + "|" + tokenizedline[1] + "|" + tokenizedline[2] + "|" + tokenizedline[0] + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + tokenizedline[7] + "|" + tokenizedline[6] + "|" + tokenizedline[5] + "|" + tokenizedline[8] + "|" + tokenizedline[9] + "|" + tokenizedline[10] + "|" + tokenizedline[11] + "|" + tokenizedline[12] + "|" + tokenizedline[18] + "|" + tokenizedline[19] + "|" + tokenizedline[4] + "|" + tokenizedline[13] + "|" + "" + "|" + tokenizedline[14] + "|" + tokenizedline[15] + "|" + tokenizedline[16] + "|" + tokenizedline[17] + "|" + "" + "|" + tokenizedline[20] + "|" + "" + "|" + "" + "|" + "" + "\n").replace("\\","\\\\")
elif(input_msg_type == 18):
if(len(tokenizedline) < 21):
# Adjust to print function / python3 CH 20180107 (Add parens)
print("Parse error, invalid number of tokens in input line.\n Line: " + str(in_line_counter) + " - " + line.strip())
parse_error_flag = True
PG_safe_line = line.strip().replace("\\","\\\\")
# Had to create raw strings to avoid Unicode error in Python 3 CH 20180107 out_records.write(unq_ID_prefix + str(in_line_counter) + "\t" + "\N" + "\t" + "\N" + "\t" + "\N" + "\t" + "\N" + "\t" + "\N" + "\t" + str(int(parse_error_flag)) + "\t" + PG_safe_line.strip() + "\n")
out_records.write(unq_ID_prefix + str(in_line_counter) + "\t" + r"\N" + "\t" + r"\N" + "\t" + r"\N" + "\t" + r"\N" + "\t" + r"\N" + "\t" + str(int(parse_error_flag)) + "\t" + PG_safe_line.strip() + "\n")
continue
else:
PG_safe_line = (tokenizedline[3] + "|" + tokenizedline[1] + "|" + tokenizedline[2] + "|" + tokenizedline[0] + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + "" + "|" + tokenizedline[5] + "|" + tokenizedline[6] + "|" + tokenizedline[7] + "|" + tokenizedline[8] + "|" + tokenizedline[9] + "|" + tokenizedline[10] + "|" + "" + "|" + tokenizedline[19] + "|" + "" + "|" + tokenizedline[20] + "|" + "|" + tokenizedline[11] + "" + "|" + "" + "|" + "|" + tokenizedline[18] + "|" + tokenizedline[13] + "|" + tokenizedline[14] + "|" + tokenizedline[15] + "|" + tokenizedline[16] + "|" + tokenizedline[17] + "|" + tokenizedline[4] + "|" + tokenizedline[12] + "\n").replace("\\","\\\\")
else:
print("Parse warning, unhandled message type, skipping \n Line: " + str(in_line_counter) + " - " + line.strip())
continue
# Write the current line to output, formatted for ingest into Postgres.
out_records.write(unq_ID_prefix + str(in_line_counter) + "\t" + MMSI_string + "\t" + longitude_string + "\t" + latitude_string + "\t" + parsed_date_string + "\t" + str_msg_type + "\t" + str(int(parse_error_flag)) + "\t" + PG_safe_line)
# Increment the current input line counter.
in_line_counter += 1
# Close the output file.
out_records.close()
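# A hypothetical ingest step for the resulting tab-delimited file (the table
# and column names are illustrative; this script does not run it):
#   \copy ais_msgs (unq_id, mmsi, longitude, latitude, msg_date, msg_type, parse_error, ais_csv) from 'out_20141001.tsv'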
| |