| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ReviewedVersionCommit'
db.create_table('review_queue_reviewedversioncommit', (
('versioncommit_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['widgy.VersionCommit'], unique=True, primary_key=True)),
('approved_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', null=True, on_delete=models.PROTECT, to=orm[user_orm_label])),
('approved_at', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
))
db.send_create_signal('review_queue', ['ReviewedVersionCommit'])
def backwards(self, orm):
# Deleting model 'ReviewedVersionCommit'
db.delete_table('review_queue_reviewedversioncommit')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'review_queue.reviewedversioncommit': {
'Meta': {'object_name': 'ReviewedVersionCommit', '_ormbases': ['widgy.VersionCommit']},
'approved_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'approved_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': "orm['%s']" % user_model_label}),
'versioncommit_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['widgy.VersionCommit']", 'unique': 'True', 'primary_key': 'True'})
},
'widgy.node': {
'Meta': {'unique_together': "[('content_type', 'content_id')]", 'object_name': 'Node'},
'content_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_frozen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'widgy.versioncommit': {
'Meta': {'object_name': 'VersionCommit'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_model_label, 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.VersionCommit']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'publish_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'root_node': ('widgy.db.fields.WidgyField', [], {'to': "orm['widgy.Node']", 'null': 'True', 'on_delete': 'models.PROTECT', 'blank': 'True'}),
'tracker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commits'", 'to': "orm['widgy.VersionTracker']"})
},
'widgy.versiontracker': {
'Meta': {'object_name': 'VersionTracker'},
'head': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.VersionCommit']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.PROTECT'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'working_copy': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['widgy.Node']", 'unique': 'True', 'on_delete': 'models.PROTECT'})
}
}
complete_apps = ['review_queue']
| j00bar/django-widgy | widgy/contrib/review_queue/south_migrations/0001_initial.py | Python | apache-2.0 | 7,453 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import WebRiskServiceV1Beta1Client
from .async_client import WebRiskServiceV1Beta1AsyncClient
__all__ = (
"WebRiskServiceV1Beta1Client",
"WebRiskServiceV1Beta1AsyncClient",
)
| googleapis/python-webrisk | google/cloud/webrisk_v1beta1/services/web_risk_service_v1_beta1/__init__.py | Python | apache-2.0 | 797 |
# stdlib imports
# django imports
from django import forms
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.forms import widgets
from django.forms.fields import MultipleChoiceField
from django.forms.widgets import Textarea
# third-party imports
from nocaptcha_recaptcha.fields import NoReCaptchaField
from nocaptcha_recaptcha.widgets import NoReCaptchaWidget
# project imports
from dbdb.core.models import CitationUrl
from dbdb.core.models import Feature
from dbdb.core.models import FeatureOption
from dbdb.core.models import System
from dbdb.core.models import SystemVersion
from dbdb.core.models import SystemVersionMetadata
# widgets
class InvisibleReCaptchaWidget(NoReCaptchaWidget):
template = getattr(settings, 'INVISIBLE_RECAPTCHA_WIDGET_TEMPLATE', 'nocaptcha_recaptcha/widget.html')
# fields
class TagFieldM2M(MultipleChoiceField):
widget = forms.TextInput(attrs={'data-role': 'tagsinput', 'placeholder': ''})
def prepare_value(self, value):
try:
return ','.join([x.url for x in value])
except (AttributeError, TypeError):
if value is not None:
return value.split(',')
return ''
def clean(self, value):
if value:
urls = value.split(',')
else:
urls = []
url_objs = []
for url in urls:
cit_url, _ = CitationUrl.objects.get_or_create(url=url)
url_objs.append(cit_url)
return url_objs
pass
# forms
class SystemFeaturesForm(forms.Form):
def __init__(self, *args, **kwargs):
try:
features = kwargs.pop('features')
except KeyError:
features = []
super(SystemFeaturesForm, self).__init__(*args, **kwargs)
initial = {}
for feature in features:
o = feature.options.values_list('value', flat=True)
if len(o) > 1:
o = list(o)
elif len(o) == 1:
o = o[0]
else:
o = None
initial[feature.feature.label] = {
'options': o,
'description': feature.description,
'citations': ','.join(feature.citations.values_list('url', flat=True))
}
pass
features = Feature.objects.all()
self.features = []
for feature in features:
initial_value = None
if feature.multivalued:
if feature.label in initial:
initial_value = initial[feature.label]['options']
self.fields[feature.label+'_choices'] = forms.MultipleChoiceField(
choices=(
(x, x) for x in FeatureOption.objects.filter(feature=feature).order_by('value')
),
initial=initial_value,
required=False
)
pass
else:
if feature.label in initial:
initial_value = initial[feature.label]['options']
self.fields[feature.label+'_choices'] = forms.ChoiceField(
choices=(
(x, x) for x in FeatureOption.objects.filter(feature=feature).order_by('value')
),
initial=initial_value,
required=False
)
pass
initial_desc = None
initial_cit = None
if feature.label in initial:
initial_desc = initial[feature.label]['description']
initial_cit = initial[feature.label]['citations']
pass
self.fields[feature.label+'_description'] = forms.CharField(
label='Description',
help_text="This field supports Markdown Syntax",
widget=widgets.Textarea(),
initial=initial_desc,
required=False
)
self.fields[feature.label+'_citation'] = forms.CharField(
label='Citations',
help_text="Separate the urls with commas",
widget=widgets.TextInput(attrs={'data-role': 'tagsinput', 'placeholder': ''}),
initial=initial_cit,
required=False
)
self.fields[feature.label+'_choices'].feature_id = feature.id
self.fields[feature.label+'_description'].feature_id = feature.id
self.fields[feature.label+'_citation'].feature_id = feature.id
pass
return
pass
# model forms
class CreateUserForm(forms.ModelForm):
email = forms.EmailField(max_length=254, required=True)
password = forms.CharField(max_length=128, label='Password', widget=widgets.PasswordInput)
password2 = forms.CharField(max_length=128, label='Password Confirmation', widget=widgets.PasswordInput)
captcha = NoReCaptchaField(
gtag_attrs={
'callback': 'onCaptchaSubmit', # name of JavaScript callback function
'bind': 'btn_submit' # submit button's ID in the form template
},
widget=InvisibleReCaptchaWidget
)
def __init__(self, *args, **kwargs):
super(CreateUserForm, self).__init__(*args, **kwargs)
self.initial_email = None
initial = getattr(self, 'initial', None)
if initial and 'email' in initial and initial['email']:
self.initial_email = initial['email']
self.fields['email'].widget.attrs['readonly'] = True
pass
return
def clean_email(self):
if self.initial_email:
return self.initial_email
return self.cleaned_data['email']
def clean_password2(self):
if self.cleaned_data['password2'] == self.cleaned_data['password']:
return self.cleaned_data['password2']
raise ValidationError("The passwords do not match")
class Meta:
model = get_user_model()
fields = ['username', 'email', 'password', 'password2', 'captcha']
pass
class SystemForm(forms.ModelForm):
# This is only shown to non-superusers
orig_name = forms.CharField(max_length=128, label="Name", disabled=True, required=False)
class Meta:
model = System
fields = ['name','orig_name']
pass
class SystemVersionEditForm(forms.ModelForm):
description_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
history_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
start_year_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
end_year_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
acquired_by_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
class Meta:
model = SystemVersion
fields = [
'logo',
'description',
'description_citations',
'history',
'history_citations',
'url',
'source_url',
'tech_docs',
'wikipedia_url',
'developer',
'start_year',
'start_year_citations',
'end_year',
'end_year_citations',
'acquired_by',
'acquired_by_citations',
'project_types',
'countries',
'former_names',
'comment'
]
pass
class SystemVersionForm(forms.ModelForm):
description_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
history_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
start_year_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
end_year_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
acquired_by_citations = TagFieldM2M(
help_text="Separate the urls with commas",
required=False
)
class Meta:
model = SystemVersion
fields = [
'logo',
'description',
'description_citations',
'history',
'history_citations',
'url',
'source_url',
'tech_docs',
'wikipedia_url',
'developer',
'start_year',
'start_year_citations',
'end_year',
'end_year_citations',
'acquired_by',
'acquired_by_citations',
'project_types',
'countries',
'former_names',
]
pass
class SystemVersionMetadataForm(forms.ModelForm):
class Meta:
model = SystemVersionMetadata
exclude = ['system']
pass
| cmu-db/dbms-library | dbdb/core/forms.py | Python | apache-2.0 | 9,094 |
import pyximport; pyximport.install()
import subiterations
import numpy as np
import scipy.ndimage as nd
from scipy.ndimage import imread
from scipy.misc import imsave, toimage
from scipy.stats import threshold
from itertools import izip
from multiprocessing import Process, Manager, cpu_count
from threading import Thread
from scipy.ndimage.morphology import grey_closing
import math
skeleton_images_path = "skeleton_images/"
class GrayscaleSkeleton:
def __init__(self):
pass
def skeletonize(self, image):
image = grey_closing(image, footprint=circle(8), mode='constant', cval=0.0)
image = add_zero_mat(image)
prev_binary_image = np.zeros_like(image)
image_bit_depth = (image.dtype.itemsize * 8) / 2
print "image_bit_depth: " + str(image_bit_depth)
#image_thresholds = range(2**image_bit_depth,-1,-16)
image_thresholds = [2**x for x in range(image_bit_depth, 3, -1)] + range(15, 0, -1)
print "image_thresholds: " + str(image_thresholds)
for curr_threshold in image_thresholds:
print "curr_threshold: " + str(curr_threshold)
curr_thresh_image = threshold(image, curr_threshold)
curr_binary_image = curr_thresh_image.astype(np.bool).astype(np.int)
imsave(skeleton_images_path + "binary_" + str(curr_threshold) + ".png", curr_binary_image)
curr_sum_image = (prev_binary_image + curr_binary_image)
curr_skeleton_image = self.thin_pixels(curr_sum_image)
imsave(skeleton_images_path + "skeleton_" + str(curr_threshold) + ".png", curr_skeleton_image)
print "curr_skeleton max: " + str(curr_skeleton_image.max())
prev_binary_image = curr_skeleton_image
return remove_zero_mat(prev_binary_image)
def thin_pixels(self, image):
pixel_removed = True
neighbors = nd.convolve((image>0).astype(np.int),[[1,1,1],[1,0,1],[1,1,1]],mode='constant',cval=0.0)
fg_pixels = np.where((image==1) & (neighbors >= 2) & (neighbors <= 6))
check_pixels = zip(fg_pixels[0], fg_pixels[1])
while len(check_pixels)>0:
print len(check_pixels)
(image, sub1_check_pixels) = self.parallel_sub(subiterations.first_subiteration, image, check_pixels)
(image, sub2_check_pixels) = self.parallel_sub(subiterations.second_subiteration, image, list(set(check_pixels+sub1_check_pixels)))
check_pixels=list(set(sub1_check_pixels+sub2_check_pixels))
neighbors = nd.convolve(image>0,[[1,1,1],[1,0,1],[1,1,1]],mode='constant',cval=0.0)
fg_pixels = np.where(image==1)
check_pixels = zip(fg_pixels[0],fg_pixels[1])
(image, _) = self.parallel_sub(self.empty_pools, image, check_pixels)
return image
def parallel_sub(self, sub_function, image, fg_pixels):
manager = Manager()
queue = manager.Queue()
next_queue = manager.Queue()
num_procs = int(math.ceil(float(cpu_count()) * 0.75))
workload_size = int(math.ceil(float(len(fg_pixels)) / float(num_procs)))
process_list = []
if len(fg_pixels) == 0:
return (image, [])
(zero_pixels, next_pixels) = sub_function(image,fg_pixels)
for (x,y) in zero_pixels:
image[x][y]=0;
return (image, next_pixels)
def PRE_first_subiteration(self, curr_image, fg_pixels):
zero_pixels = {}
next_pixels = {}
for (i, j) in fg_pixels:
if curr_image[i][j] != 1: continue
p2 = curr_image[i - 1][j]
p3 = curr_image[i - 1][j + 1]
p4 = curr_image[i][j + 1]
p5 = curr_image[i + 1][j + 1]
p6 = curr_image[i + 1][j]
p7 = curr_image[i + 1][j - 1]
p8 = curr_image[i][j - 1]
p9 = curr_image[i - 1][j - 1]
if (2 <= (bool(p2) + bool(p3) + bool(p4) + bool(p5) + bool(p6) + bool(p7) + bool(p8) + bool(p9)) <= 6 and
(p2 * p4 * p6 == 0) and
(p4 * p6 * p8 == 0)):
if ((not p2 and p3) + (not p3 and p4) + (not p4 and p5) + (not p5 and p6) + (not p6 and p7) + (not p7 and p8) + (not p8 and p9) + (not p9 and p2) == 1):
zero_pixels[(i,j)] = 0
if p2 == 1:
next_pixels[(i-1,j)]=0
if p3 == 1:
next_pixels[(i-1,j+1)]=0
if p4 == 1:
next_pixels[(i,j+1)]=0
if p5 == 1:
next_pixels[(i+1,j+1)]=0
if p6 == 1:
next_pixels[(i+1,j)]=0
if p7 == 1:
next_pixels[(i+1,j-1)]=0
if p8 == 1:
next_pixels[(i,j-1)]=0
if p9 == 1:
next_pixels[(i-1,j-1)]=0
return zero_pixels.keys(), next_pixels.keys()
def PRE_second_subiteration(self, curr_image, fg_pixels):
zero_pixels = {}
next_pixels = {}
for (i, j) in fg_pixels:
if curr_image[i][j] != 1: continue
p2 = curr_image[i - 1][j]
p3 = curr_image[i - 1][j + 1]
p4 = curr_image[i][j + 1]
p5 = curr_image[i + 1][j + 1]
p6 = curr_image[i + 1][j]
p7 = curr_image[i + 1][j - 1]
p8 = curr_image[i][j - 1]
p9 = curr_image[i - 1][j - 1]
if (2 <= (bool(p2) + bool(p3) + bool(p4) + bool(p5) + bool(p6) + bool(p7) + bool(p8) + bool(p9)) <= 6 and
(p2 * p4 * p8 == 0) and
(p2 * p6 * p8 == 0)):
if ((not p2 and p3) + (not p3 and p4) + (not p4 and p5) + (not p5 and p6) + (not p6 and p7) + (not p7 and p8) + (not p8 and p9) + (not p9 and p2) == 1):
zero_pixels[(i,j)] = 0
if p2 == 1:
next_pixels[(i-1,j)]=0
if p3 == 1:
next_pixels[(i-1,j+1)]=0
if p4 == 1:
next_pixels[(i,j+1)]=0
if p5 == 1:
next_pixels[(i+1,j+1)]=0
if p6 == 1:
next_pixels[(i+1,j)]=0
if p7 == 1:
next_pixels[(i+1,j-1)]=0
if p8 == 1:
next_pixels[(i,j-1)]=0
if p9 == 1:
next_pixels[(i-1,j-1)]=0
return zero_pixels.keys(), next_pixels.keys()
def empty_pools(self, curr_image, fg_pixels):
zero_pixels = {}
for (i, j) in fg_pixels:
p2 = curr_image[i - 1][j]
p3 = curr_image[i - 1][j + 1]
p4 = curr_image[i][j + 1]
p5 = curr_image[i + 1][j + 1]
p6 = curr_image[i + 1][j]
p7 = curr_image[i + 1][j - 1]
p8 = curr_image[i][j - 1]
p9 = curr_image[i - 1][j - 1]
if (bool(p2) + bool(p3) + bool(p4) + bool(p5) + bool(p6) + bool(p7) + bool(p8) + bool(p9) > 6):
zero_pixels[(i,j)] = 0
return zero_pixels,[]
#
# helper functions
#
def add_zero_mat(image):
num_rows, num_cols = image.shape
image = np.insert(image, num_rows, np.zeros(num_cols, dtype=np.int), 0)
image = np.insert(image, 0, np.zeros(num_cols, dtype=np.int), 0)
num_rows, num_cols = image.shape
image = np.insert(image, num_cols, np.zeros(num_rows, dtype=np.int), 1)
image = np.insert(image, 0, np.zeros(num_rows, dtype=np.int), 1)
return image
def remove_zero_mat(image):
num_rows, num_cols = image.shape
image = np.delete(image, num_rows - 1, 0)
image = np.delete(image, 0, 0)
image = np.delete(image, num_cols - 1, 1)
image = np.delete(image, 0, 1)
return image
def circle(radius):
x, y = np.mgrid[:(2 * radius) + 1, :(2 * radius) + 1]
circle = (x - radius) ** 2 + (y - radius) ** 2
return (circle <= (radius ** 2)).astype(np.int)
import sys, time
if __name__ == '__main__':
input_filename = str(sys.argv[1])
output_filename = str(sys.argv[2])
print "input filename: " + str(input_filename)
print "output filename: " + str(output_filename)
input_kde = imread(input_filename)
s = GrayscaleSkeleton()
start_time = time.time()
skeleton = s.skeletonize(input_kde)
print "total elapsed time: " + str(time.time() - start_time) + " seconds"
toimage(skeleton, cmin=0, cmax=255).save(output_filename)
| Vanuan/gpx_to_road_map | biagoni2012/skeleton.py | Python | apache-2.0 | 8,440 |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
Test.Summary = '''
Test transactions and sessions over http2, making sure they open and close in the proper order.
'''
Test.SkipUnless(
Condition.HasCurlFeature('http2')
)
# Define default ATS. Disable the cache to simplify the test.
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True, command="traffic_manager", enable_cache=False)
server = Test.MakeOriginServer("server")
server2 = Test.MakeOriginServer("server2")
Test.testName = ""
request_header = {"headers": "GET / HTTP/1.1\r\nHost: oc.test\r\n\r\n",
"timestamp": "1469733493.993", "body": ""}
# expected response from the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length:0\r\n\r\n",
"timestamp": "1469733493.993", "body": ""}
# add ssl materials like key, certificates for the server
ts.addDefaultSSLFiles()
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir,
'ssntxnorder_verify.so'), ts)
# add response to the server dictionary
server.addResponse("sessionfile.log", request_header, response_header)
ts.Disk.records_config.update({
'proxy.config.http2.zombie_debug_timeout_in': 10,
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'ssntxnorder_verify',
'proxy.config.cache.enable_read_while_writer': 0,
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
})
ts.Disk.remap_config.AddLine(
'map https://oc.test:{0} http://127.0.0.1:{1}'.format(
ts.Variables.ssl_port, server.Variables.Port)
)
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
cmd = 'curl -k --resolve oc.test:{0}:127.0.0.1 --http2 https://oc.test:{0}'.format(ts.Variables.ssl_port)
numberOfRequests = 100
tr = Test.AddTestRun()
# Create a bunch of curl commands to be executed in parallel. Default.Process is set in SpawnCommands.
# On Fedora 28/29, it seems that curl will occasionally time out after a couple of seconds and return exit code 2.
# Examining the packet capture shows that Traffic Server dutifully sends the response.
ps = tr.SpawnCommands(cmdstr=cmd, count=numberOfRequests, retcode=Any(0, 2))
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.ReturnCode = Any(0, 2)
# Execution order is: ts/server, ps(curl cmds), Default Process.
tr.Processes.Default.StartBefore(
server, ready=When.PortOpen(server.Variables.Port))
tr.Processes.Default.StartBefore(Test.Processes.ts)
# Don't know why we need both the start before and the start after
ts.StartAfter(*ps)
server.StartAfter(*ps)
tr.StillRunningAfter = ts
# Signal that all the curl processes have completed
tr = Test.AddTestRun("Curl Done")
tr.DelayStart = 2 # Delaying a couple seconds to make sure the global continuation's lock contention resolves.
tr.Processes.Default.Command = "traffic_ctl plugin msg done done"
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Env = ts.Env
tr.StillRunningAfter = ts
# Parking this as a ready tester on a meaningless process
# To stall the test runs that check for the stats until the
# stats have propagated and are ready to read.
def make_done_stat_ready(tsenv):
def done_stat_ready(process, hasRunFor, **kw):
retval = subprocess.run("traffic_ctl metric get ssntxnorder_verify.test.done > done 2> /dev/null", shell=True, env=tsenv)
if retval.returncode == 0:
retval = subprocess.run("grep 1 done > /dev/null", shell=True, env=tsenv)
return retval.returncode == 0
return done_stat_ready
# number of sessions/transactions opened and closed are equal
tr = Test.AddTestRun("Check Ssn order errors")
server2.StartupTimeout = 60
# Again, here the important thing is the ready function, not the server2 process
tr.Processes.Default.StartBefore(server2, ready=make_done_stat_ready(ts.Env))
tr.Processes.Default.Command = 'traffic_ctl metric get ssntxnorder_verify.err'
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Streams.All = Testers.ContainsExpression(
'ssntxnorder_verify.err 0', 'incorrect statistic return, or possible error.')
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
comparator_command = '''
if test "`traffic_ctl metric get ssntxnorder_verify.{0}.start | cut -d ' ' -f 2`" -eq "`traffic_ctl metric get ssntxnorder_verify.{0}.close | cut -d ' ' -f 2`" ; then\
echo yes;\
else \
echo no; \
fi; \
traffic_ctl metric match ssntxnorder_verify
'''
# number of sessions/transactions opened and closed are equal
tr = Test.AddTestRun("Check for ssn open/close")
tr.Processes.Default.Command = comparator_command.format('ssn')
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression(
"yes", 'should verify contents')
tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression(
"ssntxnorder_verify.ssn.start 0", 'should be nonzero')
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr = Test.AddTestRun("Check for txn/open/close")
tr.Processes.Default.Command = comparator_command.format('txn')
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression(
"yes", 'should verify contents')
tr.Processes.Default.Streams.stdout += Testers.ExcludesExpression(
"ssntxnorder_verify.txn.start 0", 'should be nonzero')
# and we receive the same number of transactions as we asked it to make
tr.Processes.Default.Streams.stdout += Testers.ContainsExpression(
"ssntxnorder_verify.txn.start {}".format(numberOfRequests), 'should be the number of transactions we made')
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
| pbchou/trafficserver | tests/gold_tests/continuations/openclose_h2.test.py | Python | apache-2.0 | 6,696 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import logging
import math
from datetime import datetime, timedelta
from typing import Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from flask_babel import gettext as _
from superset import app, cache, db, security_manager
from superset.common.query_object import QueryObject
from superset.connectors.base.models import BaseDatasource
from superset.connectors.connector_registry import ConnectorRegistry
from superset.exceptions import QueryObjectValidationError
from superset.stats_logger import BaseStatsLogger
from superset.utils import core as utils
from superset.utils.core import DTTM_ALIAS
from superset.viz import set_and_log_cache
config = app.config
stats_logger: BaseStatsLogger = config["STATS_LOGGER"]
logger = logging.getLogger(__name__)
class QueryContext:
"""
The query context contains the query object and additional fields necessary
to retrieve the data payload for a given viz.
"""
cache_type: ClassVar[str] = "df"
enforce_numerical_metrics: ClassVar[bool] = True
datasource: BaseDatasource
queries: List[QueryObject]
force: bool
custom_cache_timeout: Optional[int]
result_type: utils.ChartDataResultType
result_format: utils.ChartDataResultFormat
# TODO: Type datasource and query_object dictionary with TypedDict when it becomes
# a vanilla python type https://github.com/python/mypy/issues/5288
def __init__( # pylint: disable=too-many-arguments
self,
datasource: Dict[str, Any],
queries: List[Dict[str, Any]],
force: bool = False,
custom_cache_timeout: Optional[int] = None,
result_type: Optional[utils.ChartDataResultType] = None,
result_format: Optional[utils.ChartDataResultFormat] = None,
) -> None:
self.datasource = ConnectorRegistry.get_datasource(
str(datasource["type"]), int(datasource["id"]), db.session
)
self.queries = [QueryObject(**query_obj) for query_obj in queries]
self.force = force
self.custom_cache_timeout = custom_cache_timeout
self.result_type = result_type or utils.ChartDataResultType.FULL
self.result_format = result_format or utils.ChartDataResultFormat.JSON
def get_query_result(self, query_object: QueryObject) -> Dict[str, Any]:
"""Returns a pandas dataframe based on the query object"""
# Here, we assume that all the queries will use the same datasource, which is
# a valid assumption for current setting. In the long term, we may
# support multiple queries from different data sources.
timestamp_format = None
if self.datasource.type == "table":
dttm_col = self.datasource.get_column(query_object.granularity)
if dttm_col:
timestamp_format = dttm_col.python_date_format
# The datasource here can be different backend but the interface is common
result = self.datasource.query(query_object.to_dict())
df = result.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic
if not df.empty:
if DTTM_ALIAS in df.columns:
if timestamp_format in ("epoch_s", "epoch_ms"):
# Column has already been formatted as a timestamp.
df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format
)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df[DTTM_ALIAS] += query_object.time_shift
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df, query_object)
df = df.replace([np.inf, -np.inf], np.nan)
df = query_object.exec_post_processing(df)
return {
"query": result.query,
"status": result.status,
"error_message": result.error_message,
"df": df,
}
@staticmethod
def df_metrics_to_num(df: pd.DataFrame, query_object: QueryObject) -> None:
"""Converting metrics to numeric when pandas.read_sql cannot"""
for col, dtype in df.dtypes.items():
if dtype.type == np.object_ and col in query_object.metrics:
df[col] = pd.to_numeric(df[col], errors="coerce")
def get_data(self, df: pd.DataFrame,) -> Union[str, List[Dict[str, Any]]]:
if self.result_format == utils.ChartDataResultFormat.CSV:
include_index = not isinstance(df.index, pd.RangeIndex)
result = df.to_csv(index=include_index, **config["CSV_EXPORT"])
return result or ""
return df.to_dict(orient="records")
def get_single_payload(self, query_obj: QueryObject) -> Dict[str, Any]:
"""Returns a payload of metadata and data"""
if self.result_type == utils.ChartDataResultType.QUERY:
return {
"query": self.datasource.get_query_str(query_obj.to_dict()),
"language": self.datasource.query_language,
}
if self.result_type == utils.ChartDataResultType.SAMPLES:
row_limit = query_obj.row_limit or math.inf
query_obj = copy.copy(query_obj)
query_obj.groupby = []
query_obj.metrics = []
query_obj.post_processing = []
query_obj.row_limit = min(row_limit, config["SAMPLES_ROW_LIMIT"])
query_obj.row_offset = 0
query_obj.columns = [o.column_name for o in self.datasource.columns]
payload = self.get_df_payload(query_obj)
df = payload["df"]
status = payload["status"]
if status != utils.QueryStatus.FAILED:
payload["data"] = self.get_data(df)
del payload["df"]
if self.result_type == utils.ChartDataResultType.RESULTS:
return {"data": payload["data"]}
return payload
def get_payload(self) -> List[Dict[str, Any]]:
"""Get all the payloads from the QueryObjects"""
return [self.get_single_payload(query_object) for query_object in self.queries]
@property
def cache_timeout(self) -> int:
if self.custom_cache_timeout is not None:
return self.custom_cache_timeout
if self.datasource.cache_timeout is not None:
return self.datasource.cache_timeout
if (
hasattr(self.datasource, "database")
and self.datasource.database.cache_timeout is not None
):
return self.datasource.database.cache_timeout
return config["CACHE_DEFAULT_TIMEOUT"]
def cache_key(self, query_obj: QueryObject, **kwargs: Any) -> Optional[str]:
extra_cache_keys = self.datasource.get_extra_cache_keys(query_obj.to_dict())
cache_key = (
query_obj.cache_key(
datasource=self.datasource.uid,
extra_cache_keys=extra_cache_keys,
rls=security_manager.get_rls_ids(self.datasource)
if config["ENABLE_ROW_LEVEL_SECURITY"]
and self.datasource.is_rls_supported
else [],
changed_on=self.datasource.changed_on,
**kwargs
)
if query_obj
else None
)
return cache_key
def get_df_payload( # pylint: disable=too-many-statements
self, query_obj: QueryObject, **kwargs: Any
) -> Dict[str, Any]:
"""Handles caching around the df payload retrieval"""
cache_key = self.cache_key(query_obj, **kwargs)
logger.info("Cache key: %s", cache_key)
is_loaded = False
stacktrace = None
df = pd.DataFrame()
cached_dttm = datetime.utcnow().isoformat().split(".")[0]
cache_value = None
status = None
query = ""
error_message = None
if cache_key and cache and not self.force:
cache_value = cache.get(cache_key)
if cache_value:
stats_logger.incr("loading_from_cache")
try:
df = cache_value["df"]
query = cache_value["query"]
status = utils.QueryStatus.SUCCESS
is_loaded = True
stats_logger.incr("loaded_from_cache")
except KeyError as ex:
logger.exception(ex)
logger.error(
"Error reading cache: %s", utils.error_msg_from_exception(ex)
)
logger.info("Serving from cache")
if query_obj and not is_loaded:
try:
invalid_columns = [
col
for col in query_obj.columns
+ query_obj.groupby
+ utils.get_column_names_from_metrics(query_obj.metrics)
if col not in self.datasource.column_names
]
if invalid_columns:
raise QueryObjectValidationError(
_(
"Columns missing in datasource: %(invalid_columns)s",
invalid_columns=invalid_columns,
)
)
query_result = self.get_query_result(query_obj)
status = query_result["status"]
query = query_result["query"]
error_message = query_result["error_message"]
df = query_result["df"]
if status != utils.QueryStatus.FAILED:
stats_logger.incr("loaded_from_source")
if not self.force:
stats_logger.incr("loaded_from_source_without_force")
is_loaded = True
except QueryObjectValidationError as ex:
error_message = str(ex)
status = utils.QueryStatus.FAILED
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
if not error_message:
error_message = str(ex)
status = utils.QueryStatus.FAILED
stacktrace = utils.get_stacktrace()
if is_loaded and cache_key and cache and status != utils.QueryStatus.FAILED:
set_and_log_cache(
cache_key,
df,
query,
cached_dttm,
self.cache_timeout,
self.datasource.uid,
)
return {
"cache_key": cache_key,
"cached_dttm": cache_value["dttm"] if cache_value is not None else None,
"cache_timeout": self.cache_timeout,
"df": df,
"error": error_message,
"is_cached": cache_key is not None,
"query": query,
"status": status,
"stacktrace": stacktrace,
"rowcount": len(df.index),
}
def raise_for_access(self) -> None:
"""
Raise an exception if the user cannot access the resource.
:raises SupersetSecurityException: If the user cannot access the resource
"""
security_manager.raise_for_access(query_context=self)
| airbnb/superset | superset/common/query_context.py | Python | apache-2.0 | 12,432 |
import io,requests,json,time,datetime,os.path,socket,random,re
IN_DIR="../data/tweet"
OUT_DIR="../data/imdb/parts"
SPLITS=25
def split_files():
movie_fName=IN_DIR+"/movies.dat"
count=1;
try:
with open(movie_fName,'r') as in_f:
out_fName=OUT_DIR+"/movies_"+str(count)+".dat"
out_f=open(out_fName,'w')
print("INFO: Writing to "+out_fName)
for l in in_f:
out_f.write(l)
count=count+1
if count%SPLITS==0:
out_f.close()
out_fName=OUT_DIR+"/movies_"+str(count)+".dat"
out_f=open(out_fName,'w')
print("INFO: Writing to "+out_fName)
out_f.close()
except (OSError, IOError) as e:
print("ERROR: File "+movie_fName+" not found!")
print("ERROR: Please run get_movie_tweeting_data.sh to get the Tweet Data")
if __name__ == "__main__":
split_files()
| saikatgomes/recsys | code/split_movies_list.py | Python | apache-2.0 | 988 |
#!/usr/bin/env python
# Author:
# Rudiger Birkner (Networked Systems Group ETH Zurich)
from collections import namedtuple
import json
from netaddr import IPNetwork
''' Config Parser '''
class Config(object):
MULTISWITCH = 0
MULTITABLE = 1
MULTIHOP = 2
SUPERSETS = 0
MDS = 1
def __init__(self, config_file):
self.mode = None
self.vmac_mode = None
self.vmac_options = None
self.vnhs = None
self.dpids = None
self.dpid_2_name = {}
self.refmon = None
self.flanc_auth = None
self.route_server = None
self.arp_proxy = None
self.peers = {}
# loading config file
config = json.load(open(config_file, 'r'))
# parse config
self.parse_config(config)
def parse_config(self, config):
if "Mode" in config:
if config["Mode"] == "Multi-Switch":
self.mode = self.MULTISWITCH
if config["Mode"] == "Multi-Table":
self.mode = self.MULTITABLE
if config["Mode"] == "Multi-Hop":
self.mode = self.MULTIHOP
if "VMAC" in config:
if "Mode" in config["VMAC"]:
if config["VMAC"]["Mode"] == "Superset":
self.vmac_mode = self.SUPERSETS
if config["VMAC"]["Mode"] == "MDS":
self.vmac_mode = self.MDS
if "Options" in config["VMAC"]:
self.vmac_options = config["VMAC"]["Options"]
if "RefMon Settings" in config:
if "fabric options" in config["RefMon Settings"]:
if "dpids" in config["RefMon Settings"]["fabric options"]:
self.dpids = config["RefMon Settings"]["fabric options"]["dpids"]
for k,v in self.dpids.iteritems():
self.dpid_2_name[v] = k
if "RefMon Server" in config:
self.refmon = config["RefMon Server"]
if "Flanc Auth Info" in config:
self.flanc_auth = config["Flanc Auth Info"]
if "VNHs" in config:
self.vnhs = IPNetwork(config["VNHs"])
if "Route Server" in config:
switch = None
if "switch" in config["Route Server"]:
switch = config["Route Server"]["switch"]
self.route_server = Peer("RS", [Port(config["Route Server"]["Port"], switch, config["Route Server"]["MAC"], config["Route Server"]["IP"])])
if "ARP Proxy" in config:
switch = None
if "switch" in config["ARP Proxy"]:
switch = config["ARP Proxy"]["switch"]
self.arp_proxy = Peer("ARP", [Port(config["ARP Proxy"]["Port"], switch, config["ARP Proxy"]["MAC"], config["ARP Proxy"]["IP"])])
if "Participants" in config:
for participant_name, participant in config["Participants"].iteritems():
participant_name = int(participant_name)
if ("Inbound Rules" in participant):
inbound_rules = participant["Inbound Rules"]
else:
inbound_rules = None
if ("Outbound Rules" in participant):
outbound_rules = participant["Outbound Rules"]
else:
outbound_rules = None
if ("Ports" in participant):
ports = [Port(port['Id'], port['switch'], port['MAC'], port['IP'])
for port in participant["Ports"]]
self.peers[participant_name] = Participant(participant_name, ports, inbound_rules, outbound_rules)
def isMultiSwitchMode(self):
return self.mode == self.MULTISWITCH
def isMultiTableMode(self):
return self.mode == self.MULTITABLE
def isMultiHopMode(self):
return self.mode == self.MULTIHOP
def isSupersetsMode(self):
return self.vmac_mode == self.SUPERSETS
def isMDSMode(self):
return self.vmac_mode == self.MDS
Peer = namedtuple('Peer', 'name ports')
Port = namedtuple('Port', 'id switch mac ip')
Participant = namedtuple('Participant', 'name ports inbound_rules outbound_rules')
| h2020-endeavour/iSDX | xctrl/lib.py | Python | apache-2.0 | 4,203 |
""" Cloud API asynchronous "PDF To Text" job example.
Allows to avoid timeout errors when processing huge or scanned PDF documents.
"""
import os
import requests # pip install requests
import time
import datetime
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co
API_KEY = "******************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Source PDF file
SourceFile = ".\\sample.pdf"
# Comma-separated list of page indices (or ranges) to process. Leave empty for all pages. Example: '0,2-5,7-'.
Pages = ""
# PDF document password. Leave empty for unprotected documents.
Password = ""
# Destination CSV file name
DestinationFile = ".\\result.csv"
# (!) Make asynchronous job
Async = True
def main(args = None):
uploadedFileUrl = uploadFile(SourceFile)
if (uploadedFileUrl != None):
convertPdfToCSV(uploadedFileUrl, DestinationFile)
def convertPdfToCSV(uploadedFileUrl, destinationFile):
"""Converts PDF To CSV using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["async"] = Async
parameters["name"] = os.path.basename(destinationFile)
parameters["password"] = Password
parameters["pages"] = Pages
parameters["url"] = uploadedFileUrl
# Prepare URL for 'PDF To CSV' API request
url = "{}/pdf/convert/to/csv".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Asynchronous job ID
jobId = json["jobId"]
# URL of the result file
resultFileUrl = json["url"]
# Check the job status in a loop.
# If you don't want to pause the main thread you can rework the code
# to use a separate thread for the status checking and completion.
while True:
status = checkJobStatus(jobId) # Possible statuses: "working", "failed", "aborted", "success".
# Display timestamp and status (for demo purposes)
print(datetime.datetime.now().strftime("%H:%M.%S") + ": " + status)
if status == "success":
# Download result file
r = requests.get(resultFileUrl, stream=True)
if (r.status_code == 200):
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
break
elif status == "working":
# Pause for a few seconds
time.sleep(3)
else:
print(status)
break
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
def checkJobStatus(jobId):
"""Checks server job status"""
url = f"{BASE_URL}/job/check?jobid={jobId}"
response = requests.get(url, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
return json["status"]
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
def uploadFile(fileName):
"""Uploads file to the cloud"""
# 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
# Prepare URL for 'Get Presigned URL' API request
url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
BASE_URL, os.path.basename(fileName))
# Execute request and get response as JSON
response = requests.get(url, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# URL to use for file upload
uploadUrl = json["presignedUrl"]
# URL for future reference
uploadedFileUrl = json["url"]
# 2. UPLOAD FILE TO CLOUD.
with open(fileName, 'rb') as file:
requests.put(uploadUrl, data=file, headers={ "x-api-key": API_KEY, "content-type": "application/octet-stream" })
return uploadedFileUrl
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
if __name__ == '__main__':
main()
| bytescout/ByteScout-SDK-SourceCode | PDF.co Web API/PDF To CSV API/Python/Convert PDF To CSV From Uploaded File Asynchronously/ConvertPdfToCSVFromUploadedFileAsynchronously.py | Python | apache-2.0 | 4,935 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' pidhandler.py '''
import traceback
import tornado.gen
import tornado.web
from heron.common.src.python.utils.log import Log
from heron.tools.tracker.src.python import utils
from heron.tools.tracker.src.python.handlers import BaseHandler
@tornado.gen.coroutine
def getInstancePid(topology_info, instance_id):
"""
This method is used by other modules, and so it
is not a part of the class.
Fetches Instance pid from heron-shell.
"""
try:
http_client = tornado.httpclient.AsyncHTTPClient()
endpoint = utils.make_shell_endpoint(topology_info, instance_id)
url = "%s/pid/%s" % (endpoint, instance_id)
Log.debug("HTTP call for url: %s", url)
response = yield http_client.fetch(url)
raise tornado.gen.Return(response.body)
except tornado.httpclient.HTTPError as e:
raise Exception(str(e))
class PidHandler(BaseHandler):
"""
URL - /topologies/pid?cluster=<cluster>&topology=<topology> \
&environ=<environment>&instance=<instance>
Parameters:
- cluster - Name of the cluster.
- role - (optional) Role used to submit the topology.
- environ - Running environment.
- topology - Name of topology (Note: Case sensitive. Can only
include [a-zA-Z0-9-_]+)
- instance - Instance Id
If successful, returns the pid of the instance. May include trailing
spaces and/or a linefeed before/after.
The response JSON is a dict with following format:
{
'command': Full command executed at server.
'stdout': Text on stdout of executing the command.
'stderr': <optional> Text on stderr.
}
"""
# pylint: disable=attribute-defined-outside-init
def initialize(self, tracker):
""" initialize """
self.tracker = tracker
@tornado.gen.coroutine
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
instance = self.get_argument_instance()
topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
result = yield getInstancePid(topology_info, instance)
self.write_success_response(result)
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e)
| lucperkins/heron | heron/tools/tracker/src/python/handlers/pidhandler.py | Python | apache-2.0 | 2,951 |
#!/usr/bin/env python
# coding: utf-8
#copyRight by heibanke
import time
def time_cost(f):
def _f(*arg, **kwarg):
start = time.clock()
a=f(*arg,**kwarg)
end = time.clock()
print f.__name__,"run cost time is ",end-start
return a
return _f
def fib(n):
if n<=2:
return 1
else:
return fib(n-1)+fib(n-2)
#@time_cost
def fib_opt(n):
a,b,i=0,1,0
while i<n:
a,b=b,a+b
i+=1
else:
return b
def fib_iter():
a,b = 0,1
while True:
yield b
a,b = b,a+b
N=10000
#print time_cost(fib)(N)
#print fib_opt(N)
import time
start = time.clock()
opt_result= [fib_opt(i) for i in xrange(N)]
end = time.clock()
print end-start
print "------------------------------"
A=fib_iter()
start = time.clock()
iter_result= [A.next() for i in xrange(N)]
end = time.clock()
print end-start
| heibanke/python_do_something | Code/Chapter4/func_fib_opt.py | Python | apache-2.0 | 834 |
# Copyright 2015 Canonical Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nova.compute import power_state
LXD_POWER_STATES = {
100: power_state.RUNNING,
101: power_state.RUNNING,
102: power_state.SHUTDOWN,
103: power_state.RUNNING,
104: power_state.SHUTDOWN,
105: power_state.NOSTATE,
106: power_state.NOSTATE,
107: power_state.SHUTDOWN,
108: power_state.CRASHED,
109: power_state.SUSPENDED,
110: power_state.SUSPENDED,
111: power_state.SUSPENDED,
200: power_state.RUNNING,
400: power_state.CRASHED,
401: power_state.NOSTATE
}
| tpouyer/nova-lxd | nova_lxd/nova/virt/lxd/constants.py | Python | apache-2.0 | 1,148 |
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username="admin", password="secret"):
self.app.navigation.open_start_page()
# Input userName
self.app.page.fill_field("user", username)
# Input Password
self.app.page.fill_field("pass", password)
# Click submit
self.app.wd.find_element_by_css_selector("input[type='submit']").click()
self.app.navigation.go_to_home_page()
def logout(self):
self.app.wd.find_element_by_link_text("Logout").click()
def is_logged_in(self):
return len(self.app.wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
return self.app.wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1] == username
def ensure_logout(self):
if self.is_logged_in():
self.logout()
def ensure_login(self, username, password):
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
| Alinyan/python_training | fixture/session.py | Python | apache-2.0 | 1,170 |
#!/usr/bin/python
#---------------------------------------------------------------------------
# Copyright 2011 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
#
# Gets the current rate and absolute limits for your account.
#
# This script expects to find the token.json file in the 'Session' directory.
# That file should have been created during the authentication stage.
#
# The response is put in the file:
#
# Session/latestLimits.json.
#
# For details, please see:
# http://docs.rackspace.com/servers/api/v2/cs-devguide/content/ProgrammaticLimits.html
#
#---------------------------------------------------------------------------
import requests
import json
tokenFile=open('Session/token.json','r')
token=json.load(tokenFile)
tokenFile.close()
host='https://dfw.servers.api.rackspacecloud.com/v2/'
account=token['tenant']['id']
uri='/limits'
url=host+account+uri
headers={'X-Auth-Token':token['id']}
response = requests.get(url,headers=headers)
tokenFile=open('Session/latestLimits.json','w')
tokenFile.write(json.dumps(response.json))
tokenFile.close()
| luisibanez/cloud-whisperer | Rackspace/getLimits.py | Python | apache-2.0 | 1,716 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: text_file
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
Read text file in map folder.
"""
###############################################################################
# Copyright 2017 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Standard library modules.
# Third party modules.
# Local modules.
# Project modules.
# Globals and constants variables.
class Analysis():
def __init__(self, dose=0.0, energy_width_eV=0, energy_loss_eV=0.0):
self.dose = dose
self.energy_width_eV = energy_width_eV
self.energy_loss_eV = energy_loss_eV
class TextParameters():
def __init__(self):
self.analysis1 = Analysis(0.0, 0, 0.0)
self.analysis2 = Analysis(0.0, 0, 0.0)
self.analysis3 = Analysis(0.0, 0, 0.0)
def read(self, file):
lines = file.readlines()
map_mode = True
for line in lines:
if line.startswith("[Analysis]"):
map_mode = False
try:
keyword, value = line.split('=')
keyword = keyword.strip()
value = value.strip()
if map_mode:
if keyword.startswith("SU9000 Magnification"):
self.magnification = int(value)
elif keyword.startswith("EELS model"):
self.model = value
elif keyword.startswith("S.H."):
self.sample_height = value
elif keyword.startswith("File Name"):
self.file_name = value
elif keyword.startswith("Date"):
self.date = value
elif keyword.startswith("Time"):
self.time = value
elif keyword.startswith("Capture resolution"):
self.capture_resolution = value
elif keyword.startswith("Accelerating Voltage"):
value, unit = value.split()
self.accelerating_voltage_V = int(value)
elif keyword.startswith("Energy Width"):
self.energy_width = value
elif keyword.startswith("Energy Loss"):
self.energy_loss = value
elif keyword.startswith("Scan Speed"):
self.speed = value
elif keyword.startswith("DPI"):
self.dpi = int(value)
elif keyword.startswith("Pixel Distance"):
self.pixel_distance = value
else:
if keyword.startswith("Dose"):
unit = value[-2:]
items = value[:-2].split('/')
if not items[0].startswith('-'):
self.analysis1.dose = float(items[0])
if not items[1].startswith('-'):
self.analysis2.dose = float(items[1])
if not items[2].startswith('-'):
self.analysis3.dose = float(items[2])
elif keyword.startswith("Energy Width"):
unit = value[-2:]
items = value[:-2].split('/')
if not items[0].startswith('-'):
self.analysis1.energy_width_eV = int(items[0])
if not items[1].startswith('-'):
self.analysis2.energy_width_eV = int(items[1])
if not items[2].startswith('-'):
self.analysis3.energy_width_eV = int(items[2])
elif keyword.startswith("Energy Loss"):
unit = value[-2:]
items = value[:-2].split('/')
if not items[0].startswith('-'):
self.analysis1.energy_loss_eV = float(items[0])
if not items[1].startswith('-'):
self.analysis2.energy_loss_eV = float(items[1])
if not items[2].startswith('-'):
self.analysis3.energy_loss_eV = float(items[2])
elif keyword.startswith("A(x,y)"):
self.A = value
elif keyword.startswith("B(x,y)"):
self.B = value
elif keyword.startswith("Distance"):
self.distance = value
elif keyword.startswith("Pitch"):
self.pitch = value
elif keyword.startswith("Number"):
self.number = int(value)
elif keyword.startswith("Adjust"):
self.adjust = value
elif keyword.startswith("Data Size"):
self.data_size = value
elif keyword.startswith("Integration"):
self.integration = int(value)
except ValueError:
pass
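if __name__ == '__main__':
    # Hedged usage sketch: the section name, keywords and "value/value/-" unit
    # format below are assumptions inferred from the parser above, not a real
    # SU-EELS map export.
    import io
    example = io.StringIO(
        "SU9000 Magnification=10000\n"
        "Accelerating Voltage=30 kV\n"
        "[Analysis]\n"
        "Dose=1.0/2.0/-eV\n"
    )
    parameters = TextParameters()
    parameters.read(example)
    print(parameters.magnification, parameters.accelerating_voltage_V)
    print(parameters.analysis1.dose, parameters.analysis2.dose, parameters.analysis3.dose)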
|
drix00/pysemeels
|
pysemeels/hitachi/eels_su/map/text_file.py
|
Python
|
apache-2.0
| 5,800
|
from flask import Flask
from flask import render_template
from flask import request
from flask import jsonify
from docker import client
from os import environ
import redis
app = Flask(__name__)
app.debug = True
#set defaults
IMAGE_NAME = "keeb/mongodb"
COMMAND = ["/usr/bin/mongod", "--config", "/etc/mongodb.conf"]
DOMAIN = "mongodb.stinemat.es"
HIPACHE_PORT="80"
EXPOSED_PORT="27017"
#environment variables, must be set in order for application to function
try:
REDIS_PORT=environ["REDIS_PORT"]
REDIS_HOST=environ["REDIS_HOST"]
HIPACHE_PORT=environ["HIPACHE_PORT"]
DOCKER_HOST=environ["DOCKER_HOST"]
except Exception, e:
print e
print "environment not properly configured"
print environ
import sys; sys.exit(1)
r = redis.StrictRedis(host=REDIS_HOST, port=int(REDIS_PORT))
c = client.Client(base_url='http://%s:4243' % DOCKER_HOST)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/new', methods=["POST"])
def new():
container = c.create_container(IMAGE_NAME, COMMAND, ports=[EXPOSED_PORT])
container_id = container["Id"]
c.start(container_id)
container_port = c.port(container_id, EXPOSED_PORT)
r.rpush("frontend:%s.%s" % (container_id, DOMAIN), container_id)
r.rpush("frontend:%s.%s" % (container_id, DOMAIN), "http://%s:%s" %(DOMAIN, container_port))
    url = "%s:%s" % (DOMAIN, container_port)
return jsonify(
url=url,
port=container_port,
hipache_port=HIPACHE_PORT,
id=container_id)
@app.route('/details/<url>')
def details(url):
return render_template("details.html",url=url)
@app.route('/robot.txt')
def robot():
return render_template("robot.html")
if __name__ == '__main__':
import sys, os
app.run(host="0.0.0.0")
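# Hedged usage sketch (values are placeholders, not a real deployment): the
# environment variables read above must be exported before the app starts,
# e.g.
#
#   REDIS_HOST=127.0.0.1 REDIS_PORT=6379 HIPACHE_PORT=80 \
#   DOCKER_HOST=127.0.0.1 python api.py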
|
keeb/any-saas
|
api.py
|
Python
|
apache-2.0
| 1,891
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tokenize
from hacking import core
START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"]
END_DOCSTRING_TRIPLE = ['"""', "'''"]
@core.flake8ext
def hacking_docstring_start_space(physical_line, previous_logical, tokens):
r"""Check for docstring not starting with space.
OpenStack HACKING guide recommendation for docstring:
Docstring should not start with space
Okay: def foo():\n '''This is good.'''
Okay: def foo():\n r'''This is good.'''
Okay: def foo():\n a = ''' This is not a docstring.'''
Okay: def foo():\n pass\n ''' This is not.'''
H401: def foo():\n ''' This is not.'''
H401: def foo():\n r''' This is not.'''
"""
docstring = is_docstring(tokens, previous_logical)
if docstring:
start, start_triple = _find_first_of(docstring, START_DOCSTRING_TRIPLE)
if docstring[len(start_triple)] == ' ':
# docstrings get tokenized on the last line of the docstring, so
# we don't know the exact position.
return (0, "H401: docstring should not start with"
" a space")
@core.flake8ext
def hacking_docstring_multiline_end(physical_line, previous_logical, tokens):
r"""Check multi line docstring end.
OpenStack HACKING guide recommendation for docstring:
Docstring should end on a new line
Okay: '''foobar\nfoo\nbar\n'''
Okay: def foo():\n '''foobar\n\nfoo\nbar\n'''
Okay: class Foo(object):\n '''foobar\n\nfoo\nbar\n'''
Okay: def foo():\n a = '''not\na\ndocstring'''
Okay: def foo():\n a = '''not\na\ndocstring''' # blah
Okay: def foo():\n pass\n'''foobar\nfoo\nbar\n d'''
H403: def foo():\n '''foobar\nfoo\nbar\ndocstring'''
H403: def foo():\n '''foobar\nfoo\nbar\npretend raw: r'''
H403: class Foo(object):\n '''foobar\nfoo\nbar\ndocstring'''\n\n
"""
docstring = is_docstring(tokens, previous_logical)
if docstring:
if '\n' not in docstring:
# not a multi line
return
else:
last_line = docstring.split('\n')[-1]
pos = max(last_line.rfind(i) for i in END_DOCSTRING_TRIPLE)
if len(last_line[:pos].strip()) > 0:
# Something before the end docstring triple
return (pos,
"H403: multi line docstrings should end on a new line")
@core.flake8ext
def hacking_docstring_multiline_start(physical_line, previous_logical, tokens):
r"""Check multi line docstring starts immediately with summary.
OpenStack HACKING guide recommendation for docstring:
Docstring should start with a one-line summary, less than 80 characters.
Okay: '''foobar\n\nfoo\nbar\n'''
Okay: def foo():\n a = '''\nnot\na docstring\n'''
H404: def foo():\n '''\nfoo\nbar\n'''\n\n
H404: def foo():\n r'''\nfoo\nbar\n'''\n\n
"""
docstring = is_docstring(tokens, previous_logical)
if docstring:
if '\n' not in docstring:
# single line docstring
return
start, start_triple = _find_first_of(docstring, START_DOCSTRING_TRIPLE)
lines = docstring.split('\n')
if lines[0].strip() == start_triple:
# docstrings get tokenized on the last line of the docstring, so
# we don't know the exact position.
return (0, "H404: multi line docstring "
"should start without a leading new line")
@core.flake8ext
def hacking_docstring_summary(physical_line, previous_logical, tokens):
r"""Check multi line docstring summary is separated with empty line.
OpenStack HACKING guide recommendation for docstring:
Docstring should start with a one-line summary, less than 80 characters.
Okay: def foo():\n a = '''\nnot\na docstring\n'''
Okay: '''foobar\n\nfoo\nbar\n'''
H405: def foo():\n '''foobar\nfoo\nbar\n'''
H405: def foo():\n r'''foobar\nfoo\nbar\n'''
H405: def foo():\n '''foobar\n'''
"""
docstring = is_docstring(tokens, previous_logical)
if docstring:
if '\n' not in docstring:
# not a multi line docstring
return
lines = docstring.split('\n')
        if len(lines) > 1 and len(lines[1].strip()) != 0:
# docstrings get tokenized on the last line of the docstring, so
# we don't know the exact position.
return (0, "H405: multi line docstring "
"summary not separated with an empty line")
def is_docstring(tokens, previous_logical):
"""Return found docstring
'A docstring is a string literal that occurs as the first statement in a
module, function, class,'
http://www.python.org/dev/peps/pep-0257/#what-is-a-docstring
"""
for token_type, text, start, _, _ in tokens:
if token_type == tokenize.STRING:
break
elif token_type != tokenize.INDENT:
return False
else:
return False
line = text.lstrip()
start, start_triple = _find_first_of(line, START_DOCSTRING_TRIPLE)
if (previous_logical.startswith("def ") or
previous_logical.startswith("class ")):
if start == 0:
return text
def _find_first_of(line, substrings):
"""Find earliest occurrence of one of substrings in line.
Returns pair of index and found substring, or (-1, None)
if no occurrences of any of substrings were found in line.
"""
starts = ((line.find(i), i) for i in substrings)
found = [(i, sub) for i, sub in starts if i != -1]
if found:
return min(found)
else:
return -1, None
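if __name__ == '__main__':
    # Hedged sketch: exercise the helper directly. The sample strings are
    # illustrative only and are not part of the hacking test suite.
    assert _find_first_of('"""A docstring."""', START_DOCSTRING_TRIPLE) == (0, '"""')
    assert _find_first_of('x = 1', START_DOCSTRING_TRIPLE) == (-1, None)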
|
zancas/hacking
|
hacking/checks/docstrings.py
|
Python
|
apache-2.0
| 6,197
|
import pickle
import random
from nose.tools import assert_raises, eq_
from dbdb.binary_tree import BinaryNode, BinaryTree, BinaryNodeRef, ValueRef
class StubStorage(object):
def __init__(self):
self.d = [0]
self.locked = False
def lock(self):
if not self.locked:
self.locked = True
return True
else:
return False
def unlock(self):
pass
def get_root_address(self):
return 0
def write(self, string):
address = len(self.d)
self.d.append(string)
return address
def read(self, address):
return self.d[address]
class TestBinaryTree(object):
def setup(self):
self.tree = BinaryTree(StubStorage())
def test_get_missing_key_raises_key_error(self):
with assert_raises(KeyError):
self.tree.get('Not A Key In The Tree')
def test_set_and_get_key(self):
self.tree.set('a', 'b')
eq_(self.tree.get('a'), 'b')
def test_random_set_and_get_keys(self):
ten_k = list(range(10000))
pairs = list(zip(random.sample(ten_k, 10), random.sample(ten_k, 10)))
for i, (k, v) in enumerate(pairs, start=1):
self.tree.set(k, v)
eq_(len(self.tree), i)
for k, v in pairs:
eq_(self.tree.get(k), v)
random.shuffle(pairs)
for i, (k, v) in enumerate(pairs, start=1):
self.tree.pop(k)
eq_(len(self.tree), len(pairs) - i)
def test_overwrite_and_get_key(self):
self.tree.set('a', 'b')
self.tree.set('a', 'c')
eq_(self.tree.get('a'), 'c')
def test_pop_non_existent_key(self):
with assert_raises(KeyError):
self.tree.pop('Not A Key In The Tree')
def test_del_leaf_key(self):
self.tree.set('b', '2')
self.tree.pop('b')
with assert_raises(KeyError):
self.tree.get('b')
def test_del_left_node_key(self):
self.tree.set('b', '2')
self.tree.set('a', '1')
self.tree.pop('b')
with assert_raises(KeyError):
self.tree.get('b')
self.tree.get('a')
def test_del_right_node_key(self):
self.tree.set('b', '2')
self.tree.set('c', '3')
self.tree.pop('b')
with assert_raises(KeyError):
self.tree.get('b')
self.tree.get('c')
def test_del_full_node_key(self):
self.tree.set('b', '2')
self.tree.set('a', '1')
self.tree.set('c', '3')
self.tree.pop('b')
with assert_raises(KeyError):
self.tree.get('b')
self.tree.get('a')
self.tree.get('c')
class TestBinaryNodeRef(object):
def test_to_string_leaf(self):
n = BinaryNode(BinaryNodeRef(), 'k', ValueRef(address=999), BinaryNodeRef(), 1)
pickled = BinaryNodeRef.referent_to_string(n)
d = pickle.loads(pickled)
eq_(d['left'], 0)
eq_(d['key'], 'k')
eq_(d['value'], 999)
eq_(d['right'], 0)
def test_to_string_nonleaf(self):
left_ref = BinaryNodeRef(address=123)
right_ref = BinaryNodeRef(address=321)
n = BinaryNode(left_ref, 'k', ValueRef(address=999), right_ref, 3)
pickled = BinaryNodeRef.referent_to_string(n)
d = pickle.loads(pickled)
eq_(d['left'], 123)
eq_(d['key'], 'k')
eq_(d['value'], 999)
eq_(d['right'], 321)
|
yntantan/beetles
|
dbdb/tests/test_binary_tree.py
|
Python
|
apache-2.0
| 3,440
|
# 341. Flatten Nested List Iterator
#
# Given a nested list of integers, implement an iterator to flatten it.
#
# Each element is either an integer, or a list -- whose elements may also be integers or other lists.
#
# Example 1:
# Given the list [[1,1],2,[1,1]],
#
# By calling next repeatedly until hasNext returns false, the order of elements returned by next should be: [1,1,2,1,1].
#
# Example 2:
# Given the list [1,[4,[6]]],
#
# By calling next repeatedly until hasNext returns false, the order of elements returned by next should be: [1,4,6].
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class NestedIterator(object):
def __init__(self, nestedList):
"""
Initialize your data structure here.
:type nestedList: List[NestedInteger]
"""
self.stack = nestedList[::-1]
def next(self):
"""
:rtype: int
"""
return self.stack.pop().getInteger()
def hasNext(self):
"""
:rtype: bool
"""
while self.stack:
top = self.stack[-1]
if top.isInteger():
return True
            # flatten the nested list and push it back onto the top of the stack
self.stack = self.stack[:-1] + top.getList()[::-1]
return False
# Your NestedIterator object will be instantiated and called as such:
# i, v = NestedIterator(nestedList), []
# while i.hasNext(): v.append(i.next())
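# Hedged sketch: LeetCode supplies NestedInteger at runtime; the minimal stub
# below is an assumption used only to exercise NestedIterator locally.
if __name__ == '__main__':
    class NestedIntegerStub(object):
        def __init__(self, value):
            self._value = value
        def isInteger(self):
            return not isinstance(self._value, list)
        def getInteger(self):
            return self._value if self.isInteger() else None
        def getList(self):
            return self._value if not self.isInteger() else None
    NI = NestedIntegerStub
    nested = [NI([NI(1), NI(1)]), NI(2), NI([NI(1), NI(1)])]
    i, v = NestedIterator(nested), []
    while i.hasNext():
        v.append(i.next())
    assert v == [1, 1, 2, 1, 1]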
|
gengwg/leetcode
|
341_flatten_nested_list_iterator.py
|
Python
|
apache-2.0
| 2,196
|
import os
import sys
import ctypes
import ctypes.util
from ctypes import c_char_p, c_int, c_size_t, c_void_p
# Note: ctypes.util.find_library() expects the base library name without the
# "lib" prefix or ".so" suffix.
glfs_dll = ctypes.util.find_library("glusterfs")
if glfs_dll:
    glfs = ctypes.CDLL(glfs_dll)
xdr_dll = ctypes.util.find_library("gfxdr")
if xdr_dll:
    xdr = ctypes.CDLL(xdr_dll)
api_gfapi_dll = ctypes.util.find_library("gfapi")
if api_gfapi_dll:
    api = ctypes.CDLL(api_gfapi_dll)
class GFApi:
def __init__(self, **argv):
self.fs = api.glfs_new(argv['volume'])
self.directory = argv['directory']
self.filename = argv['filename']
self.dirfd = c_void_p
api.glfs_set_logging(self.fs, "/dev/stderr", 7)
api.glfs_set_volfile_server(self.fs, "tcp", "localhost", 24007)
api.glfs_init(self.fs)
def gf_opendir(self):
self.dirfd = api.glfs_opendir(self.fs, self.directory)
def gf_readdir(self):
api.glfs_readdir_r(self.dirfd, self.entry, self.results)
def gf_open(self):
self.fd = api.glfs_open(self.fs, self.filename, os.O_RDWR)
## TODO
|
harshavardhana/bungee-plugins
|
pygfapi/__init__.py
|
Python
|
apache-2.0
| 997
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2013 The Font Bakery Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
from __future__ import print_function
import os
import sys
import yaml
from fontaine.font import FontFactory
from fontaine.cmap import Library
from bakery_cli.utils import UpstreamDirectory
class Widgets(object):
def __init__(self, app):
self.app = app
self.commit = urwid.Edit(edit_text=app.commit)
self.ttfautohint = urwid.Edit(edit_text=app.ttfautohint)
self.newfamily = urwid.Edit(edit_text=app.newfamily)
self.pyftsubset = urwid.Edit(edit_text=app.pyftsubset)
self.notes = urwid.Edit(edit_text=app.notes)
self.afdko_parameters = urwid.Edit(edit_text=app.afdko)
self.compiler = []
self.licenses = []
self.process_files = []
self.subset = []
def on_checkbox_state_change(self, widget, state, user_data):
self.app.config[user_data['name']] = state
def on_subset_state_change(self, widget, state, user_data):
if 'subset' not in self.app.config:
self.app.config['subset'] = []
try:
index = self.app.config['subset'].index(user_data['name'])
if user_data['name'] == 'devanagari':
self.ttfautohint.edit_text = self.ttfautohint.edit_text.replace('-f deva', '')
self.ttfautohint.edit_text = self.ttfautohint.edit_text.replace('-D deva', '')
self.ttfautohint.edit_text = self.ttfautohint.edit_text.strip()
self.ttfautohint.edit_text = self.ttfautohint.edit_text.replace(' ', ' ')
del self.app.config['subset'][index]
except ValueError:
if user_data['name'] == 'devanagari':
self.ttfautohint.edit_text += ' -f deva -D deva'
self.app.config['subset'].append(user_data['name'])
def create_checkbox(self, title, name, state=False):
return urwid.CheckBox(title, user_data={'name': name}, state=state,
on_state_change=self.on_checkbox_state_change)
def create_process_file(self, filepath):
        state = filepath in [x.lstrip('./') for x in self.app.process_files]
widget = urwid.CheckBox(filepath, state=state)
self.process_files.append(widget)
return widget
def create_subset_widget(self, subsetname, coverage):
widget = urwid.CheckBox('{0} ({1})'.format(subsetname, coverage),
state=bool(subsetname in app.subset),
user_data={'name': subsetname},
on_state_change=self.on_subset_state_change)
self.subset.append(widget)
return widget
class App(object):
commit = 'HEAD'
process_files = []
subset = []
compiler = 'fontforge'
ttfautohint = '-l 7 -r 28 -G 50 -x 13 -w "G"'
afdko = ''
downstream = True
optimize = True
license = ''
pyftsubset = '--notdef-outline --name-IDs=* --hinting'
notes = ''
newfamily = ''
fontcrunch = False
config = {}
configfile = 'bakery.yaml'
def __init__(self, directory):
os.chdir(directory)
if os.path.exists('bakery.yaml'):
self.configfile = 'bakery.yaml'
self.config = yaml.load(open('bakery.yaml'))
elif os.path.exists('bakery.yml'):
self.config = yaml.load(open('bakery.yml'))
self.configfile = 'bakery.yml'
self.commit = self.config.get('commit', 'HEAD')
self.process_files = self.config.get('process_files', [])
self.subset = self.config.get('subset', [])
self.compiler = self.config.get('compiler', 'fontforge')
self.ttfautohint = self.config.get('ttfautohint', '-l 7 -r 28 -G 50 -x 13 -w "G"')
self.afdko = self.config.get('afdko', '')
self.license = self.config.get('license', '')
self.pyftsubset = self.config.get('pyftsubset',
'--notdef-outline --name-IDs=* --hinting')
self.notes = self.config.get('notes', '')
self.newfamily = self.config.get('newfamily', '')
self.widgets = Widgets(self)
def save(self, *args, **kwargs):
if os.path.exists(self.configfile):
print('{} exists...'.format(self.configfile))
self.configfile = '{}.new'.format(self.configfile)
while os.path.exists(self.configfile):
self.configfile = '{}.new'.format(self.configfile)
self.config['commit'] = self.widgets.commit.get_edit_text()
if not self.config['commit']:
del self.config['commit']
self.config['ttfautohint'] = self.widgets.ttfautohint.get_edit_text()
self.config['newfamily'] = self.widgets.newfamily.get_edit_text()
if not self.config['newfamily']:
del self.config['newfamily']
self.config['pyftsubset'] = self.widgets.pyftsubset.get_edit_text()
self.config['process_files'] = [w.get_label()
for w in self.widgets.process_files
if w.get_state()]
self.config['compiler'] = ', '.join([w.get_label()
for w in self.widgets.compiler
if w.get_state()])
self.config['license'] = ', '.join([w.get_label().replace(' (exists)', '')
for w in self.widgets.licenses
if w.get_state()])
self.config['notes'] = self.widgets.notes.get_edit_text()
if not self.config['notes']:
del self.config['notes']
self.config['afdko'] = self.widgets.afdko_parameters.get_edit_text()
if not self.config['afdko']:
del self.config['afdko']
yaml.safe_dump(self.config, open(self.configfile, 'w'))
print('Wrote {}'.format(self.configfile))
raise urwid.ExitMainLoop()
def get_subsets_coverage_data(source_fonts_paths):
""" Return dict mapping key to the corresponding subsets coverage
{'subsetname':
{'fontname-light': 13, 'fontname-bold': 45},
'subsetname':
{'fontname-light': 9, 'fontname-bold': 100}
}
"""
library = Library(collections=['subsets'])
subsets = {}
for fontpath in source_fonts_paths:
if fontpath.lower().endswith('.sfd'):
continue
try:
font = FontFactory.openfont(fontpath)
except AssertionError:
continue
for info in font.get_orthographies(_library=library):
subsetname = info.charset.common_name.replace('Subset ', '')
if subsetname not in subsets:
subsets[subsetname] = {}
subsets[subsetname][fontpath] = info.coverage
return subsets
def generate_subsets_coverage_list():
directory = UpstreamDirectory('.')
source_fonts_paths = []
    # `get_sources_list` returns a list of paths relative to the root.
    # To build absolute paths, join the root and each path with os.path.join.
for p in directory.ALL_FONTS:
if p.lower().endswith('.sfd'):
continue
source_fonts_paths.append(p)
return get_subsets_coverage_data(source_fonts_paths)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('directory')
args = parser.parse_args()
directory = UpstreamDirectory(args.directory)
process_files = [x for x in directory.ALL_FONTS if not x.lower().endswith('.sfd')]
import urwid.curses_display
import urwid.raw_display
import urwid.web_display
import urwid
def show_or_exit(key):
if key in ('q', 'Q', 'esc'):
raise urwid.ExitMainLoop()
header = urwid.Text("Fontbakery Setup. Q exits.")
app = App(args.directory)
widgets = []
if os.path.exists('.git/config'):
githead = urwid.Text(u"Build a specific git commit, or HEAD? ")
widgets.append(urwid.AttrMap(githead, 'key'))
widgets.append(urwid.LineBox(app.widgets.commit))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(urwid.Text('Which files to process?'), 'key'))
for f in process_files:
widgets.append(app.widgets.create_process_file(f))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(
urwid.Text('License filename?'), 'key'))
for f in ['OFL.txt', 'LICENSE.txt', 'LICENSE']:
if os.path.exists(f):
widgets.append(urwid.RadioButton(app.widgets.licenses, f + ' (exists)',
state=bool(f == app.license)))
else:
widgets.append(urwid.RadioButton(app.widgets.licenses, f,
state=bool(f == app.license)))
widgets.append(urwid.Divider())
widgets.append(
urwid.AttrMap(
urwid.Text('What subsets do you want to create?'), 'key'))
subsets = generate_subsets_coverage_list()
for s in sorted(subsets):
ll = ', '.join(set(['{}%'.format(subsets[s][k])
for k in subsets[s] if subsets[s][k]]))
if ll:
widgets.append(app.widgets.create_subset_widget(s, ll))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(
urwid.Text('ttfautohint command line parameters?'), 'key'))
widgets.append(urwid.LineBox(app.widgets.ttfautohint))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(
urwid.Text(('New font family name (ie, replacing repo'
' codename with RFN)?')), 'key'))
widgets.append(urwid.LineBox(app.widgets.newfamily))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(app.widgets.create_checkbox('Use FontCrunch?', 'fontcrunch', app.fontcrunch), 'key'))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(app.widgets.create_checkbox('Run tests?', 'downstream', app.downstream), 'key'))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(app.widgets.create_checkbox('Run optimize?', 'optimize', app.optimize), 'key'))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(
urwid.Text('pyftsubset defaults parameters?'), 'key'))
widgets.append(urwid.LineBox(app.widgets.pyftsubset))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(
urwid.Text('Which compiler to use?'), 'key'))
widgets.append(urwid.Divider())
quote = ('By default, bakery uses fontforge to build fonts from ufo.'
' But some projects use automake, or their own build system'
' and perhaps the AFDKO.')
widgets.append(urwid.Padding(urwid.Text(quote), left=4))
widgets.append(urwid.Divider())
choices = ['fontforge', 'afdko', 'make', 'build.py']
for choice in choices:
widgets.append(urwid.RadioButton(app.widgets.compiler, choice,
state=bool(choice == app.compiler)))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(
urwid.Text('afdko default command line parameters?'), 'key'))
widgets.append(urwid.LineBox(app.widgets.afdko_parameters))
widgets.append(urwid.Divider())
widgets.append(urwid.AttrMap(
urwid.Text('Notes to display on Summary page?'), 'key'))
widgets.append(urwid.LineBox(app.widgets.notes))
widgets.append(urwid.Button(u'Save and Exit', on_press=app.save))
header = urwid.AttrWrap(header, 'header')
lw = urwid.SimpleListWalker(widgets)
listbox = urwid.ListBox(lw)
listbox = urwid.AttrWrap(listbox, 'listbox')
top = urwid.Frame(listbox, header)
palette = [('header', 'black', 'dark cyan', 'standout'),
('key', 'white', 'dark blue', 'bold'),
('listbox', 'light gray', 'black')]
loop = urwid.MainLoop(top, palette, unhandled_input=show_or_exit)
loop.run()
|
jessamynsmith/fontbakery
|
tools/fontbakery-setup.py
|
Python
|
apache-2.0
| 12,295
|
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Publish a sample using the preferred RPC mechanism.
"""
import abc
import itertools
import operator
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
import six.moves.urllib.parse as urlparse
from ceilometer.i18n import _, _LE, _LI
from ceilometer import messaging
from ceilometer import publisher
from ceilometer.publisher import utils
LOG = log.getLogger(__name__)
NOTIFIER_OPTS = [
cfg.StrOpt('metering_topic',
default='metering',
help='The topic that ceilometer uses for metering '
'notifications.',
),
cfg.StrOpt('event_topic',
default='event',
help='The topic that ceilometer uses for event '
'notifications.',
),
cfg.StrOpt('telemetry_driver',
default='messagingv2',
help='The driver that ceilometer uses for metering '
'notifications.',
deprecated_name='metering_driver',
)
]
cfg.CONF.register_opts(NOTIFIER_OPTS,
group="publisher_notifier")
cfg.CONF.import_opt('host', 'ceilometer.service')
class DeliveryFailure(Exception):
def __init__(self, message=None, cause=None):
super(DeliveryFailure, self).__init__(message)
self.cause = cause
def raise_delivery_failure(exc):
excutils.raise_with_cause(DeliveryFailure,
encodeutils.exception_to_unicode(exc),
cause=exc)
@six.add_metaclass(abc.ABCMeta)
class MessagingPublisher(publisher.PublisherBase):
def __init__(self, parsed_url):
options = urlparse.parse_qs(parsed_url.query)
# the value of options is a list of url param values
# only take care of the latest one if the option
# is provided more than once
self.per_meter_topic = bool(int(
options.get('per_meter_topic', [0])[-1]))
self.policy = options.get('policy', ['default'])[-1]
self.max_queue_length = int(options.get(
'max_queue_length', [1024])[-1])
self.max_retry = 0
self.local_queue = []
if self.policy in ['default', 'queue', 'drop']:
LOG.info(_LI('Publishing policy set to %s') % self.policy)
else:
            LOG.warning(_('Publishing policy is unknown (%s); forcing to '
                          'default') % self.policy)
self.policy = 'default'
self.retry = 1 if self.policy in ['queue', 'drop'] else None
def publish_samples(self, context, samples):
"""Publish samples on RPC.
:param context: Execution context from the service or RPC call.
:param samples: Samples from pipeline after transformation.
"""
meters = [
utils.meter_message_from_counter(
sample, cfg.CONF.publisher.telemetry_secret)
for sample in samples
]
topic = cfg.CONF.publisher_notifier.metering_topic
self.local_queue.append((context, topic, meters))
if self.per_meter_topic:
for meter_name, meter_list in itertools.groupby(
sorted(meters, key=operator.itemgetter('counter_name')),
operator.itemgetter('counter_name')):
meter_list = list(meter_list)
topic_name = topic + '.' + meter_name
LOG.debug('Publishing %(m)d samples on %(n)s',
{'m': len(meter_list), 'n': topic_name})
self.local_queue.append((context, topic_name, meter_list))
self.flush()
def flush(self):
        # NOTE(sileht): self.local_queue is swapped out before processing so
        # that a concurrent call can keep appending to a fresh queue; any
        # messages left unprocessed are put back in front of whatever was
        # added in the meantime.
queue = self.local_queue
self.local_queue = []
self.local_queue = (self._process_queue(queue, self.policy) +
self.local_queue)
if self.policy == 'queue':
self._check_queue_length()
def _check_queue_length(self):
queue_length = len(self.local_queue)
if queue_length > self.max_queue_length > 0:
count = queue_length - self.max_queue_length
self.local_queue = self.local_queue[count:]
LOG.warning(_("Publisher max local_queue length is exceeded, "
"dropping %d oldest samples") % count)
def _process_queue(self, queue, policy):
current_retry = 0
while queue:
context, topic, data = queue[0]
try:
self._send(context, topic, data)
except DeliveryFailure:
data = sum([len(m) for __, __, m in queue])
if policy == 'queue':
LOG.warning(_("Failed to publish %d datapoints, queue "
"them"), data)
return queue
elif policy == 'drop':
LOG.warning(_("Failed to publish %d datapoints, "
"dropping them"), data)
return []
current_retry += 1
if current_retry >= self.max_retry:
LOG.exception(_LE("Failed to retry to send sample data "
"with max_retry times"))
raise
else:
queue.pop(0)
return []
def publish_events(self, context, events):
"""Send an event message for publishing
:param context: Execution context from the service or RPC call
:param events: events from pipeline after transformation
"""
ev_list = [utils.message_from_event(
event, cfg.CONF.publisher.telemetry_secret) for event in events]
topic = cfg.CONF.publisher_notifier.event_topic
self.local_queue.append((context, topic, ev_list))
self.flush()
@abc.abstractmethod
def _send(self, context, topic, meters):
"""Send the meters to the messaging topic."""
class NotifierPublisher(MessagingPublisher):
def __init__(self, parsed_url, default_topic):
super(NotifierPublisher, self).__init__(parsed_url)
options = urlparse.parse_qs(parsed_url.query)
topic = options.get('topic', [default_topic])[-1]
self.notifier = oslo_messaging.Notifier(
messaging.get_transport(),
driver=cfg.CONF.publisher_notifier.telemetry_driver,
publisher_id='telemetry.publisher.%s' % cfg.CONF.host,
topic=topic,
retry=self.retry
)
def _send(self, context, event_type, data):
try:
self.notifier.sample(context.to_dict(), event_type=event_type,
payload=data)
except oslo_messaging.MessageDeliveryFailure as e:
raise_delivery_failure(e)
class SampleNotifierPublisher(NotifierPublisher):
def __init__(self, parsed_url):
super(SampleNotifierPublisher, self).__init__(
parsed_url, cfg.CONF.publisher_notifier.metering_topic)
class EventNotifierPublisher(NotifierPublisher):
def __init__(self, parsed_url):
super(EventNotifierPublisher, self).__init__(
parsed_url, cfg.CONF.publisher_notifier.event_topic)
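if __name__ == '__main__':
    # Hedged sketch: the URL below is illustrative only. It mirrors how
    # MessagingPublisher.__init__ reads its query options; when an option is
    # repeated, the last value wins.
    sample_url = urlparse.urlparse(
        'notifier://?policy=queue&max_queue_length=512&per_meter_topic=1')
    options = urlparse.parse_qs(sample_url.query)
    print(options.get('policy', ['default'])[-1])              # queue
    print(int(options.get('max_queue_length', [1024])[-1]))    # 512
    print(bool(int(options.get('per_meter_topic', [0])[-1])))  # True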
|
idegtiarov/ceilometer
|
ceilometer/publisher/messaging.py
|
Python
|
apache-2.0
| 8,139
|
# Copyright (c) 2016-2019 Chris Reed
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import instructions
from .decoder import (DECODER_TREE, UndefinedInstructionError)
decoder = DECODER_TREE
decoder.build()
class Disassembler(object):
def __init__(self):
pass
def disasm(self, data, address=0):
length = len(data)
endAddress = address + length
offset = 0
while address < endAddress:
# Decode the next instruction.
try:
i = decoder.decode(data[offset:], address)
except UndefinedInstructionError:
# Ignore the undefined error if it's the last few bytes.
if endAddress - address < 4:
return
raise
# Return this instruction to the caller.
yield i
# Update address based on instruction length.
address += i.size
offset += i.size
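if __name__ == '__main__':
    # Hedged usage sketch: disassemble raw bytes from a file given on the
    # command line. The printable form of each yielded instruction is an
    # assumption; the code above only relies on its .size attribute.
    import sys
    with open(sys.argv[1], 'rb') as f:
        for instruction in Disassembler().disasm(f.read(), address=0):
            print(instruction)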
|
flit/cmdis
|
cmdis/disasm.py
|
Python
|
apache-2.0
| 1,507
|
from django.conf import settings
from storages.backends.s3boto3 import S3Boto3Storage
class DefaultStorage(S3Boto3Storage):
location = settings.DEFAULT_LOCATION
file_overwrite = True
class StaticStorage(S3Boto3Storage):
location = settings.STATICFILES_LOCATION
file_overwrite = True
class LogoStorage(S3Boto3Storage):
location = settings.LOGO_LOCATION
file_overwrite = True
class AgencyImageStorage(S3Boto3Storage):
location = settings.AGENCY_IMAGE_LOCATION
file_overwrite = True
class AgencyNationStorage(S3Boto3Storage):
location = settings.AGENCY_NATION_LOCATION
file_overwrite = True
class OrbiterImageStorage(S3Boto3Storage):
location = settings.ORBITER_IMAGE_LOCATION
file_overwrite = True
class LauncherImageStorage(S3Boto3Storage):
location = settings.LAUNCHER_IMAGE_LOCATION
file_overwrite = True
class LaunchImageStorage(S3Boto3Storage):
location = settings.LAUNCH_IMAGE_LOCATION
file_overwrite = True
class EventImageStorage(S3Boto3Storage):
location = settings.EVENT_IMAGE_LOCATION
file_overwrite = True
class AppImageStorage(S3Boto3Storage):
location = settings.APP_IMAGE_LOCATION
file_overwrite = True
class AstronautImageStorage(S3Boto3Storage):
location = settings.ASTRONAUT_IMAGE_LOCATION
file_overwrite = True
class SpaceStationImageStorage(S3Boto3Storage):
location = settings.SPACESTATION_IMAGE_LOCATION
file_overwrite = True
class LauncherCoreImageStorage(S3Boto3Storage):
location = settings.LAUNCHER_CORE_IMAGE_LOCATION
file_overwrite = True
|
ItsCalebJones/SpaceLaunchNow-Server
|
custom_storages.py
|
Python
|
apache-2.0
| 1,595
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.estimator.export import export as export_helpers
from tensorflow.python.estimator.export import export_output
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.summary import summary
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import device_setter
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.training import warm_starting_util
from tensorflow.python.util import compat
from tensorflow.python.util import compat_internal
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
_VALID_MODEL_FN_ARGS = set(
['features', 'labels', 'mode', 'params', 'self', 'config'])
@tf_export('estimator.Estimator')
class Estimator(object):
"""Estimator class to train and evaluate TensorFlow models.
The `Estimator` object wraps a model which is specified by a `model_fn`,
which, given inputs and a number of other parameters, returns the ops
necessary to perform training, evaluation, or predictions.
All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a
subdirectory thereof. If `model_dir` is not set, a temporary directory is
used.
The `config` argument can be passed `RunConfig` object containing information
about the execution environment. It is passed on to the `model_fn`, if the
`model_fn` has a parameter named "config" (and input functions in the same
manner). If the `config` parameter is not passed, it is instantiated by the
`Estimator`. Not passing config means that defaults useful for local execution
are used. `Estimator` makes config available to the model (for instance, to
allow specialization based on the number of workers available), and also uses
some of its fields to control internals, especially regarding checkpointing.
The `params` argument contains hyperparameters. It is passed to the
`model_fn`, if the `model_fn` has a parameter named "params", and to the input
functions in the same manner. `Estimator` only passes params along, it does
not inspect it. The structure of `params` is therefore entirely up to the
developer.
None of `Estimator`'s methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use `model_fn` to configure
the base class, and may add methods implementing specialized functionality.
"""
def __init__(self, model_fn, model_dir=None, config=None, params=None,
warm_start_from=None):
"""Constructs an `Estimator` instance.
See @{$estimators} for more information. To warm-start an `Estimator`:
```python
estimator = tf.estimator.DNNClassifier(
feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
hidden_units=[1024, 512, 256],
warm_start_from="/path/to/checkpoint/dir")
```
For more details on warm-start configuration, see
@{tf.estimator.WarmStartSettings$WarmStartSettings}.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: This is the first item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same.
* `labels`: This is the second item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `Tensor` or `dict` of same (for multi-head models). If
mode is `ModeKeys.PREDICT`, `labels=None` will be passed. If
the `model_fn`'s signature does not accept `mode`, the
`model_fn` must still be able to handle `labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation, or
            prediction. See `ModeKeys`.
* `params`: Optional `dict` of hyperparameters. Will receive what
            is passed to Estimator in `params` parameter. This allows
            Estimators to be configured from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your `model_fn` based on
configuration such as `num_ps_replicas`, or `model_dir`.
* Returns:
`EstimatorSpec`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `PathLike` object, the
path will be resolved. If `None`, the model_dir in `config` will be used
if set. If both are set, they must be same. If both are `None`, a
temporary directory will be used.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings`
object to fully configure warm-starting. If the string
filepath is provided instead of a `WarmStartSettings`,
then all variables are warm-started, and it is assumed
that vocabularies and Tensor names are unchanged.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
ValueError: if this is called via a subclass and if that class overrides
a member of `Estimator`.
"""
Estimator._assert_members_are_not_overridden(self)
if config is None:
self._config = run_config.RunConfig()
logging.info('Using default config.')
else:
if not isinstance(config, run_config.RunConfig):
raise ValueError(
'config must be an instance of RunConfig, but provided %s.' %
config)
self._config = config
# The distribute field contains an instance of DistributionStrategy.
self._distribution = self._config.train_distribute
# Model directory.
model_dir = compat_internal.path_to_str(model_dir)
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(alanyee): remove this suppression after it is no longer needed
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
if self._config.session_config is None:
rewrite_opts = rewriter_config_pb2.RewriterConfig(
meta_optimizer_iterations=rewriter_config_pb2.RewriterConfig.ONE)
graph_opts = config_pb2.GraphOptions(rewrite_options=rewrite_opts)
self._session_config = config_pb2.ConfigProto(
allow_soft_placement=True, graph_options=graph_opts)
else:
self._session_config = self._config.session_config
self._device_fn = (
self._config.device_fn or _get_replica_device_setter(self._config))
if model_fn is None:
raise ValueError('model_fn must be provided to Estimator.')
_verify_model_fn_args(model_fn, params)
self._model_fn = model_fn
self._params = copy.deepcopy(params or {})
# pylint: disable=protected-access
self._warm_start_settings = _get_default_warm_start_settings(
warm_start_from)
# pylint: enable=protected-access
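  # Hedged sketch of a `model_fn` matching the signature documented above. The
  # feature name, layers and optimizer below are assumptions for illustration
  # only; they are not part of this module.
  #
  #   def my_model_fn(features, labels, mode, params, config):
  #     logits = tf.layers.dense(features['x'], params['n_classes'])
  #     loss = tf.losses.sparse_softmax_cross_entropy(labels, logits)
  #     train_op = tf.train.AdamOptimizer().minimize(
  #         loss, global_step=tf.train.get_global_step())
  #     return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
  #
  #   estimator = tf.estimator.Estimator(my_model_fn, params={'n_classes': 10})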
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return copy.deepcopy(self._config)
@property
def params(self):
return copy.deepcopy(self._params)
@property
def model_fn(self):
"""Returns the model_fn which is bound to self.params.
Returns:
      The model_fn with the following signature:
`def model_fn(features, labels, mode, config)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config)
return public_model_fn
# TODO(ispir): support a list of names
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string or a list of string, name of the tensor.
Returns:
Numpy array - value of the tensor.
Raises:
ValueError: If the Estimator has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
with context.graph_mode():
return training.load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
Raises:
ValueError: If the Estimator has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
with context.graph_mode():
return [name for name, _ in training.list_variables(self.model_dir)]
def latest_checkpoint(self):
"""Finds the filename of latest saved checkpoint file in `model_dir`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was
found.
"""
with context.graph_mode():
return saver.latest_checkpoint(self.model_dir)
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
"""Trains a model given training data input_fn.
Args:
input_fn: A function that provides input data for training as minibatches.
See @{$premade_estimators#create_input_functions} for more
information. The function should construct and return one of
the following:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
tuple (features, labels) with same constraints as below.
* A tuple (features, labels): Where `features` is a `Tensor` or a
dictionary of string feature name to `Tensor` and `labels` is a
`Tensor` or a dictionary of string label name to `Tensor`. Both
`features` and `labels` are consumed by `model_fn`. They should
satisfy the expectation of `model_fn` from inputs.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the training loop.
steps: Number of steps for which to train model. If `None`, train forever
or train until input_fn generates the `OutOfRange` error or
        `StopIteration` exception. `steps` works incrementally: if you call
        `train(steps=10)` twice, training occurs for a total of 20 steps. If
`OutOfRange` or `StopIteration` occurs in the middle, training stops
before 20 steps. If you don't want to have incremental behavior please
set `max_steps` instead. If set, `max_steps` must be `None`.
max_steps: Number of total steps for which to train model. If `None`,
train forever or train until input_fn generates the `OutOfRange` error
or `StopIteration` exception. If set, `steps` must be `None`. If
`OutOfRange` or `StopIteration` occurs in the middle, training stops
before `max_steps` steps.
Two calls to `train(steps=100)` means 200 training
iterations. On the other hand, two calls to `train(max_steps=100)` means
        that the second call will not do any iterations since the first call
        did all 100 steps.
saving_listeners: list of `CheckpointSaverListener` objects. Used for
callbacks that run immediately before or after checkpoint savings.
Returns:
`self`, for chaining.
Raises:
ValueError: If both `steps` and `max_steps` are not `None`.
ValueError: If either `steps` or `max_steps` is <= 0.
"""
with context.graph_mode():
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
if max_steps is not None:
start_step = _load_global_step_from_checkpoint_dir(self._model_dir)
if max_steps <= start_step:
          logging.info('Skipping training since the saved global step has '
                       'already reached max_steps.')
return self
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_train_steps_to_hooks(steps, max_steps))
saving_listeners = _check_listeners_type(saving_listeners)
loss = self._train_model(input_fn, hooks, saving_listeners)
logging.info('Loss for final step: %s.', loss)
return self
def _convert_train_steps_to_hooks(self, steps, max_steps):
if steps is not None or max_steps is not None:
return [training.StopAtStepHook(steps, max_steps)]
else:
return []
def eval_dir(self, name=None):
"""Shows directory name where evaluation metrics are dumped.
Args:
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
      A string which is the path of the directory containing the evaluation
      metrics.
"""
return os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
def evaluate(self, input_fn, steps=None, hooks=None, checkpoint_path=None,
name=None):
"""Evaluates the model given evaluation data input_fn.
For each step, calls `input_fn`, which returns one batch of data.
Evaluates until:
- `steps` batches are processed, or
- `input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`).
Args:
input_fn: A function that constructs the input data for evaluation.
See @{$premade_estimators#create_input_functions} for more
information. The function should construct and return one of
the following:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
tuple (features, labels) with same constraints as below.
* A tuple (features, labels): Where `features` is a `Tensor` or a
dictionary of string feature name to `Tensor` and `labels` is a
`Tensor` or a dictionary of string label name to `Tensor`. Both
`features` and `labels` are consumed by `model_fn`. They should
satisfy the expectation of `model_fn` from inputs.
steps: Number of steps for which to evaluate model. If `None`, evaluates
until `input_fn` raises an end-of-input exception.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the evaluation call.
checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
latest checkpoint in `model_dir` is used. If there are no checkpoints
in `model_dir`, evaluation is run with newly initialized `Variables`
instead of restored from checkpoint.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
A dict containing the evaluation metrics specified in `model_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed.
Raises:
ValueError: If `steps <= 0`.
ValueError: If no model has been trained, namely `model_dir`, or the
given `checkpoint_path` is empty.
"""
with context.graph_mode():
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_eval_steps_to_hooks(steps))
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
logging.info('Could not find trained model in model_dir: {}, running '
'initialization to evaluate.'.format(self._model_dir))
checkpoint_path = latest_path
with ops.Graph().as_default():
(scaffold, update_op,
eval_dict, all_hooks) = self._evaluate_build_graph(
input_fn, hooks, checkpoint_path)
return self._evaluate_run(
checkpoint_path=checkpoint_path,
scaffold=scaffold,
update_op=update_op,
eval_dict=eval_dict,
all_hooks=all_hooks,
output_dir=self.eval_dir(name))
def _convert_eval_steps_to_hooks(self, steps):
if steps is None:
return []
if steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
return [evaluation._StopAfterNEvalsHook(num_evals=steps)] # pylint: disable=protected-access
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
"""Yields predictions for given features.
Args:
input_fn: A function that constructs the features. Prediction continues
until `input_fn` raises an end-of-input exception (`OutOfRangeError` or
`StopIteration`).
See @{$premade_estimators#create_input_functions} for more
information. The function should construct and return one of
the following:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must have
same constraints as below.
* features: A `Tensor` or a dictionary of string feature name to
`Tensor`. features are consumed by `model_fn`. They should satisfy
the expectation of `model_fn` from inputs.
* A tuple, in which case the first item is extracted as features.
predict_keys: list of `str`, name of the keys to predict. It is used if
the `EstimatorSpec.predictions` is a `dict`. If `predict_keys` is used
then rest of the predictions will be filtered from the dictionary. If
`None`, returns all.
hooks: List of `SessionRunHook` subclass instances. Used for callbacks
inside the prediction call.
checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
latest checkpoint in `model_dir` is used. If there are no checkpoints
in `model_dir`, prediction is run with newly initialized `Variables`
instead of restored from checkpoint.
yield_single_examples: If False, yield the whole batch as returned by the
`model_fn` instead of decomposing the batch into individual elements.
This is useful if `model_fn` returns some tensors whose first dimension
is not equal to the batch size.
Yields:
Evaluated values of `predictions` tensors.
Raises:
ValueError: Could not find a trained model in `model_dir`.
ValueError: If batch length of predictions is not the same and
`yield_single_examples` is True.
ValueError: If there is a conflict between `predict_keys` and
`predictions`. For example if `predict_keys` is not `None` but
`EstimatorSpec.predictions` is not a `dict`.
"""
with context.graph_mode():
hooks = _check_hooks_type(hooks)
# Check that model has been trained.
if not checkpoint_path:
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
logging.info('Could not find trained model in model_dir: {}, running '
'initialization to predict.'.format(self._model_dir))
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
self._create_and_assert_global_step(g)
features, input_hooks = self._get_features_from_input_fn(
input_fn, model_fn_lib.ModeKeys.PREDICT)
estimator_spec = self._call_model_fn(
features, None, model_fn_lib.ModeKeys.PREDICT, self.config)
# Call to warm_start has to be after model_fn is called.
self._maybe_warm_start(checkpoint_path)
predictions = self._extract_keys(
estimator_spec.predictions, predict_keys)
all_hooks = list(input_hooks)
all_hooks.extend(hooks)
all_hooks.extend(list(estimator_spec.prediction_hooks or []))
with training.MonitoredSession(
session_creator=training.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
master=self._config.master,
scaffold=estimator_spec.scaffold,
config=self._session_config),
hooks=all_hooks) as mon_sess:
while not mon_sess.should_stop():
preds_evaluated = mon_sess.run(predictions)
if not yield_single_examples:
yield preds_evaluated
elif not isinstance(predictions, dict):
for pred in preds_evaluated:
yield pred
else:
for i in range(self._extract_batch_length(preds_evaluated)):
yield {
key: value[i]
for key, value in six.iteritems(preds_evaluated)
}
def _assert_members_are_not_overridden(self):
"""Asserts members of `Estimator` are not overridden."""
allowed_overrides = set([
'_call_input_fn', '_create_global_step',
'_convert_train_steps_to_hooks', '_convert_eval_steps_to_hooks',
'_tf_api_names', '_validate_features_in_predict_input',
'_call_model_fn', '_add_meta_graph_for_mode'
])
estimator_members = set([m for m in Estimator.__dict__.keys()
if not m.startswith('__')])
subclass_members = set(self.__class__.__dict__.keys())
common_members = estimator_members & subclass_members - allowed_overrides
overridden_members = [
m for m in common_members
if Estimator.__dict__[m] != self.__class__.__dict__[m]]
if overridden_members:
raise ValueError(
'Subclasses of Estimator cannot override members of Estimator. '
'{} does override {}'.format(self.__class__, overridden_members))
def export_savedmodel(
self, export_dir_base, serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports inference graph as a SavedModel into given dir.
For a detailed guide, see
@{$saved_model#using_savedmodel_with_estimators$Using SavedModel with Estimators}.
This method builds a new graph by first calling the
serving_input_receiver_fn to obtain feature `Tensor`s, and then calling
this `Estimator`'s model_fn to generate the model graph based on those
features. It restores the given checkpoint (or, lacking that, the most
recent checkpoint) into this graph in a fresh session. Finally it creates
a timestamped export directory below the given export_dir_base, and writes
a `SavedModel` into it containing a single `MetaGraphDef` saved from this
session.
The exported `MetaGraphDef` will provide one `SignatureDef` for each
element of the export_outputs dict returned from the model_fn, named using
the same keys. One of these keys is always
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, indicating which
signature will be served when a serving request does not specify one.
For each signature, the outputs are provided by the corresponding
`ExportOutput`s, and the inputs are always the input receivers provided by
the serving_input_receiver_fn.
Extra assets may be written into the SavedModel via the assets_extra
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and
returns a `ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
The string path to the exported directory.
Raises:
ValueError: if no serving_input_receiver_fn is provided, no export_outputs
are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
return self._export_saved_model_for_mode(
export_dir_base,
serving_input_receiver_fn,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs,
mode=model_fn_lib.ModeKeys.PREDICT)
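  # Illustrative usage sketch: exporting a SavedModel for serving. The
  # `feature_spec`, the `estimator` instance and the export base directory are
  # hypothetical; the receiver-fn builder is the public tf.estimator.export
  # helper for tf.Example parsing.
  #
  #   feature_spec = {'x': tf.FixedLenFeature([1], tf.float32)}
  #   serving_input_receiver_fn = (
  #       tf.estimator.export.build_parsing_serving_input_receiver_fn(
  #           feature_spec))
  #   export_dir = estimator.export_savedmodel(
  #       '/tmp/export_base', serving_input_receiver_fn)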
def _export_saved_model_for_mode(
self, export_dir_base, input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False,
mode=model_fn_lib.ModeKeys.PREDICT):
# pylint: disable=line-too-long
"""Exports a single train/eval/predict graph as a SavedModel.
This method is a wrapper for _export_all_saved_models, and wraps a raw
input_receiver_fn in a dictionary to pass in to that function.
See _export_all_saved_models for full docs.
See tf.contrib.estimator.export_saved_model_for_mode for the currently
exposed version of this function.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
input_receiver_fn: a function that takes no argument and
returns the appropriate subclass of `InputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
      mode: tf.estimator.ModeKeys value indicating which mode will be exported.
Returns:
The string path to the exported directory.
Raises:
ValueError: if input_receiver_fn is None, no export_outputs
are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
if not input_receiver_fn:
raise ValueError('An input_receiver_fn must be defined.')
input_receiver_fn_map = {mode: input_receiver_fn}
return self._export_all_saved_models(
export_dir_base,
input_receiver_fn_map,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs)
def _export_all_saved_models(
self, export_dir_base, input_receiver_fn_map,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports a SavedModel containing MetaGraphDefs for each requested mode.
See tf.contrib.estimator.export_all_saved_models for the currently
exposed version of this function.
For each mode passed in via the input_receiver_fn_map,
this method builds a new graph by calling the input_receiver_fn to obtain
feature and label `Tensor`s. Next, this method calls the `Estimator`'s
model_fn in the passed mode to generate the model graph based on
those features and labels, and restores the given checkpoint
(or, lacking that, the most recent checkpoint) into the graph.
Only one of the modes is used for saving variables to the SavedModel
(order of preference: TRAIN, EVAL, then PREDICT), such that up to three
MetaGraphDefs are saved with a single set of variables in a single
SavedModel directory.
    For the variables and MetaGraphDefs, this method creates a timestamped
    export directory below export_dir_base and writes a `SavedModel` into it
    containing the `MetaGraphDef` for each requested mode along with its
    associated signatures.
For prediction, the exported `MetaGraphDef` will provide one `SignatureDef`
for each element of the export_outputs dict returned from the model_fn,
named using the same keys. One of these keys is always
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, indicating which
signature will be served when a serving request does not specify one.
For each signature, the outputs are provided by the corresponding
`ExportOutput`s, and the inputs are always the input receivers provided by
the serving_input_receiver_fn.
For training and evaluation, the train_op is stored in an extra collection,
and loss, metrics, and predictions are included in a SignatureDef for the
mode in question.
Extra assets may be written into the SavedModel via the assets_extra
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
input_receiver_fn_map: dict of tf.estimator.ModeKeys to input_receiver_fn
mappings, where the input_receiver_fn is a function that takes no
argument and returns the appropriate subclass of `InputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
A dict of tf.estimator.ModeKeys value to string path for each exported
directory.
Raises:
ValueError: if any input_receiver_fn is None, no export_outputs
are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
# TODO(b/65561022): Consider allowing multiple input_receiver_fns per mode.
with context.graph_mode():
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise ValueError("Couldn't find trained model at %s." % self._model_dir)
export_dir = export_helpers.get_timestamped_export_dir(export_dir_base)
temp_export_dir = export_helpers.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
save_variables = True
# Note that the order in which we run here matters, as the first
# mode we pass through will be used to save the variables. We run TRAIN
# first, as that is also the mode used for checkpoints, and therefore
# we are not likely to have vars in PREDICT that are not in the checkpoint
# created by TRAIN.
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.TRAIN):
self._add_meta_graph_for_mode(
builder, input_receiver_fn_map, checkpoint_path,
strip_default_attrs, save_variables,
mode=model_fn_lib.ModeKeys.TRAIN)
save_variables = False
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.EVAL):
self._add_meta_graph_for_mode(
builder, input_receiver_fn_map, checkpoint_path,
strip_default_attrs, save_variables,
mode=model_fn_lib.ModeKeys.EVAL)
save_variables = False
if input_receiver_fn_map.get(model_fn_lib.ModeKeys.PREDICT):
self._add_meta_graph_for_mode(
builder, input_receiver_fn_map, checkpoint_path,
strip_default_attrs, save_variables,
mode=model_fn_lib.ModeKeys.PREDICT)
save_variables = False
if save_variables:
raise ValueError('No valid modes for exporting found. Got {}.'.format(
input_receiver_fn_map.keys()))
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(temp_export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None):
# pylint: disable=line-too-long
"""Loads variables and adds them along with a MetaGraphDef for saving.
Args:
builder: instance of SavedModelBuilder that will be used for saving.
input_receiver_fn_map: dict of tf.estimator.ModeKeys to input_receiver_fn
mappings, where the input_receiver_fn is a function that takes no
argument and returns the appropriate subclass of `InputReceiver`.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
save_variables: bool, whether variables should be saved. If False, just
the MetaGraphDef will be saved. Note that save_variables should only be
True for the first call to this function, and the SavedModelBuilder will
raise an error if that is not the case.
mode: tf.estimator.ModeKeys value indicating which mode will be exported.
export_tags: The set of tags with which to save `MetaGraphDef`. If None,
        a default set will be selected to match the passed mode.
"""
# pylint: enable=line-too-long
if export_tags is None:
export_tags = model_fn_lib.EXPORT_TAG_MAP[mode]
input_receiver_fn = input_receiver_fn_map[mode]
with ops.Graph().as_default() as g:
self._create_and_assert_global_step(g)
random_seed.set_random_seed(self._config.tf_random_seed)
input_receiver = input_receiver_fn()
# Call the model_fn and collect the export_outputs.
estimator_spec = self._call_model_fn(
features=input_receiver.features,
labels=getattr(input_receiver, 'labels', None),
mode=mode,
config=self.config)
export_outputs = self._get_export_outputs_for_spec(estimator_spec)
# Build the SignatureDefs from receivers and all outputs
signature_def_map = export_helpers.build_all_signature_defs(
input_receiver.receiver_tensors,
export_outputs,
getattr(input_receiver, 'receiver_tensors_alternatives', None),
serving_only=(mode == model_fn_lib.ModeKeys.PREDICT))
with tf_session.Session(config=self._session_config) as session:
local_init_op = (
estimator_spec.scaffold.local_init_op or
monitored_session.Scaffold.default_local_init_op())
saver_for_restore = estimator_spec.scaffold.saver or saver.Saver(
sharded=True)
try:
saver_for_restore.restore(session, checkpoint_path)
except errors.NotFoundError as e:
msg = ('Could not load all requested variables from the checkpoint. '
'Please make sure your model_fn does not expect variables '
'that were not saved in the checkpoint.\n\n'
'Encountered error with mode `{}` while restoring checkpoint '
'from: `{}`. Full Traceback:\n\n{}').format(
mode, checkpoint_path, e)
raise ValueError(msg)
# We add the train op explicitly for now, so that we don't have to
# change the Builder public interface. Note that this is a no-op
# for prediction, where train_op is None.
builder._add_train_op(estimator_spec.train_op) # pylint: disable=protected-access
meta_graph_kwargs = dict(
tags=export_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
strip_default_attrs=strip_default_attrs,
legacy_init_op=local_init_op)
if save_variables:
builder.add_meta_graph_and_variables(
session, **meta_graph_kwargs)
else:
builder.add_meta_graph(**meta_graph_kwargs)
def _get_export_outputs_for_spec(self, estimator_spec):
"""Given an EstimatorSpec, determine what our export outputs should be.
EstimatorSpecs contain export_outputs that are used for serving, but for
training and eval graphs, we must wrap the tensors of interest in
appropriate ExportOutput objects.
Args:
estimator_spec: EstimatorSpec object that will be exported.
Returns:
a dict mapping export_output_name to ExportOutput object.
Raises:
ValueError: if an appropriate ExportOutput cannot be found for the
passed EstimatorSpec.mode
"""
mode = estimator_spec.mode
if mode == model_fn_lib.ModeKeys.PREDICT:
outputs = estimator_spec.export_outputs
else:
if mode == model_fn_lib.ModeKeys.TRAIN:
output_class = export_output.TrainOutput
elif mode == model_fn_lib.ModeKeys.EVAL:
output_class = export_output.EvalOutput
else:
raise ValueError(
'Export output type not found for mode: {}'.format(mode))
export_out = output_class(
loss=estimator_spec.loss,
predictions=estimator_spec.predictions,
metrics=estimator_spec.eval_metric_ops)
outputs = {mode: export_out}
return outputs
def _get_features_from_input_fn(self, input_fn, mode):
"""Extracts the `features` from return values of `input_fn`."""
result = self._call_input_fn(input_fn, mode)
input_hooks = []
if isinstance(result, dataset_ops.Dataset):
iterator = result.make_initializable_iterator()
input_hooks.append(_DatasetInitializerHook(iterator))
result = iterator.get_next()
if isinstance(result, (list, tuple)):
# Unconditionally drop the label (the second element of result).
result = result[0]
self._validate_features_in_predict_input(result)
return result, input_hooks
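  # Illustrative sketch of an input_fn whose tf.data.Dataset return value is
  # handled by the branch above; the feature name and values are hypothetical.
  #
  #   def predict_input_fn():
  #     features = {'x': np.arange(4, dtype=np.float32)}
  #     return tf.data.Dataset.from_tensor_slices(features).batch(2)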
def _validate_features_in_predict_input(self, result):
if not _has_dataset_or_queue_runner(result):
logging.warning('Input graph does not use tf.data.Dataset or contain a '
'QueueRunner. That means predict yields forever. '
'This is probably a mistake.')
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
"""Extracts the `features` and labels from return values of `input_fn`."""
input_hooks = []
if self._distribution is not None and mode == model_fn_lib.ModeKeys.TRAIN:
result = self._distribution.distribute_dataset(
lambda: self._call_input_fn(input_fn, mode))
iterator = result.make_initializable_iterator()
input_hooks.append(_DatasetInitializerHook(iterator))
result = iterator.get_next()
else:
result = self._call_input_fn(input_fn, mode)
if isinstance(result, dataset_ops.Dataset):
iterator = result.make_initializable_iterator()
input_hooks.append(_DatasetInitializerHook(iterator))
result = iterator.get_next()
if isinstance(result, (list, tuple)):
if len(result) != 2:
raise ValueError(
'input_fn should return (features, labels) as a len 2 tuple.')
return result[0], result[1], input_hooks
return result, None, input_hooks
def _extract_batch_length(self, preds_evaluated):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in six.iteritems(preds_evaluated):
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
        raise ValueError('Batch length of predictions should be the same. %s '
                         'has a different batch length than the others.' % key)
return batch_length
def _extract_keys(self, predictions, predict_keys):
"""Extracts `predict_keys` from `predictions`."""
if not predict_keys:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'predict_keys argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in predict_keys
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, predict_keys))
return predictions
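  # Illustrative sketch (hypothetical key names): with dict predictions such
  # as {'classes': c, 'probabilities': p}, predict_keys=['probabilities']
  # keeps only that entry, and requesting a key that is absent raises the
  # ValueError above.
  #
  #   self._extract_keys({'classes': c, 'probabilities': p}, ['probabilities'])
  #   # -> {'probabilities': p}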
def _create_global_step(self, graph):
"""Creates the global step tensor in graph.
The global step tensor must be an integer type with name 'global_step' and
be added to the collection @{tf.GraphKeys.GLOBAL_STEP}.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `Tensor`.
"""
return training.create_global_step(graph)
def _create_and_assert_global_step(self, graph):
"""Creates and asserts properties of the global step.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `Tensor`.
"""
step = self._create_global_step(graph)
assert step == training.get_global_step()
assert step.dtype.is_integer
return step
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
Either features or (features, labels) where features and labels are:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
Raises:
ValueError: if input_fn takes invalid arguments.
"""
input_fn_args = function_utils.fn_args(input_fn)
kwargs = {}
if 'mode' in input_fn_args:
kwargs['mode'] = mode
if 'params' in input_fn_args:
kwargs['params'] = self.params
if 'config' in input_fn_args:
kwargs['config'] = self.config
with ops.device('/cpu:0'):
return input_fn(**kwargs)
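  # Illustrative sketch of input_fn signatures matched by the argument
  # inspection above (bodies are hypothetical):
  #
  #   def input_fn():                      # called with no extra kwargs
  #     ...
  #   def input_fn(mode, params, config):  # receives mode, params and config
  #     ...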
def _call_model_fn(self, features, labels, mode, config):
"""Calls model function.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
config: RunConfig
Returns:
An `EstimatorSpec` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
if 'labels' in model_fn_args:
kwargs['labels'] = labels
else:
if labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = config
logging.info('Calling model_fn.')
model_fn_results = self._model_fn(features=features, **kwargs)
logging.info('Done calling model_fn.')
if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
raise ValueError('model_fn should return an EstimatorSpec.')
return model_fn_results
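  # Illustrative sketch of a model_fn accepting the optional arguments probed
  # above; the loss and train_op are hypothetical placeholders.
  #
  #   def model_fn(features, labels, mode, params, config):
  #     loss = ...
  #     train_op = ...
  #     return model_fn_lib.EstimatorSpec(mode, loss=loss, train_op=train_op)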
def _train_model(self, input_fn, hooks, saving_listeners):
if self._distribution:
return self._train_model_distributed(input_fn, hooks, saving_listeners)
else:
return self._train_model_default(input_fn, hooks, saving_listeners)
def _train_model_default(self, input_fn, hooks, saving_listeners):
worker_hooks = []
with ops.Graph().as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step_tensor = self._create_and_assert_global_step(g)
training_util._get_or_create_global_step_read() # pylint: disable=protected-access
features, labels, input_hooks = (
self._get_features_and_labels_from_input_fn(
input_fn, model_fn_lib.ModeKeys.TRAIN))
worker_hooks.extend(input_hooks)
estimator_spec = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
return self._train_with_estimator_spec(estimator_spec, worker_hooks,
hooks, global_step_tensor,
saving_listeners)
def _train_model_distributed(self, input_fn, hooks, saving_listeners):
self._distribution.configure(self._session_config)
worker_hooks = []
with ops.Graph().as_default() as g:
with self._distribution.scope():
random_seed.set_random_seed(self._config.tf_random_seed)
features, labels, input_hooks = (
self._get_features_and_labels_from_input_fn(
input_fn, model_fn_lib.ModeKeys.TRAIN))
worker_hooks.extend(input_hooks)
global_step_tensor = self._create_and_assert_global_step(g)
# The default destination for the global_step_tensor fetch call is the
# CPU.
global_step_read_tensor = self._distribution.fetch(global_step_tensor)
# we want to add to the global collection in the main thread not the
# tower threads.
ops.add_to_collection(training_util.GLOBAL_STEP_READ_KEY,
global_step_read_tensor)
grouped_estimator_spec = self._distribution.call_for_each_tower(
self._call_model_fn,
features,
          labels,  # note: labels appear to always be None in this code path
model_fn_lib.ModeKeys.TRAIN,
self.config)
# TODO(anjalisridhar): Figure out how to resolve the following scaffold
# parameters: init_feed_dict, init_fn.
scaffold_list = self._distribution.unwrap(
grouped_estimator_spec.scaffold)
init_feed_dict = [
s.init_feed_dict
for s in scaffold_list
if s.init_feed_dict is not None
]
if init_feed_dict:
init_feed_dict = self._distribution.group(init_feed_dict)
else:
init_feed_dict = None
init_fn = [s.init_fn for s in scaffold_list if s.init_fn is not None]
if init_fn:
init_fn = self._distribution.group(init_fn)
else:
init_fn = None
init_op = [s.init_op for s in scaffold_list if s.init_op is not None]
if init_op:
init_op = self._distribution.group(init_op)
else:
init_op = None
ready_op = self._distribution.call_for_each_tower(
create_per_tower_ready_op, grouped_estimator_spec.scaffold)
if ready_op is not None:
ready_op = self._distribution.group(ready_op)
else:
ready_op = None
ready_for_local_init_op = self._distribution.call_for_each_tower(
create_per_tower_ready_for_local_init_op,
grouped_estimator_spec.scaffold)
if ready_for_local_init_op is not None:
ready_for_local_init_op = self._distribution.group(
ready_for_local_init_op)
else:
ready_for_local_init_op = None
local_init_op = [
s.local_init_op
for s in scaffold_list
if s.local_init_op is not None
]
if local_init_op:
local_init_op = self._distribution.group(local_init_op)
else:
local_init_op = None
summary_op = [
s.summary_op for s in scaffold_list if s.summary_op is not None
]
if summary_op:
summary_op = self._distribution.group(summary_op)
else:
summary_op = None
scaffold = monitored_session.Scaffold(
init_op=init_op,
ready_op=ready_op,
ready_for_local_init_op=ready_for_local_init_op,
local_init_op=local_init_op,
summary_op=summary_op,
init_feed_dict=init_feed_dict,
init_fn=init_fn)
def get_hooks_from_the_first_device(per_device_hooks):
hooks_list = self._distribution.unwrap(per_device_hooks)
assert hooks_list
return hooks_list[0]
training_hooks = get_hooks_from_the_first_device(
grouped_estimator_spec.training_hooks)
training_chief_hooks = get_hooks_from_the_first_device(
grouped_estimator_spec.training_chief_hooks)
estimator_spec = model_fn_lib.EstimatorSpec(
mode=grouped_estimator_spec.mode,
loss=self._distribution.unwrap(
self._distribution.reduce(distribute_lib.get_loss_reduction(),
grouped_estimator_spec.loss,
destinations='/device:CPU:0'))[0],
train_op=self._distribution.group(grouped_estimator_spec.train_op),
training_hooks=training_hooks,
training_chief_hooks=training_chief_hooks,
scaffold=scaffold)
return self._train_with_estimator_spec(estimator_spec, worker_hooks,
hooks, global_step_read_tensor,
saving_listeners)
def _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks,
global_step_tensor, saving_listeners):
"""Train a model with the given Estimator Spec."""
if self._warm_start_settings:
logging.info('Warm-starting with WarmStartSettings: %s' %
(self._warm_start_settings,))
warm_starting_util.warm_start(*self._warm_start_settings)
# Check if the user created a loss summary, and add one if they didn't.
# We assume here that the summary is called 'loss'. If it is not, we will
# make another one with the name 'loss' to ensure it shows up in the right
# graph in TensorBoard.
if not any([x.op.name == 'loss'
for x in ops.get_collection(ops.GraphKeys.SUMMARIES)]):
summary.scalar('loss', estimator_spec.loss)
ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
worker_hooks.extend(hooks)
worker_hooks.append(
training.NanTensorHook(estimator_spec.loss)
)
if self._config.log_step_count_steps is not None:
worker_hooks.append(
training.LoggingTensorHook(
{
'loss': estimator_spec.loss,
'step': global_step_tensor
},
every_n_iter=self._config.log_step_count_steps)
)
worker_hooks.extend(estimator_spec.training_hooks)
if not (estimator_spec.scaffold.saver or
ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
training.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
all_hooks = worker_hooks + list(estimator_spec.training_chief_hooks)
saver_hooks = [
h for h in all_hooks if isinstance(h, training.CheckpointSaverHook)]
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
if not saver_hooks:
chief_hooks = [
training.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=estimator_spec.scaffold)
]
saver_hooks = [chief_hooks[0]]
if saving_listeners:
if not saver_hooks:
raise ValueError(
'There should be a CheckpointSaverHook to use saving_listeners. '
'Please set one of the RunConfig.save_checkpoints_steps or '
'RunConfig.save_checkpoints_secs.')
else:
# It is expected to have one CheckpointSaverHook. If multiple, we pick
# up the first one to add listener.
saver_hooks[0]._listeners.extend(saving_listeners) # pylint: disable=protected-access
with training.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=estimator_spec.scaffold,
hooks=worker_hooks,
chief_only_hooks=(
tuple(chief_hooks) + tuple(estimator_spec.training_chief_hooks)),
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config,
log_step_count_steps=self._config.log_step_count_steps) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
return loss
def _evaluate_build_graph(self, input_fn, hooks=None, checkpoint_path=None):
"""Builds the graph and related hooks to run evaluation."""
random_seed.set_random_seed(self._config.tf_random_seed)
global_step_tensor = self._create_and_assert_global_step(
ops.get_default_graph())
features, labels, input_hooks = (
self._get_features_and_labels_from_input_fn(input_fn,
model_fn_lib.ModeKeys.EVAL))
estimator_spec = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL, self.config)
# Call to warm_start has to be after model_fn is called.
self._maybe_warm_start(checkpoint_path)
if model_fn_lib.LOSS_METRIC_KEY in estimator_spec.eval_metric_ops:
raise ValueError(
'Metric with name "%s" is not allowed, because Estimator ' %
(model_fn_lib.LOSS_METRIC_KEY) +
'already defines a default metric with the same name.')
estimator_spec.eval_metric_ops[
model_fn_lib.LOSS_METRIC_KEY] = metrics_lib.mean(estimator_spec.loss)
update_op, eval_dict = _extract_metric_update_ops(
estimator_spec.eval_metric_ops)
if ops.GraphKeys.GLOBAL_STEP in eval_dict:
raise ValueError(
'Metric with name `global_step` is not allowed, because Estimator '
'already defines a default metric with the same name.')
eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step_tensor
all_hooks = list(input_hooks)
all_hooks.extend(hooks)
all_hooks.extend(list(estimator_spec.evaluation_hooks or []))
return estimator_spec.scaffold, update_op, eval_dict, all_hooks
def _evaluate_run(self, checkpoint_path, scaffold, update_op, eval_dict,
all_hooks, output_dir):
"""Run evaluation."""
eval_results = evaluation._evaluate_once( # pylint: disable=protected-access
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=all_hooks,
config=self._session_config)
_write_dict_to_summary(
output_dir=output_dir,
dictionary=eval_results,
current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])
return eval_results
def _maybe_warm_start(self, checkpoint_path):
if not checkpoint_path and self._warm_start_settings:
logging.info('Warm-starting with WarmStartSettings: %s' %
(self._warm_start_settings,))
warm_starting_util.warm_start(*self._warm_start_settings)
def create_per_tower_ready_op(scaffold):
"""Create a Scaffold.ready_op inside a tower."""
if scaffold.ready_op:
return scaffold.ready_op
def default_ready_op():
return array_ops.concat([
variables.report_uninitialized_variables(),
resources.report_uninitialized_resources()
], 0)
return monitored_session.Scaffold.get_or_default(
'ready_op', ops.GraphKeys.READY_OP, default_ready_op)
def create_per_tower_ready_for_local_init_op(scaffold):
"""Create a Scaffold.ready_for_local_init_op inside a tower."""
if scaffold.ready_for_local_init_op:
return scaffold.ready_for_local_init_op
def default_ready_for_local_init_op():
return variables.report_uninitialized_variables(
variables.global_variables())
return monitored_session.Scaffold.get_or_default(
'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
default_ready_for_local_init_op)
def _check_checkpoint_available(model_dir):
latest_path = saver.latest_checkpoint(model_dir)
if not latest_path:
raise ValueError(
'Could not find trained model in model_dir: {}.'.format(model_dir))
def _check_hooks_type(hooks):
"""Returns hooks if all are SessionRunHook, raises TypeError otherwise."""
hooks = list(hooks or [])
for h in hooks:
if not isinstance(h, training.SessionRunHook):
raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h))
return hooks
def _check_listeners_type(saving_listeners):
"""Check listeners type."""
listeners = list(saving_listeners or [])
for l in listeners:
if not isinstance(l, training.CheckpointSaverListener):
raise TypeError(
'saving_listeners must be a list of CheckpointSaverListener, '
'given: {}'.format(l))
return listeners
def _get_replica_device_setter(config):
"""Creates a replica device setter if required as a default device_fn.
  `Estimator` uses `ReplicaDeviceSetter` as a default device placer. It sets
  the distribution-related arguments, such as the number of ps_replicas, based
  on the given config.
Args:
config: A `RunConfig` instance.
Returns:
A replica device setter, or None.
"""
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return training.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=list(device_setter.STANDARD_PS_OPS),
cluster=config.cluster_spec)
else:
return None
def _verify_model_fn_args(model_fn, params):
"""Verifies model fn arguments."""
args = set(function_utils.fn_args(model_fn))
if 'features' not in args:
raise ValueError('model_fn (%s) must include features argument.' % model_fn)
if params is not None and 'params' not in args:
raise ValueError('model_fn (%s) does not include params argument, '
'but params (%s) is passed to Estimator.' % (model_fn,
params))
if params is None and 'params' in args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
non_valid_args = list(args - _VALID_MODEL_FN_ARGS)
if non_valid_args:
    raise ValueError('model_fn (%s) has the following unexpected args: %s' %
                     (model_fn, non_valid_args))
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = training.NewCheckpointReader(
training.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(ops.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def _extract_metric_update_ops(eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name, metric_ops in sorted(six.iteritems(eval_dict)):
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
if update_ops:
update_op = control_flow_ops.group(*update_ops)
else:
update_op = None
return update_op, value_ops
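# Illustrative sketch (hypothetical tensors): eval_metric_ops maps metric names
# to (value_op, update_op) pairs, e.g. from tf.metrics.accuracy; this helper
# groups the update ops and keeps the value ops keyed by name.
#
#   eval_dict = {'accuracy': tf.metrics.accuracy(labels, predictions)}
#   update_op, value_ops = _extract_metric_update_ops(eval_dict)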
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(six.iteritems(dictionary))
if not isinstance(v, six.binary_type))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = writer_cache.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.binary_type):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = '%s/%d' % (key, i)
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or a serialized string of Summary.', key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
def _has_dataset_or_queue_runner(maybe_tensor):
"""Returns True if TF dataset or QueueRunner has been used."""
# Check TF dataset first. Here, we use a simple algorithm to check the top
# level Tensors only, which should be sufficient for most users.
tensors = [x for x in nest.flatten(maybe_tensor) if isinstance(x, ops.Tensor)]
if any([t.op.type == 'IteratorGetNext' for t in tensors]):
return True
# Now, check queue.
return ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS)
class _DatasetInitializerHook(training.SessionRunHook):
def __init__(self, iterator):
self._iterator = iterator
def begin(self):
self._initializer = self._iterator.initializer
def after_create_session(self, session, coord):
del coord
session.run(self._initializer)
VocabInfo = warm_starting_util.VocabInfo # pylint: disable=invalid-name
tf_export('estimator.VocabInfo', allow_multiple_exports=True)(VocabInfo)
@tf_export('estimator.WarmStartSettings')
class WarmStartSettings(
collections.namedtuple('WarmStartSettings', [
'ckpt_to_initialize_from',
'vars_to_warm_start',
'var_name_to_vocab_info',
'var_name_to_prev_var_name',
])):
"""Settings for warm-starting in Estimators.
  Example use with a canned `DNNClassifier`:
```
emb_vocab_file = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_file(
"sc_vocab_file", "new_vocab.txt", vocab_size=100),
dimension=8)
emb_vocab_list = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_list(
"sc_vocab_list", vocabulary_list=["a", "b"]),
dimension=8)
estimator = tf.estimator.DNNClassifier(
hidden_units=[128, 64], feature_columns=[emb_vocab_file, emb_vocab_list],
warm_start_from=ws)
```
where `ws` could be defined as:
Warm-start all weights in the model (input layer and hidden weights).
Either the directory or a specific checkpoint can be provided (in the case
of the former, the latest checkpoint will be used):
```
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp")
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp/model-1000")
```
Warm-start only the embeddings (input layer):
```
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp",
vars_to_warm_start=".*input_layer.*")
```
Warm-start all weights but the embedding parameters corresponding to
`sc_vocab_file` have a different vocab from the one used in the current
model:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt"
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
})
```
Warm-start only `sc_vocab_file` embeddings (and no other variables), which
have a different vocab from the one used in the current model:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt"
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
vars_to_warm_start=None,
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
})
```
Warm-start all weights but the parameters corresponding to `sc_vocab_file`
have a different vocab from the one used in current checkpoint, and only
100 of those entries were used:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt",
old_vocab_size=100
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
})
```
Warm-start all weights but the parameters corresponding to `sc_vocab_file`
have a different vocab from the one used in current checkpoint and the
parameters corresponding to `sc_vocab_list` have a different name from the
current checkpoint:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt",
old_vocab_size=100
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
},
var_name_to_prev_var_name={
"input_layer/sc_vocab_list_embedding/embedding_weights":
"old_tensor_name"
})
```
Attributes:
ckpt_to_initialize_from: [Required] A string specifying the directory with
checkpoint file(s) or path to checkpoint from which to warm-start the
model parameters.
vars_to_warm_start: [Optional] One of the following:
- A regular expression (string) that captures which variables to
warm-start (see tf.get_collection). This expression will only consider
variables in the TRAINABLE_VARIABLES collection.
- A list of Variables to warm-start.
- A list of strings, each representing a full variable name to warm-start.
- `None`, in which case only variables specified in
`var_name_to_vocab_info` will be warm-started.
Defaults to `'.*'`, which warm-starts all variables in the
TRAINABLE_VARIABLES collection. Note that this excludes variables such as
accumulators and moving statistics from batch norm.
var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
VocabInfo. The variable names should be "full" variables, not the names
of the partitions. If not explicitly provided, the variable is assumed to
have no vocabulary.
var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
name of the previously-trained variable in `ckpt_to_initialize_from`. If
      not explicitly provided, the name of the variable is assumed to be the
      same between the previous checkpoint and the current model.
"""
def __new__(cls,
ckpt_to_initialize_from,
vars_to_warm_start='.*',
var_name_to_vocab_info=None,
var_name_to_prev_var_name=None):
if not ckpt_to_initialize_from:
raise ValueError(
'`ckpt_to_initialize_from` MUST be set in WarmStartSettings')
return super(WarmStartSettings, cls).__new__(
cls,
ckpt_to_initialize_from,
vars_to_warm_start,
var_name_to_vocab_info or {},
var_name_to_prev_var_name or {},
)
def _get_default_warm_start_settings(warm_start_from):
"""Returns default WarmStartSettings.
Args:
warm_start_from: Either a string representing the filepath of a checkpoint
or SavedModel to initialize from, or an instance of WarmStartSettings.
Returns:
Either None or an instance of WarmStartSettings.
Raises:
ValueError: If warm_start_from is not None but is neither a string nor an
instance of WarmStartSettings.
"""
if warm_start_from is None:
return None
if isinstance(warm_start_from, (six.string_types, six.binary_type)):
# Infer that this is a SavedModel if export_path +
# 'variables/variables.index' exists, and if so, construct the
# WarmStartSettings pointing to export_path + 'variables/variables'.
if gfile.Exists(os.path.join(compat.as_bytes(warm_start_from),
compat.as_bytes('variables/variables.index'))):
logging.info('Warm-starting from a SavedModel')
return WarmStartSettings(ckpt_to_initialize_from=os.path.join(
compat.as_bytes(warm_start_from),
compat.as_bytes('{}/{}'.format(constants.VARIABLES_DIRECTORY,
constants.VARIABLES_FILENAME))))
return WarmStartSettings(ckpt_to_initialize_from=warm_start_from)
elif isinstance(warm_start_from, WarmStartSettings):
return warm_start_from
else:
raise ValueError('warm_start_from must be a string or a WarmStartSettings, '
'instead got {}'.format(type(warm_start_from)))
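# Illustrative sketch (hypothetical paths): a plain checkpoint path or
# directory yields default WarmStartSettings, while a SavedModel export
# directory is detected through its variables index file and rewritten to
# point at the variables files inside it.
#
#   _get_default_warm_start_settings('/tmp/model_dir')
#   _get_default_warm_start_settings('/tmp/exported_savedmodel/1523456789')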
| nburn42/tensorflow | tensorflow/python/estimator/estimator.py | Python | apache-2.0 | 76,526 |
"""
We are given the head node root of a binary tree, where additionally every node's value is either a 0 or a 1.
Return the same tree where every subtree (of the given tree) not containing a 1 has been removed.
(Recall that the subtree of a node X is X, plus every node that is a descendant of X.)
Example 1:
Input: [1,null,0,0,1]
Output: [1,null,0,null,1]
Explanation:
Only the nodes whose subtree contains a 1 are kept; all other subtrees are removed.
(The original problem statement illustrates this with a diagram, which is not reproduced here.)
Example 2:
Input: [1,0,1,0,0,0,1]
Output: [1,null,1,null,1]
Example 3:
Input: [1,1,0,1,1,0,1,0]
Output: [1,1,0,1,1,null,1]
Note:
The binary tree will have at most 100 nodes.
The value of each node will only be 0 or 1.
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def pruneTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return root
needRemove = self.needRemove(root)
if needRemove:
return None
else:
return root
    def needRemove(self, node):
        """Prunes this node's children in place and returns True if the node itself should be removed."""
if node.val == 0 and not node.left and not node.right:
return True
leftNeedRemove = True
rightNeedRemove = True
if node.left:
leftNeedRemove = self.needRemove(node.left)
if leftNeedRemove:
node.left = None
if node.right:
rightNeedRemove = self.needRemove(node.right)
if rightNeedRemove:
node.right = None
if node.val == 0 and not node.left and not node.right:
return True
else:
return False
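# Minimal usage sketch (not part of the LeetCode submission): builds the tree
# from Example 1, prunes it, and prints the surviving values in preorder. The
# local TreeNode class mirrors the commented-out definition above.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)              # Input: [1, null, 0, 0, 1]
    root.right = TreeNode(0)
    root.right.left = TreeNode(0)
    root.right.right = TreeNode(1)

    pruned = Solution().pruneTree(root)

    def preorder(node):
        """Preorder list of node values, skipping missing children."""
        if not node:
            return []
        return [node.val] + preorder(node.left) + preorder(node.right)

    print(preorder(pruned))  # Expected: [1, 0, 1], i.e. output [1, null, 0, null, 1]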
| danielsunzhongyuan/my_leetcode_in_python | binary_tree_pruning_814.py | Python | apache-2.0 | 1,807 |
# Copyright 2019-present Kensho Technologies, LLC.
import bisect
import itertools
from typing import Any, Iterator, List, cast
from ..cost_estimation.analysis import QueryPlanningAnalysis
from ..cost_estimation.filter_selectivity_utils import get_integer_interval_for_filters_on_field
from ..cost_estimation.helpers import is_uuid4_type
from ..cost_estimation.int_value_conversion import (
MAX_UUID_INT,
MIN_UUID_INT,
convert_int_to_field_value,
)
from ..cost_estimation.interval import Interval, intersect_int_intervals, measure_int_interval
from ..schema.schema_info import QueryPlanningSchemaInfo
from .pagination_planning import VertexPartitionPlan
def _deduplicate_sorted_generator(generator: Iterator[Any]) -> Iterator[Any]:
"""Return a generator that skips repeated values in the given sorted generator."""
prev = object()
for i in generator:
if i != prev:
yield i
prev = i
def _choose_parameter_values(
relevant_quantiles: List[Any], desired_num_splits: int
) -> Iterator[Any]:
"""Choose parameter values as evenly as possible.
    Choose parameter values by picking at most (desired_num_splits - 1) of the given quantiles so
    that they split the value space into desired_num_splits splits. There are many ways to do that,
    and this function tries to make the sizes of the splits as even as possible.
It is possible that the desired number of splits cannot be achieved if there are not enough
quantiles. In that case fewer than the desired number of splits are created.
Args:
relevant_quantiles: N quantiles dividing the space of values into N+1 regions.
        desired_num_splits: the desired number of splits; a split is a union of consecutive regions.
Returns:
at most (desired_num_splits - 1) values that define the splits
"""
if desired_num_splits < 2:
raise AssertionError(
f"Unexpectedly received desired_num_splits = {desired_num_splits}, which this "
f"function is not able to handle."
)
num_regions = len(relevant_quantiles) + 1
if desired_num_splits >= num_regions:
return _deduplicate_sorted_generator(quantile for quantile in relevant_quantiles)
# We can't have all the splits be the same number of regions, but we can make sure
# the number of splits per region varies by at most one.
small_split_region_count = num_regions // desired_num_splits
large_split_region_count = small_split_region_count + 1
large_split_count = num_regions - small_split_region_count * desired_num_splits
small_split_count = desired_num_splits - large_split_count
# Compute 1-based indexes for which quantiles define the desired splits
quantile_indexes = itertools.accumulate(
itertools.chain(
itertools.repeat(large_split_region_count, large_split_count),
itertools.repeat(small_split_region_count, small_split_count - 1),
)
)
# TODO(bojanserafimov): We deduplicate the results to make sure we don't generate pages
# that are known to be empty. This can cause the number of generated
# pages to be less than the desired number of pages.
return _deduplicate_sorted_generator(
relevant_quantiles[index - 1] for index in quantile_indexes
)
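# Illustrative example (hypothetical quantiles): with 6 quantiles (7 regions)
# and 3 desired splits, the regions are grouped 3 + 2 + 2, so the boundaries
# chosen are the quantiles at 1-based indexes 3 and 5.
#
#   list(_choose_parameter_values([10, 20, 30, 40, 50, 60], 3))  # -> [30, 50]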
def _convert_int_interval_to_field_value_interval(
schema_info: QueryPlanningSchemaInfo, vertex_type: str, field: str, interval: Interval[int]
) -> Interval[Any]:
"""Convert the endpoints of an interval. See int_value_conversion for the conversion spec."""
lower_bound = None
upper_bound = None
if interval.lower_bound is not None:
lower_bound = convert_int_to_field_value(
schema_info, vertex_type, field, interval.lower_bound
)
if interval.upper_bound is not None:
upper_bound = convert_int_to_field_value(
schema_info, vertex_type, field, interval.upper_bound
)
return Interval(lower_bound, upper_bound)
def _compute_parameters_for_uuid_field(
schema_info: QueryPlanningSchemaInfo,
integer_interval: Interval[int],
vertex_partition: VertexPartitionPlan,
vertex_type: str,
field: str,
) -> Iterator[Any]:
"""Return a generator of parameter values for the vertex partition at a uuid field.
See generate_parameters_for_vertex_partition for more details.
Args:
schema_info: contains statistics and relevant schema information
integer_interval: the interval of values for the field, constrained by existing filters
in the query, in int form. See the int_value_conversion module for
the definition of an int-equivalent of a uuid.
vertex_partition: the pagination plan we are working on
vertex_type: the name of the vertex type where the pagination field is
field: the name of the pagination field
Returns:
generator of field values. See generate_parameters_for_vertex_partition for more details.
"""
uuid_int_universe = Interval(MIN_UUID_INT, MAX_UUID_INT)
integer_interval = intersect_int_intervals(integer_interval, uuid_int_universe)
int_value_splits = (
cast(int, integer_interval.lower_bound)
+ int(
float(
cast(int, measure_int_interval(integer_interval))
* i
// vertex_partition.number_of_splits
)
)
for i in range(1, vertex_partition.number_of_splits)
)
return (
convert_int_to_field_value(schema_info, vertex_type, field, int_value)
for int_value in int_value_splits
)
def _compute_parameters_for_non_uuid_field(
schema_info: QueryPlanningSchemaInfo,
field_value_interval: Interval[Any],
vertex_partition: VertexPartitionPlan,
vertex_type: str,
field: str,
) -> Iterator[Any]:
"""Return a generator of parameter values for the vertex partition at a non-uuid field.
See generate_parameters_for_vertex_partition for more details.
Args:
schema_info: contains statistics and relevant schema information
field_value_interval: the interval of values for the field, constrained by existing filters
in the query
vertex_partition: the pagination plan we are working on
vertex_type: the name of the vertex type where the pagination field is
field: the name of the pagination field
Returns:
generator of field values. See generate_parameters_for_vertex_partition for more details.
"""
quantiles = schema_info.statistics.get_field_quantiles(vertex_type, field)
if quantiles is None or len(quantiles) <= vertex_partition.number_of_splits:
raise AssertionError(
"Invalid vertex partition {}. Not enough quantile data.".format(vertex_partition)
)
# Since we can't be sure the minimum observed value is the
# actual minimum value, we treat values less than it as part
# of the first quantile. That's why we drop the minimum and
# maximum observed values from the quantile list.
proper_quantiles = quantiles[1:-1]
# Get the relevant quantiles (ones inside the field_value_interval)
# TODO(bojanserafimov): It's possible that the planner thought there are enough quantiles
# to paginate, but didn't notice that there are filters that restrict
# the range of values into a range for which there are not enough
# quantiles. In this case, the pagination plan is not fully realized.
# The generated query will have fewer pages than the plan specified.
#
# One solution is to push all the pagination capacity logic
# into the cost estimator, and make it return along with the
# cardinality estimate some other metadata that the paginator would
# rely on.
min_quantile = 0
max_quantile = len(proper_quantiles)
if field_value_interval.lower_bound is not None:
min_quantile = bisect.bisect_right(proper_quantiles, field_value_interval.lower_bound)
if field_value_interval.upper_bound is not None:
max_quantile = bisect.bisect_left(proper_quantiles, field_value_interval.upper_bound)
relevant_quantiles = proper_quantiles[min_quantile:max_quantile]
return _choose_parameter_values(relevant_quantiles, vertex_partition.number_of_splits)
def generate_parameters_for_vertex_partition(
analysis: QueryPlanningAnalysis,
vertex_partition: VertexPartitionPlan,
) -> Iterator[Any]:
"""Return a generator of parameter values that realize the vertex partition.
Composability guarantee: The values returned can be used to create
vertex_partition.number_of_splits pages, or just the first value can be used to separate
the first page from the remainder. Splitting the remainder recursively should produce
the same results.
Args:
analysis: the query augmented with various analysis steps
vertex_partition: the pagination plan we are working on
Returns:
Returns a generator of (vertex_partition.number_of_splits - 1) values that split the
values at vertex_partition.pagination_field into vertex_partition.number_of_splits
almost equal chunks.
"""
pagination_field = vertex_partition.pagination_field
if vertex_partition.number_of_splits < 2:
raise AssertionError("Invalid number of splits {}".format(vertex_partition))
# Find the FilterInfos on the pagination field
vertex_type = analysis.types[vertex_partition.query_path].name
filter_infos = analysis.filters[vertex_partition.query_path]
filters_on_field = {
filter_info for filter_info in filter_infos if filter_info.fields == (pagination_field,)
}
# Get the value interval currently imposed by existing filters
integer_interval = get_integer_interval_for_filters_on_field(
analysis.schema_info,
filters_on_field,
vertex_type,
pagination_field,
analysis.ast_with_parameters.parameters,
)
field_value_interval = _convert_int_interval_to_field_value_interval(
analysis.schema_info, vertex_type, pagination_field, integer_interval
)
# Compute parameters
if is_uuid4_type(analysis.schema_info, vertex_type, pagination_field):
return _compute_parameters_for_uuid_field(
analysis.schema_info, integer_interval, vertex_partition, vertex_type, pagination_field
)
else:
return _compute_parameters_for_non_uuid_field(
analysis.schema_info,
field_value_interval,
vertex_partition,
vertex_type,
pagination_field,
)
| kensho-technologies/graphql-compiler | graphql_compiler/query_pagination/parameter_generator.py | Python | apache-2.0 | 10,879 |
# -*- coding: utf-8 -*-
"""Circular buffer for storing event objects."""
class CircularBuffer(object):
"""Class that defines a circular buffer for storing event objects."""
def __init__(self, size):
"""Initializes a circular buffer object.
Args:
size (int): number of elements in the buffer.
"""
super(CircularBuffer, self).__init__()
self._index = 0
self._list = []
self._size = size
def __iter__(self):
"""Return all elements from the list."""
for index in range(0, self._size):
try:
yield self._list[(self._index + index) % self._size]
except IndexError:
pass
def __len__(self):
"""Return the length (the fixed size)."""
return self._size
@property
def size(self):
"""int: number of elements in the buffer."""
return self._size
def Append(self, item):
"""Add an item to the list.
Args:
item (object): item.
"""
if self._index >= self._size:
self._index = self._index % self._size
try:
self._list[self._index] = item
except IndexError:
self._list.append(item)
self._index += 1
def Clear(self):
"""Removes all elements from the list."""
self._index = 0
self._list = []
def Flush(self):
"""Returns a generator for all items and clear the buffer."""
for item in self:
yield item
self.Clear()
def GetCurrent(self):
"""Retrieves the current item that index points to.
Return:
object: item.
"""
index = self._index - 1
if index < 0:
return
return self._list[index]
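if __name__ == '__main__':
  # Minimal usage sketch (illustrative values): a buffer of size 3 keeps only
  # the 3 most recently appended items and iterates them oldest first.
  demo_buffer = CircularBuffer(3)
  for value in range(5):
    demo_buffer.Append(value)
  print(list(demo_buffer))          # [2, 3, 4]
  print(demo_buffer.GetCurrent())   # 4
  print(list(demo_buffer.Flush()))  # [2, 3, 4]; the buffer is cleared afterwards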
| dc3-plaso/plaso | plaso/lib/bufferlib.py | Python | apache-2.0 | 1,600 |
"""
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
adding order indexes
Revision ID: 63ed28aeec3d
Revises: 3781a8185e42
Create Date: 2016-08-03 04:36:32.284892
"""
# revision identifiers, used by Alembic.
revision = '63ed28aeec3d'
down_revision = '3781a8185e42'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_action_instances_order'), 'action_instances', ['order'], unique=False)
op.create_index(op.f('ix_actions_order'), 'actions', ['order'], unique=False)
op.create_index(op.f('ix_stage_instances_order'), 'stage_instances', ['order'], unique=False)
op.create_index(op.f('ix_stages_order'), 'stages', ['order'], unique=False)
op.create_index(op.f('ix_workflow_instances_order'), 'workflow_instances', ['order'], unique=False)
op.create_index(op.f('ix_workflows_order'), 'workflows', ['order'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_workflows_order'), table_name='workflows')
op.drop_index(op.f('ix_workflow_instances_order'), table_name='workflow_instances')
op.drop_index(op.f('ix_stages_order'), table_name='stages')
op.drop_index(op.f('ix_stage_instances_order'), table_name='stage_instances')
op.drop_index(op.f('ix_actions_order'), table_name='actions')
op.drop_index(op.f('ix_action_instances_order'), table_name='action_instances')
### end Alembic commands ###
| BambooHR/rapid | rapid/master/data/migrations/versions/63ed28aeec3d_adding_order_indexes.py | Python | apache-2.0 | 2,123 |
"""Support for AdGuard Home."""
from __future__ import annotations
import logging
from adguardhome import AdGuardHome, AdGuardHomeConnectionError, AdGuardHomeError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_HASSIO, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
Platform,
)
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import (
CONF_FORCE,
DATA_ADGUARD_CLIENT,
DATA_ADGUARD_VERSION,
DOMAIN,
SERVICE_ADD_URL,
SERVICE_DISABLE_URL,
SERVICE_ENABLE_URL,
SERVICE_REFRESH,
SERVICE_REMOVE_URL,
)
_LOGGER = logging.getLogger(__name__)
SERVICE_URL_SCHEMA = vol.Schema({vol.Required(CONF_URL): cv.url})
SERVICE_ADD_URL_SCHEMA = vol.Schema(
{vol.Required(CONF_NAME): cv.string, vol.Required(CONF_URL): cv.url}
)
SERVICE_REFRESH_SCHEMA = vol.Schema(
{vol.Optional(CONF_FORCE, default=False): cv.boolean}
)
PLATFORMS = [Platform.SENSOR, Platform.SWITCH]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up AdGuard Home from a config entry."""
session = async_get_clientsession(hass, entry.data[CONF_VERIFY_SSL])
adguard = AdGuardHome(
entry.data[CONF_HOST],
port=entry.data[CONF_PORT],
username=entry.data[CONF_USERNAME],
password=entry.data[CONF_PASSWORD],
tls=entry.data[CONF_SSL],
verify_ssl=entry.data[CONF_VERIFY_SSL],
session=session,
)
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = {DATA_ADGUARD_CLIENT: adguard}
try:
await adguard.version()
except AdGuardHomeConnectionError as exception:
raise ConfigEntryNotReady from exception
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
async def add_url(call: ServiceCall) -> None:
"""Service call to add a new filter subscription to AdGuard Home."""
await adguard.filtering.add_url(
allowlist=False, name=call.data[CONF_NAME], url=call.data[CONF_URL]
)
async def remove_url(call: ServiceCall) -> None:
"""Service call to remove a filter subscription from AdGuard Home."""
await adguard.filtering.remove_url(allowlist=False, url=call.data[CONF_URL])
async def enable_url(call: ServiceCall) -> None:
"""Service call to enable a filter subscription in AdGuard Home."""
await adguard.filtering.enable_url(allowlist=False, url=call.data[CONF_URL])
async def disable_url(call: ServiceCall) -> None:
"""Service call to disable a filter subscription in AdGuard Home."""
await adguard.filtering.disable_url(allowlist=False, url=call.data[CONF_URL])
async def refresh(call: ServiceCall) -> None:
"""Service call to refresh the filter subscriptions in AdGuard Home."""
await adguard.filtering.refresh(allowlist=False, force=call.data[CONF_FORCE])
hass.services.async_register(
DOMAIN, SERVICE_ADD_URL, add_url, schema=SERVICE_ADD_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_REMOVE_URL, remove_url, schema=SERVICE_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_ENABLE_URL, enable_url, schema=SERVICE_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_DISABLE_URL, disable_url, schema=SERVICE_URL_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_REFRESH, refresh, schema=SERVICE_REFRESH_SCHEMA
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload AdGuard Home config entry."""
hass.services.async_remove(DOMAIN, SERVICE_ADD_URL)
hass.services.async_remove(DOMAIN, SERVICE_REMOVE_URL)
hass.services.async_remove(DOMAIN, SERVICE_ENABLE_URL)
hass.services.async_remove(DOMAIN, SERVICE_DISABLE_URL)
hass.services.async_remove(DOMAIN, SERVICE_REFRESH)
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
del hass.data[DOMAIN]
return unload_ok
class AdGuardHomeEntity(Entity):
"""Defines a base AdGuard Home entity."""
def __init__(
self,
adguard: AdGuardHome,
entry: ConfigEntry,
name: str,
icon: str,
enabled_default: bool = True,
) -> None:
"""Initialize the AdGuard Home entity."""
self._available = True
self._enabled_default = enabled_default
self._icon = icon
self._name = name
self._entry = entry
self.adguard = adguard
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self) -> str:
"""Return the mdi icon of the entity."""
return self._icon
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
async def async_update(self) -> None:
"""Update AdGuard Home entity."""
if not self.enabled:
return
try:
await self._adguard_update()
self._available = True
except AdGuardHomeError:
if self._available:
_LOGGER.debug(
"An error occurred while updating AdGuard Home sensor",
exc_info=True,
)
self._available = False
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
raise NotImplementedError()
class AdGuardHomeDeviceEntity(AdGuardHomeEntity):
"""Defines a AdGuard Home device entity."""
@property
def device_info(self) -> DeviceInfo:
"""Return device information about this AdGuard Home instance."""
if self._entry.source == SOURCE_HASSIO:
config_url = "homeassistant://hassio/ingress/a0d7b954_adguard"
else:
if self.adguard.tls:
config_url = f"https://{self.adguard.host}:{self.adguard.port}"
else:
config_url = f"http://{self.adguard.host}:{self.adguard.port}"
return DeviceInfo(
entry_type=DeviceEntryType.SERVICE,
identifiers={
(DOMAIN, self.adguard.host, self.adguard.port, self.adguard.base_path) # type: ignore
},
manufacturer="AdGuard Team",
name="AdGuard Home",
sw_version=self.hass.data[DOMAIN][self._entry.entry_id].get(
DATA_ADGUARD_VERSION
),
configuration_url=config_url,
)
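# Illustrative only: how another part of Home Assistant could invoke one of the
# services registered in async_setup_entry above. The constants are the ones
# imported from const.py; the filter name and URL are made-up placeholders.
async def _example_add_filter(hass: HomeAssistant) -> None:
    """Sketch: add a filter subscription through the registered service."""
    await hass.services.async_call(
        DOMAIN,
        SERVICE_ADD_URL,
        {CONF_NAME: "Example filter", CONF_URL: "https://example.com/filter.txt"},
        blocking=True,
    )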
|
mezz64/home-assistant
|
homeassistant/components/adguard/__init__.py
|
Python
|
apache-2.0
| 7,189
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# django
from django.conf import settings
from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.views.generic.base import TemplateView, RedirectView
# third-party
from rest_framework.documentation import include_docs_urls
# dashboard
from dashboard.services.urls import api_urls
from dashboard.views import (
TranStatusPackageView, TranStatusReleasesView, TranStatusReleaseView, DeletePackageView, DeleteGraphRuleView,
TransPlatformSettingsView, LanguagesSettingsView, PackageSettingsView, AddPackageCIPipeline, hide_ci_pipeline,
JobsView, JobsLogsView, JobsArchiveView, JobsLogsPackageView, NewPackageView, UpdatePackageView, TransCoverageView,
StreamBranchesSettingsView, NewReleaseBranchView, GraphRulesSettingsView, NewGraphRuleView, YMLBasedJobs,
NewLanguageView, UpdateLanguageView, NewLanguageSetView, UpdateLanguageSetView, NewTransPlatformView,
UpdateTransPlatformView, UpdateGraphRuleView, JobDetailView, refresh_package, release_graph, schedule_job,
tabular_data, export_packages, generate_reports, read_file_logs, get_build_tags, change_lang_status,
LanguageDetailView, LanguageReleaseView, TerritoryView, CleanUpJobs, get_repo_branches, get_target_langs,
refresh_ci_pipeline, graph_data, job_template, PipelineDetailView, PipelineHistoryView, PipelineConfigurationView,
ReleasePipelinesView, PipelinesView, AddCIPipeline, get_workflow_steps, get_pipeline_job_template,
ajax_save_pipeline_config, ajax_run_pipeline_config, ajax_toggle_pipeline_config, ajax_delete_pipeline_config
)
LOGIN_URL = "oidc_authentication_init" if settings.FAS_AUTH else "admin:index"
app_job_urls = [
url(r'^$', login_required(JobsView.as_view(), login_url=LOGIN_URL), name="jobs"),
url(r'^cleanup$', login_required(CleanUpJobs.as_view(), login_url=LOGIN_URL), name="jobs-cleanup"),
url(r'^logs$', JobsLogsView.as_view(), name="jobs-logs"),
url(r'^archive$', JobsArchiveView.as_view(), name="jobs-archive"),
url(r'^templates$', YMLBasedJobs.as_view(), name="jobs-yml-based"),
url(r'^log/(?P<job_id>[0-9a-f-]+)/detail$', JobDetailView.as_view(), name="log-detail"),
url(r'^logs/package/(?P<package_name>[\w\-\+]+)$', JobsLogsPackageView.as_view(),
name="jobs-logs-package")
]
app_pipeline_urls = [
url(r'^$', PipelinesView.as_view(), name="pipelines"),
url(r'^new$', staff_member_required(AddCIPipeline.as_view(), login_url=LOGIN_URL), name="add-ci-pipeline"),
url(r'^(?P<release_slug>[\w\-\+]+)$', ReleasePipelinesView.as_view(), name="release-pipelines"),
url(r'^(?P<pipeline_id>[0-9a-f-]+)/details$', PipelineDetailView.as_view(), name="pipeline-details"),
url(r'^(?P<pipeline_id>[0-9a-f-]+)/history$', PipelineHistoryView.as_view(), name="pipeline-history"),
url(r'^(?P<pipeline_id>[0-9a-f-]+)/configurations$', PipelineConfigurationView.as_view(),
name="pipeline-configuration"),
]
app_setting_urls = [
url(r'^$', RedirectView.as_view(permanent=False, url='/settings/languages'), name="settings"),
url(r'^languages$', RedirectView.as_view(permanent=True, url='/languages')),
url(r'^translation-platforms$', RedirectView.as_view(permanent=True, url='/translation-platforms')),
url(r'^packages$', RedirectView.as_view(permanent=True, url='/packages')),
url(r'^products$', RedirectView.as_view(permanent=True, url='/products')),
url(r'^graph-rules$', RedirectView.as_view(permanent=True, url='/coverage')),
url(r'^notification$', TemplateView.as_view(template_name="settings/notification.html"),
name="settings-notification"),
]
ajax_urls = [
url(r'^schedule-job$', schedule_job, name="ajax-schedule-job"),
url(r'^graph-data$', graph_data, name="ajax-graph-data"),
url(r'^tabular-data$', tabular_data, name="ajax-tabular-data"),
url(r'^refresh-package$', refresh_package, name="ajax-refresh-package"),
url(r'^release-graph$', release_graph, name="ajax-release-graph"),
url(r'^generate-reports$', generate_reports, name="ajax-releases-report"),
url(r'^read-file-logs$', read_file_logs, name="ajax-read-logs"),
url(r'^build-tags$', get_build_tags, name="ajax-build-tags"),
url(r'^repo-branches$', get_repo_branches, name="ajax-repo-branches"),
url(r'^job-template$', job_template, name="ajax-job-template"),
url(r'^change-lang-status$', staff_member_required(change_lang_status),
name="ajax-change-lang-status"),
url(r'^remove-pipeline$', login_required(hide_ci_pipeline),
name="ajax-remove-pipeline"),
url(r'^refresh-pipeline$', refresh_ci_pipeline,
name="ajax-refresh-pipeline"),
url(r'^target-langs$', get_target_langs, name="ajax-target-langs"),
url(r'^workflow-steps$', get_workflow_steps, name="ajax-workflow-steps"),
url(r'^ajax-pipeline-job-template$', get_pipeline_job_template, name="ajax-pipeline-job-template"),
url(r'^ajax-save-pipeline-config$', login_required(ajax_save_pipeline_config),
name='ajax-save-pipeline-config'),
url(r'^ajax-run-pipeline-config$', login_required(ajax_run_pipeline_config),
name='ajax-run-pipeline-config'),
url(r'^ajax-toggle-pipeline-config$', login_required(ajax_toggle_pipeline_config),
name='ajax-toggle-pipeline-config'),
url(r'^ajax-delete-pipeline-config$', login_required(ajax_delete_pipeline_config),
name='ajax-delete-pipeline-config')
]
coverage_urls = [
url(r'^$', GraphRulesSettingsView.as_view(), name="settings-graph-rules"),
url(r'^view/(?P<coverage_rule>[\w\-\+]+)$', TransCoverageView.as_view(), name="custom-graph"),
url(r'^new$', login_required(NewGraphRuleView.as_view(), login_url=LOGIN_URL),
name="settings-graph-rules-new"),
url(r'^edit/(?P<slug>[\w-]+)$', login_required(UpdateGraphRuleView.as_view(), login_url=LOGIN_URL),
name="graph-rule-update"),
url(r'^remove/(?P<slug>[\w-]+)$', login_required(DeleteGraphRuleView.as_view(), login_url=LOGIN_URL),
name="graph-rule-delete"),
]
geolocation_urls = [
url(r'^view/(?P<country_code>[\w]+)/$', TerritoryView.as_view(), name="territory-view"),
]
languages_urls = [
url(r'^$', LanguagesSettingsView.as_view(), name="settings-languages"),
url(r'^new$', staff_member_required(NewLanguageView.as_view()),
name="language-new"),
url(r'^view/(?P<pk>[\w@-]+)$', LanguageDetailView.as_view(), name="language-view"),
url(r'^view/(?P<locale>[\w@-]+)/(?P<release_slug>[\w\-\+]+)$',
LanguageReleaseView.as_view(), name="language-release-view"),
url(r'^edit/(?P<pk>[\w@-]+)$', staff_member_required(UpdateLanguageView.as_view()),
name="language-update"),
url(r'^set/new$', staff_member_required(NewLanguageSetView.as_view()),
name="language-set-new"),
url(r'^set/edit/(?P<slug>[\w-]+)$', staff_member_required(UpdateLanguageSetView.as_view()),
name="language-set-update"),
]
packages_urls = [
url(r'^$', PackageSettingsView.as_view(), name="settings-packages"),
url(r'^new$', login_required(NewPackageView.as_view(), login_url=LOGIN_URL), name="package-new"),
url(r'^view/(?P<package_name>[\w\-\+]+)$', TranStatusPackageView.as_view(), name="package-view"),
url(r'^edit/(?P<slug>[\w-]+)$', login_required(UpdatePackageView.as_view(), login_url=LOGIN_URL),
name="package-update"),
url(r'^remove/(?P<slug>[\w-]+)$', staff_member_required(DeletePackageView.as_view(), login_url=LOGIN_URL),
name="package-delete"),
url(r'^add/(?P<slug>[\w-]+)/ci-pipeline$', login_required(AddPackageCIPipeline.as_view(), login_url=LOGIN_URL),
name="package-add-ci-pipeline"),
url(r'^export/(?P<format>[\w+]+)$', export_packages, name="packages-export"),
]
platforms_urls = [
url(r'^$', TransPlatformSettingsView.as_view(), name="settings-trans-platforms"),
url(r'^new$', staff_member_required(NewTransPlatformView.as_view()), name="transplatform-new"),
url(r'^edit/(?P<slug>[\w-]+)$', staff_member_required(UpdateTransPlatformView.as_view()),
name="transplatform-update"),
]
products_urls = [
url(r'^$', RedirectView.as_view(permanent=False, url='/releases'), name="settings-release-streams"),
url(r'^(?P<stream_slug>\w+)/', include([
url(r'^releases$', StreamBranchesSettingsView.as_view(), name="settings-stream-branches"),
url(r'^releases/new$', staff_member_required(NewReleaseBranchView.as_view()),
name="settings-stream-branches-new"),
])),
]
releases_urls = [
url(r'^$', TranStatusReleasesView.as_view(), name="trans-status-releases"),
url(r'^view/(?P<release_branch>[\w\-\+]+)$', TranStatusReleaseView.as_view(), name="trans-status-release"),
]
trans_status_urls = [
url(r'^$', RedirectView.as_view(permanent=False, url='/releases'),
name="trans-status"),
url(r'^packages$', RedirectView.as_view(permanent=True, url='/packages')),
url(r'^releases$', RedirectView.as_view(permanent=True, url='/releases')),
]
urlpatterns = [
url(r'^api/', include(api_urls)),
url(r'^api-docs/', include_docs_urls(title='Transtats APIs')),
url(r'^ajax/', include(ajax_urls)),
url(r'^settings/', include(app_setting_urls)),
url(r'^jobs/', include(app_job_urls)),
url(r'^pipelines/', include(app_pipeline_urls)),
# landing URLs
# url(r'^$', RedirectView.as_view(permanent=False, url='/translation-status/'), name="home"),
url(r'^$', TranStatusReleasesView.as_view(), name="home"),
url(r'^translation-status/', include(trans_status_urls)),
url(r'^translation-coverage/$', RedirectView.as_view(query_string=True,
permanent=True, url='/coverage/view/')),
url(r'^quick-start$', TemplateView.as_view(template_name="howto.html"), name="howto"),
url(r'^health$', RedirectView.as_view(permanent=False, url='/api/ping?format=json')),
# packages section urls
url(r'^packages/', include(packages_urls)),
# languages section urls
url(r'^languages/', include(languages_urls)),
# trans platforms section urls
url(r'^translation-platforms/', include(platforms_urls)),
# dashboard section urls
url(r'^releases/', include(releases_urls)),
url(r'^products/', include(products_urls)),
# coverage section urls (coverage_urls)
url(r'^coverage/', include(coverage_urls)),
# geolocation section urls (location_urls)
url(r'^territory/', include(geolocation_urls)),
]
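# A small illustration of how the named routes above resolve with Django's
# standard reverse() helper (assuming this module is the active ROOT_URLCONF):
#
#   from django.urls import reverse
#   reverse('package-view', kwargs={'package_name': 'anaconda'})  # -> '/packages/view/anaconda'
#   reverse('jobs-logs')                                          # -> '/jobs/logs'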
|
sundeep-co-in/transtats
|
dashboard/urls.py
|
Python
|
apache-2.0
| 11,095
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from rnacentral_pipeline.databases import data
def test_can_build_correct_writeable():
annotation = data.GoTermAnnotation(
rna_id="a",
qualifier="part_of",
term_id="GO:01",
evidence_code="ECO:001",
extensions=[
data.AnnotationExtension(qualifier="talks_to", target="ENESMBL:1"),
],
assigned_by="Bob",
publications=[],
)
writeable = list(annotation.writeable())
assert len(writeable) == 1
assert writeable[0] == [
"a",
"part_of",
"GO:01",
"ECO:001",
json.dumps(
[
{
"qualifier": "talks_to",
"target": "ENESMBL:1",
}
]
),
"Bob",
]
|
RNAcentral/rnacentral-import-pipeline
|
tests/databases/data/go_annotations_test.py
|
Python
|
apache-2.0
| 1,410
|
from pythonDnn.utils.load_conf import load_conv_spec
from pythonDnn.utils.utils import parse_activation
from pythonDnn.models.cnn import CNN,DropoutCNN;
from dnn_predict import Predictor
class CNNPredictor(Predictor):
def __init__(self,model_config):
super(CNNPredictor, self).__init__(model_config,'CNN');
conv_config,conv_layer_config,mlp_config = load_conv_spec(self.model_config['nnet_spec'],
self.batch_size,
self.model_config['input_shape'])
activationFn = parse_activation(mlp_config['activation']);
if mlp_config['do_dropout'] or conv_config['do_dropout']:
self.model = DropoutCNN(self.numpy_rng,self.theano_rng,conv_layer_configs = conv_layer_config,
batch_size = self.batch_size, n_outs=self.model_config['n_outs'],
hidden_layer_configs=mlp_config, hidden_activation = activationFn,
use_fast = conv_config['use_fast'],l1_reg = mlp_config['l1_reg'],
l2_reg = mlp_config['l1_reg'],max_col_norm = mlp_config['max_col_norm'],
input_dropout_factor=conv_config['input_dropout_factor'])
else:
self.model = CNN(self.numpy_rng,self.theano_rng,conv_layer_configs = conv_layer_config,
                        batch_size = self.batch_size, n_outs=self.model_config['n_outs'],
hidden_layer_configs=mlp_config, hidden_activation = activationFn,
use_fast = conv_config['use_fast'],l1_reg = mlp_config['l1_reg'],
l2_reg = mlp_config['l1_reg'],max_col_norm = mlp_config['max_col_norm'])
self.__load_model__(self.model_config['input_file'],mlp_config['pretrained_layers']);
|
IITM-DONLAB/dnn-test-wrapper
|
src/dnn_predict/cnn.py
|
Python
|
apache-2.0
| 1,537
|
num_cassandra_east = 1
num_regular_east = 0
num_cassandra_west = 0
num_regular_west = 0
from common_funcs import *
from time import sleep
from os import system
#todo-verify
eastAMI = "ami-4dad7424"#BOLTON: ami-8ee848e7"# OLDER: "ami-7268b01b"
westAMI = "ami-ecf17ddc"
eastInstanceIPs = []
westInstanceIPs = []
def make_ec2_east(n):
if n == 0:
return
global eastAMI
f = raw_input("EAST: spinning up %d instances; okay? " % n)
if f != "Y" and f != "y":
exit(-1)
system("ec2-run-instances %s -n %d -g 'lipstick' --t m1.large -k 'suna-real'" % (eastAMI, n))
def make_ec2_west(n):
if n == 0:
return
global westAMI
f = raw_input("WEST: spinning up %d instances; okay? " % n)
if f != "Y" and f != "y":
exit(-1)
system("ec2-run-instances %s -n %d -g 'lipstick' --t m1.large --region us-west-2 -k 'watson' -b '/dev/sdb=ephemeral0' -b '/dev/sdc=ephemeral1'" % (westAMI, n))
def get_instances():
global eastInstanceIPs
global westInstanceIPs
system("rm instances.txt")
system("ec2-describe-instances --region us-east-1 >> instances.txt")
system("ec2-describe-instances --region us-west-2 >> instances.txt")
ret = []
for line in open("instances.txt"):
line = line.split()
if line[0] == "INSTANCE":
ip = line[3]
if ip == "terminated":
continue
status = line[5]
if status.find("shutting") != -1:
continue
region = line[10]
ret.append((ip, region))
#OUTPUT all-hosts.txt, cassandra-hosts.txt, lipstick-hosts.txt, east-cassandra-hosts.txt, east-lipstick-hosts.txt, west-cassandra-hosts.txt west-lipstick-hosts.txt
system("rm instances.txt")
return ret
def make_instancefile(name, hosts):
f = open("hosts/"+name, 'w')
for host in hosts:
f.write("%s\n" % (host))
    f.close()
launch_cassandra_ring()
|
pbailis/bolton-sigmod2013-code
|
ycsb-lipstick/ycsb-src/setup-cluster/start_ring.py
|
Python
|
apache-2.0
| 1,951
|
#!/usr/bin/env python
#
# Copyright 2015 Airbus
# Copyright 2017 Fraunhofer Institute for Manufacturing Engineering and Automation (IPA)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
from airbus_pyqt_extend.QtAgiCore import get_pkg_dir_from_prefix
from airbus_cobot_gui import Plugin, ControlMode
## Finally import the RViz bindings themselves.
import rviz
## The MyViz class is the main container widget.
class PluginRviz(Plugin):
## MyViz Constructor
## ^^^^^^^^^^^^^^^^^
##
## Its constructor creates and configures all the component widgets:
## frame, thickness_slider, top_button, and side_button, and adds them
## to layouts.
def __init__(self, context):
Plugin.__init__(self, context)
self.frame = None
def onCreate(self, param):
yaml = param.getParam("yaml")
yaml = get_pkg_dir_from_prefix(yaml)
## rviz.VisualizationFrame is the main container widget of the
## regular RViz application, with menus, a toolbar, a status
## bar, and many docked subpanels. In this example, we
## disable everything so that the only thing visible is the 3D
## render window.
self.frame = rviz.VisualizationFrame()
## The "splash path" is the full path of an image file which
## gets shown during loading. Setting it to the empty string
## suppresses that behavior.
self.frame.setSplashPath("")
## VisualizationFrame.initialize() must be called before
## VisualizationFrame.load(). In fact it must be called
## before most interactions with RViz classes because it
## instantiates and initializes the VisualizationManager,
## which is the central class of RViz.
self.frame.initialize()
## The reader reads config file data into the config object.
## VisualizationFrame reads its data from the config object.
reader = rviz.YamlConfigReader()
config = rviz.Config()
reader.readFile( config, yaml)
self.frame.load( config )
## You can also store any other application data you like in
## the config object. Here we read the window title from the
## map key called "Title", which has been added by hand to the
## config file.
self.setWindowTitle( config.mapGetChild( "Title" ).getValue() )
## Here we disable the menu bar (from the top), status bar
## (from the bottom), and the "hide-docks" buttons, which are
## the tall skinny buttons on the left and right sides of the
## main render window.
self.frame.setMenuBar( None )
self.frame.setHideButtonVisibility( False )
## frame.getManager() returns the VisualizationManager
## instance, which is a very central class. It has pointers
## to other manager objects and is generally required to make
## any changes in an rviz instance.
self.manager = self.frame.getManager()
## Since the config file is part of the source code for this
## example, we know that the first display in the list is the
## grid we want to control. Here we just save a reference to
## it for later.
self.grid_display = self.manager.getRootDisplayGroup().getDisplayAt(0)
## Here we create the layout and other widgets in the usual Qt way.
layout = QVBoxLayout()
layout.addWidget( self.frame )
#######
self.setLayout( layout )
def onPause(self):
pass
def onResume(self):
pass
def onControlModeChanged(self, mode):
if mode == ControlMode.AUTOMATIC:
self.setEnabled(False)
else:
self.setEnabled(True)
def onUserChanged(self, user_info):
pass
def onTranslate(self, lng):
pass
def onEmergencyStop(self, state):
pass
def onDestroy(self):
pass
|
ipa-led/airbus_coop
|
airbus_plugins/airbus_plugin_rviz/src/airbus_plugin_rviz/plugin.py
|
Python
|
apache-2.0
| 4,720
|
import pickle
import pprint
data = [{'a': 'A', 'b': 2, 'c': 3.0}]
print('DATA:', end=' ')
pprint.pprint(data)
data_string = pickle.dumps(data)
print('PICKLE: {!r}'.format(data_string))
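# For illustration, the matching load step: pickle.loads() rebuilds an equal,
# but not identical, object from the byte string produced above.
unpickled = pickle.loads(data_string)
print('UNPICKLED:', end=' ')
pprint.pprint(unpickled)
print('SAME?:', unpickled is data, 'EQUAL?:', unpickled == data)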
|
jasonwee/asus-rt-n14uhp-mrtg
|
src/lesson_data_persistence_and_exchange/pickle_string.py
|
Python
|
apache-2.0
| 187
|
#!/home/y/bin/python2.6
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from redisPubSub import RedisPubSub
def main():
rsp = RedisPubSub()
channel_name = raw_input("Enter channel name to which you want to publish messages: ")
var = 1
while var == 1:
new_message = raw_input("Enter message you want to publish: ")
rsp.publish(channel_name, new_message)
if __name__ == "__main__":
main()
|
dragondscv/Redis_PubSub_Tornado_Websocket
|
demo/publisher.py
|
Python
|
apache-2.0
| 461
|
#!/usr/bin/env python
#
# Copyright 2011-2014 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An example that shows how to use the Splunk binding module to create a
convenient 'wrapper' interface around the Splunk REST APIs. The example
binds to a sampling of endpoints showing how to access collections,
entities and 'method-like' endpoints."""
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
from splunklib.binding import connect
try:
from utils import parse
except ImportError:
raise Exception("Add the SDK repository to your PYTHONPATH to run the examples "
"(e.g., export PYTHONPATH=~/splunk-sdk-python.")
class Service:
def __init__(self, context):
self.context = context
def apps(self):
return self.context.get("apps/local")
def indexes(self):
return self.context.get("data/indexes")
def info(self):
return self.context.get("server/info")
def settings(self):
return self.context.get("server/settings")
def search(self, query, **kwargs):
return self.context.post("search/jobs/export", search=query, **kwargs)
def main(argv):
opts = parse(argv, {}, ".splunkrc")
context = connect(**opts.kwargs)
service = Service(context)
assert service.apps().status == 200
assert service.indexes().status == 200
assert service.info().status == 200
assert service.settings().status == 200
assert service.search("search 404").status == 200
if __name__ == "__main__":
main(sys.argv[1:])
|
kkirsche/splunk-sdk-python
|
examples/binding1.py
|
Python
|
apache-2.0
| 2,079
|
from django.db import models
# Create your models here.
class Beer(models.Model):
url = models.CharField(max_length = 255)
name = models.CharField(max_length = 255)
brewery = models.CharField(max_length = 255)
brewery_number = models.IntegerField()
beer_number = models.IntegerField()
BA_score = models.CharField(max_length = 255, blank = True)
#brew_location = Field() Not implemented
style = models.CharField(max_length = 255, blank = True)
ABV = models.DecimalField(decimal_places = 2, max_digits= 5, blank = True)
reviews = models.TextField(blank = True)
def __unicode__(self):
return self.name + '-' + self.brewery
|
johnurbanik/beeradvocate_crawl
|
beers/models.py
|
Python
|
apache-2.0
| 703
|
#! /usr/bin/env python
import os
import random
import pygame
import time as t
# Class for sprite
class Player(object):
# Initialize the Sprite
def __init__(self):
self.rect = pygame.Rect(32, 32, 0, 0) # 16, 16)
# Moves the Sprite
def move(self, dx, dy):
# Move each axis separately. Note that this checks for collisions both times.
if dx != 0:
self.move_single_axis(dx, 0)
if dy != 0:
self.move_single_axis(0, dy)
# checks for collision via the array
def isSafe(self, level, dx, dy, sol):
# Get maze size
X = len(level[0])
Y = len(level)
if dx >= 0 and dx < X and dy >= 0 and dy < Y and level[dx][dy] != 0: # "W":
self.move(dx, dy) # you have made a successful move
return True
return False
# Recursively calls itself until it has ended the maze
def solveMaze(self, level, dx, dy, sol):
# Get maze size
X = len(level[0])
Y = len(level)
# check if player has reached the end
if (dx == X-1 and dy == Y-1):
sol[dx][dy] = 1
return True
# check if we're inside the maze
if self.isSafe(level, dx, dy, sol):
# Mark the current cell (Backtrack)
sol[dx][dy] = 1
# Move right
pygame.display.update()
if self.solveMaze(level, dx+1, dy, sol):
return True
# Move down
pygame.display.update()
if self.solveMaze(level, dx, dy+1, sol):
return True
# if you can't move right or down, you've hit a wall
sol[dx][dy] = 0
return False
# checks for collision via the sprite and walls
def move_single_axis(self, dx, dy):
# Move the player
self.rect.x += dx
self.rect.y += dy
# If you collide with a wall, move out based on velocity
for wall in walls:
if self.rect.colliderect(wall.rect):
if dx > 0: # Moving right; Hit the left side of the wall
self.rect.right = wall.rect.left
if dx < 0: # Moving left; Hit the right side of the wall
self.rect.left = wall.rect.right
if dy > 0: # Moving down; Hit the top side of the wall
self.rect.bottom = wall.rect.top
if dy < 0: # Moving up; Hit the bottom side of the wall
self.rect.top = wall.rect.bottom
# wall object
class Wall(object):
def __init__(self, pos):
walls.append(self)
self.rect = pygame.Rect(pos[0], pos[1], 16, 16)
# Initialise pygame
os.environ["SDL_VIDEO_CENTERED"] = "1"
pygame.init()
# Set up the display
pygame.display.set_caption("Maze Runner")
screen = pygame.display.set_mode((256, 256)) # 320, 240 OR 64, 64
clock = pygame.time.Clock()
walls = [] # List to hold the walls
player = Player() # Create the player
# Holds the level layout as a list of integer rows (0 = wall, 1 = open cell).
level = [
[1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
[1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1],
[1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0],
[1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1],
[1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1],
[0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0],
[1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
# Update this as you go through to show where the path(s) are
sol = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
# Parse the level grid above: 0 = wall, 1 = open cell (end_rect lands on the last open cell scanned)
x = y = 0
for row in level:
for col in row: # go through each row
if col == 0: # "W":
Wall((x, y))
if col == 1: # "E":
end_rect = pygame.Rect(x, y, 16, 16)
x += 16 # go to next "block" in row
y += 16 # go to next row
x = 0 # start at beginning of row
running = True
while running: # while not at the end of the maze
clock.tick(60)
for e in pygame.event.get():
if e.type == pygame.QUIT:
running = False
if e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
running = False
# Just added this to make it slightly fun ;)
if player.rect.colliderect(end_rect):
        raise SystemExit("You win!")
# Draw the scene
screen.fill((0, 0, 0))
for wall in walls:
pygame.draw.rect(screen, (104, 196, 210), wall.rect)
pygame.draw.rect(screen, (122, 68, 230), end_rect)
pygame.draw.rect(screen, (239, 163, 97), player.rect)
pygame.display.flip()
if player.solveMaze(level, 0, 0, sol):
print(sol)
else:
print("No Solution")
print("YOU WIN")
t.sleep(3) # display game after completion
running = False
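# The same right/down backtracking idea as Player.solveMaze, reduced to a
# dependency-free sketch on a tiny grid (1 = open cell, 0 = wall); names and
# the sample grid below are made up for illustration.
def solve_small(grid, x=0, y=0, visited=()):
    n = len(grid)
    if not (0 <= x < n and 0 <= y < n) or grid[x][y] == 0:
        return None
    visited = visited + ((x, y),)
    if (x, y) == (n - 1, n - 1):
        return visited
    return solve_small(grid, x + 1, y, visited) or solve_small(grid, x, y + 1, visited)
# solve_small([[1, 1], [0, 1]]) -> ((0, 0), (0, 1), (1, 1))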
|
kristinak228/Maze_Game
|
Maze.py
|
Python
|
apache-2.0
| 5,451
|
import logging
import spotipy
import spotipy.oauth2 as oauth2
import json
import datetime
from errbot import BotPlugin, botcmd
class BotifyPlugin(BotPlugin):
def activate(self):
from config import BOTIFY_CREDS
super(BotifyPlugin, self).activate()
# dict of name: id for playlists, name is the IRC channel
self.playlists = {}
creds = json.load(open(BOTIFY_CREDS))
client_id = creds.get('CLIENT_ID', 'YOUR_CLIENT_ID')
client_secret = creds.get('CLIENT_SECRET', 'YOUR_CLIENT_SECRET')
redirect_uri = creds.get('REDIRECT_URI', 'YOUR_REDIRECT_URI')
self.username = creds.get('USERNAME', 'USERNAME')
logging.info('Auth cache:' + creds.get('CACHE_PATH', self.username))
self.sp_oauth = oauth2.SpotifyOAuth(
client_id,
client_secret,
redirect_uri,
scope='playlist-modify-public',
cache_path=creds.get('CACHE_PATH', self.username)
)
@botcmd(split_args_with=None, admin_only=True)
def botify_createlist(self, mess, args):
self.oath_refresh_if_needed()
playlist = args[0]
return '%s created? %s' % (playlist, self.create_playlist(playlist))
@botcmd(split_args_with=None, admin_only=True)
def botify_auth(self, mess, args):
"""
Do the oauth challenge and response fandango
"""
r = self.oath_refresh_if_needed()
if 'expired' not in r:
expires = self.token_expires()
return "%s. Expires @ %s" % (r, expires.strftime('%H:%M:%S'))
try:
if args:
return self.oauth_validate(args[0])
else:
ed = "http://imgur.com/A8QOnaR.jpg"
return "You have 30 seconds to comply %s\n %s" % (
ed,
self.oauth_challenge()
)
except spotipy.SpotifyException, e:
logging.error(e)
return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
@botcmd(split_args_with=None, admin_only=True)
def botify_authcheck(self, mess, args):
self.oath_refresh_if_needed()
expires = self.token_expires()
return "Expires @ %s" % expires.strftime('%H:%M:%S')
@botcmd
def botify_list(self, mess, args):
self.oath_refresh_if_needed()
playlist = self.playlist_id(mess)
msg = "Listen along: http://open.spotify.com/user/%s/playlist/%s"
results = [msg % (self.username, playlist), "-----"]
if playlist:
playlist_tracks = self.list_tracks(playlist)
if len(playlist_tracks) == 0:
results.append("No tracks in playlist")
else:
for d in playlist_tracks:
logging.info(d)
s = '%s : %s (%s) - [%s]' % (
d['track']['name'],
d['track']['album']['name'],
', '.join([a['name'] for a in d['track']['artists']]),
d['track']['id']
)
results.append(s)
else:
results = ["No playlist for the room"]
for d in results:
yield d.encode('ascii', 'ignore')
@botcmd
def botify_search(self, mess, args):
results = []
try:
results = self.search(args)
except spotipy.SpotifyException, e:
logging.error(e)
yield 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
else:
for d in results:
s = '%s : %s (%s)- %s' % (
d['name'],
d['album']['name'],
', '.join([a['name'] for a in d['artists']]),
d['id'])
yield s.encode('ascii', 'ignore')
@botcmd
def botify_add(self, mess, args):
self.oath_refresh_if_needed()
playlist = self.playlist_id(mess)
if not playlist:
return "No playlist for the room"
try:
if playlist:
return self.add_track(playlist, args.split(' '))
else:
return "No playlist for the room"
except spotipy.SpotifyException, e:
logging.error(e)
return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
@botcmd
def botify_delete(self, mess, args):
self.oath_refresh_if_needed()
playlist = self.playlist_id(mess)
try:
if playlist:
return self.delete_track(playlist, args.split(' '))
else:
return "No playlist for the room"
except spotipy.SpotifyException, e:
logging.error(e)
return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
def search(self, term, limit=10):
try:
tracks = self.sp.search(q=term, limit=limit)
except spotipy.SpotifyException, e:
logging.error(e)
return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
return tracks['tracks']['items']
def delete_track(self, playlist, track_ids):
logging.info("delete tracks: %s" % track_ids)
track_ids = ["spotify:track:%s" % t for t in track_ids]
try:
self.sp.user_playlist_delete_tracks(
self.username,
playlist,
track_ids
)
except spotipy.SpotifyException, e:
logging.error(e)
return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
return "Track removed"
def add_track(self, playlist, track_ids):
logging.info("adding tracks: %s" % track_ids)
track_ids = ["spotify:track:%s" % t for t in track_ids]
try:
self.sp.user_playlist_add_tracks(
self.username,
playlist,
track_ids
)
except spotipy.SpotifyException, e:
logging.error(e)
return 'ruh roh\n http://i.imgur.com/bmfwvDl.gif'
return "Track added"
def list_tracks(self, playlist):
return self.sp.user_playlist(
self.username,
playlist,
fields="tracks,next"
)['tracks']['items']
def check_playlist(self, playlist):
playlists = self.sp.user_playlists(self.username)['items']
self.playlists = dict([(p['name'], p['id']) for p in playlists])
return playlist in self.playlists
def create_playlist(self, playlist):
if not self.check_playlist(playlist):
logging.info('creating playlist: %s' % playlist)
try:
playlist = self.sp.user_playlist_create(
self.username,
playlist
)
except spotipy.SpotifyException, e:
if e.http_status == 201:
# there's a bug in spotipy that thinks a 201 is bad...
return self.check_playlist(playlist)
else:
return False
def playlist_id(self, mess):
playlist = str(mess.getFrom())
if self.check_playlist(playlist):
return self.playlists[playlist]
else:
return False
def oauth_challenge(self):
return self.sp_oauth.get_authorize_url()
def oauth_validate(self, response):
try:
logging.info("botify validating oauth response: %s" % response)
code = self.sp_oauth.parse_response_code(response)
logging.info("botify oauth code: %s" % code)
token = self.sp_oauth.get_access_token(code)
if token:
self.sp = spotipy.Spotify(auth=token['access_token'])
expires = datetime.datetime.fromtimestamp(token['expires_at'])
return "Authorised. Expires @ %s" % expires.strftime(
'%H:%M:%S'
)
else:
return "http://i.imgur.com/s5guP5z.gif"
except spotipy.SpotifyException, e:
logging.error(e)
return "http://i.imgur.com/s5guP5z.gif"
def oath_refresh_if_needed(self):
expires = self.token_expires()
delta = expires - datetime.datetime.now()
if delta != abs(delta):
return "Token expired, reauth"
if delta.seconds < 300:
token_info = self.sp_oauth.get_cached_token()
print token_info['expires_at'], token_info['refresh_token']
self.sp_oauth.refresh_access_token(
token_info['refresh_token']
)
token_info = self.sp_oauth.get_cached_token()
print token_info['expires_at'], token_info['refresh_token']
return "Token refreshed"
def token_expires(self):
token = self.sp_oauth.get_cached_token()
return datetime.datetime.fromtimestamp(token['expires_at'])
|
drsm79/botify
|
botify.py
|
Python
|
apache-2.0
| 8,898
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class rewriteparam(base_resource) :
""" Configuration for rewrite parameter resource. """
def __init__(self) :
self._undefaction = ""
@property
def undefaction(self) :
"""Action to perform if the result of policy evaluation is undefined (UNDEF). An UNDEF event indicates an internal error condition.
Available settings function as follows:
* NOOP - Send the request to the protected server instead of responding to it.
* RESET - Reset the request and notify the user's browser, so that the user can resend the request.
* DROP - Drop the request without sending a response to the user.<br/>Default value: "NOREWRITE".
"""
try :
return self._undefaction
except Exception as e:
raise e
@undefaction.setter
def undefaction(self, undefaction) :
"""Action to perform if the result of policy evaluation is undefined (UNDEF). An UNDEF event indicates an internal error condition.
Available settings function as follows:
* NOOP - Send the request to the protected server instead of responding to it.
* RESET - Reset the request and notify the user's browser, so that the user can resend the request.
* DROP - Drop the request without sending a response to the user.<br/>Default value: "NOREWRITE"
"""
try :
self._undefaction = undefaction
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(rewriteparam_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.rewriteparam
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update rewriteparam.
"""
try :
if type(resource) is not list :
updateresource = rewriteparam()
updateresource.undefaction = resource.undefaction
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
""" Use this API to unset the properties of rewriteparam resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = rewriteparam()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the rewriteparam resources that are configured on netscaler.
"""
try :
if not name :
obj = rewriteparam()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class rewriteparam_response(base_response) :
def __init__(self, length=1) :
self.rewriteparam = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.rewriteparam = [rewriteparam() for _ in range(length)]
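# A hedged usage sketch for the resource class above. nitro_service and its
# login() call come from the same Nitro SDK (as used in its sample scripts);
# the address and credentials are placeholders.
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#   client = nitro_service("10.0.0.1", "http")
#   client.login("nsroot", "nsroot")
#   current = rewriteparam.get(client)
#   updated = rewriteparam()
#   updated.undefaction = "NOOP"
#   rewriteparam.update(client, updated)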
|
mahabs/nitro
|
nssrc/com/citrix/netscaler/nitro/resource/config/rewrite/rewriteparam.py
|
Python
|
apache-2.0
| 4,433
|
# Copyright 2016 Cloudbase Solutions.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netifaces
from neutron.agent.windows import ip_lib
from neutron.tests import base
class TestIpWrapper(base.BaseTestCase):
def test_get_device_by_ip_no_ip(self):
ret = ip_lib.IPWrapper().get_device_by_ip(None)
self.assertIsNone(ret)
@mock.patch.object(ip_lib.IPWrapper, 'get_devices')
def test_get_device_by_ip(self, mock_get_devices):
mock_dev1 = mock.MagicMock()
mock_dev2 = mock.MagicMock()
mock_dev1.device_has_ip.return_value = False
mock_dev2.device_has_ip.return_value = True
mock_get_devices.return_value = [mock_dev1, mock_dev2]
ret = ip_lib.IPWrapper().get_device_by_ip('fake_ip')
self.assertEqual(mock_dev2, ret)
@mock.patch('netifaces.interfaces')
def test_get_devices(self, mock_interfaces):
mock_interfaces.return_value = [mock.sentinel.dev1,
mock.sentinel.dev2]
ret = ip_lib.IPWrapper().get_devices()
self.assertEqual(mock.sentinel.dev1, ret[0].device_name)
self.assertEqual(mock.sentinel.dev2, ret[1].device_name)
@mock.patch('netifaces.interfaces')
def test_get_devices_error(self, mock_interfaces):
mock_interfaces.side_effect = OSError
ret = ip_lib.IPWrapper().get_devices()
self.assertEqual([], ret)
class TestIpDevice(base.BaseTestCase):
@mock.patch('netifaces.ifaddresses')
def test_device_has_ip(self, mock_netifaces):
mock_address = {'addr': mock.sentinel.fake_addr}
mock_netifaces.return_value = {netifaces.AF_INET: [mock_address]}
ret = ip_lib.IPDevice("fake_dev").device_has_ip(
mock.sentinel.fake_addr)
self.assertTrue(ret)
@mock.patch('netifaces.ifaddresses')
def test_device_has_ip_false(self, mock_netifaces):
mock_netifaces.return_value = {}
ret = ip_lib.IPDevice("fake_dev").device_has_ip(
mock.sentinel.fake_addr)
self.assertFalse(ret)
@mock.patch('netifaces.ifaddresses')
def test_device_has_ip_error(self, mock_netifaces):
mock_netifaces.side_effect = OSError
ret = ip_lib.IPDevice("fake_dev").device_has_ip(
mock.sentinel.fake_addr)
self.assertFalse(ret)
@mock.patch('netifaces.ifaddresses')
def test_device_not_found(self, mock_netifaces):
mock_netifaces.side_effect = ValueError
ret = ip_lib.IPDevice("fake_dev").device_has_ip(
mock.sentinel.fake_addr)
self.assertFalse(ret)
|
cloudbase/neutron
|
neutron/tests/unit/agent/windows/test_ip_lib.py
|
Python
|
apache-2.0
| 3,266
|
__author__ = 'Wenju Sun'
import urllib2
"""
This script tries to download a given file via HTTP and give a final status summary.
"""
MAX_VALUE=10
MIN_VALUE=0
WARN_VALUE=0
CRITICAL_VALUE=0
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3
STATUS_TEXT='OK'
STATUS_CODE=STATE_OK
murl="http://dl-3m.svc.mcitech.cn/items/60/185/3F4CBC95EF6DA685D498CC2090DDE6FB.zip"
def download(url):
    # Fetch the URL and return the size of the downloaded payload.
    data = urllib2.urlopen(url).read()
    return len(data)
|
swenker/studio
|
python/cpp-ops/ops/check_download_magazine.py
|
Python
|
apache-2.0
| 429
|
# parse
import datetime
import os
from django.db import models
from django.utils import timezone
from ocean.models import Region
from ocean.models import Characteristic
from .parser import parse_byday, parse_set, parse_test
class Task(models.Model):
timestamp = models.DateTimeField(
auto_now_add=True)
PROCESS_TYPES = (
('byday', 'Файлы разложены по дням'),
('set', 'Один файл'),
)
process_type = models.CharField(
'Тип обработки данных',
max_length=50,
choices=PROCESS_TYPES,
default='byday',
)
path = models.CharField('Путь к файлу для обработки',
max_length=200, blank=True, default='',)
date_start = models.DateField(
'Период, с', default=None, blank=True, null=True)
date_end = models.DateField(
'Период, по', default=None, blank=True, null=True)
characteristic = models.ForeignKey(
Characteristic)
region = models.ForeignKey(
Region)
status = models.CharField(
'Статут выполнения', max_length=50, default='new',
editable=False)
result = models.TextField(
"Результат", default='', blank=True)
run = models.BooleanField(
"Выполнить", default=False)
def __str__(self):
return str(self.timestamp.strftime('%H:%M:%S-%Y%m%d'))
    def save(self, *args, **kwargs):
result = ''
if self.run:
if self.process_type == 'set':
result = parse_set(
# result = parse_test(
self.path,
self.date_start,
self.date_end,
self.characteristic.model,
self.characteristic.tag,
self.characteristic.store_prefix,
self.characteristic.file_mask,
self.region.slug,
)
elif self.process_type == 'byday':
result = parse_byday(
self.date_start,
self.date_end,
self.characteristic.model,
self.characteristic.tag,
self.characteristic.store_prefix,
self.characteristic.file_mask,
self.region.slug,
)
self.run = False
self.result = result
        super(Task, self).save(*args, **kwargs)
|
cttgroup/oceanhub
|
proto/data-refining/back/workdir/parse/models.py
|
Python
|
apache-2.0
| 2,488
|
from cbagent.decorators import post_request
class MetadataClient(object):
def __init__(self, settings, host="127.0.0.1"):
self.settings = settings
self.base_url = "http://{0}:8000/cbmonitor".format(host)
@post_request
def add_cluster(self):
url = self.base_url + "/add_cluster/"
params = {"name": self.settings.cluster,
"rest_username": self.settings.rest_username,
"rest_password": self.settings.rest_password}
return url, params
@post_request
def add_server(self, address):
url = self.base_url + "/add_server/"
params = {"address": address,
"cluster": self.settings.cluster,
"ssh_username": self.settings.ssh_username,
"ssh_password": self.settings.ssh_password}
return url, params
@post_request
def add_bucket(self, name):
url = self.base_url + "/add_bucket/"
params = {"name": name, "type": "Couchbase",
"cluster": self.settings.cluster}
return url, params
@post_request
def add_metric(self, name, bucket=None, server=None, unit=None,
description=None):
url = self.base_url + "/add_metric_or_event/"
params = {"name": name, "type": "metric",
"cluster": self.settings.cluster}
        for key, value in (("bucket", bucket), ("server", server),
                           ("unit", unit), ("description", description)):
            if value is not None:
                params[key] = value
return url, params
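# A minimal usage sketch; the settings object below is a stand-in (any object
# exposing the attributes referenced above will do), and the decorated methods
# are assumed to POST the (url, params) pair they return via post_request.
#
#   from collections import namedtuple
#   Settings = namedtuple(
#       "Settings", "cluster rest_username rest_password ssh_username ssh_password")
#   settings = Settings("perf_cluster", "Administrator", "password", "root", "couchbase")
#   client = MetadataClient(settings, host="cbmonitor.example.com")
#   client.add_cluster()
#   client.add_server("10.1.1.2")
#   client.add_bucket("default")
#   client.add_metric("cpu_utilization_rate", server="10.1.1.2", unit="%")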
|
ronniedada/litmus
|
collectors/cbagent/metadata_client.py
|
Python
|
apache-2.0
| 1,573
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ReservationsTransport
from .rest import ReservationsRestTransport
from .rest import ReservationsRestInterceptor
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[ReservationsTransport]]
_transport_registry["rest"] = ReservationsRestTransport
__all__ = (
"ReservationsTransport",
"ReservationsRestTransport",
"ReservationsRestInterceptor",
)
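# For illustration, the registry above is what callers use to resolve a
# transport class by its short name:
#
#   transport_cls = _transport_registry["rest"]   # -> ReservationsRestTransport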
|
googleapis/python-compute
|
google/cloud/compute_v1/services/reservations/transports/__init__.py
|
Python
|
apache-2.0
| 1,087
|
import numpy as np
from matplotlib import pyplot as plt
def drawWaiteForAnyCustomer(xList, yList,name):
fig = plt.figure()
width = .5
ind = np.arange(len(yList))
plt.bar(ind, yList)
plt.xticks(ind + width / 2, xList)
# fig.autofmt_xdate()
plt.savefig("results/"+name+"_waitingPerCustomer.pdf")
def drawFrequencyForAnyWaiteData(xList, yList, name):
fig = plt.figure()
width = .5
ind = np.arange(len(yList))
plt.bar(ind, yList)
plt.xticks(ind + width / 2, xList)
# fig.autofmt_xdate()
plt.savefig("results/"+name+"_FrequencyPerWaitAmount.pdf")
|
mrhsce/simPython
|
chartDrawing.py
|
Python
|
apache-2.0
| 604
|
"""Model queries."""
def is_password_parameter(data_model, source_type: str, parameter: str) -> bool:
"""Return whether the parameter of the source type is a password."""
# If the parameter key can't be found (this can happen when the parameter is removed from the data model),
# err on the safe side and assume it was a password type
parameter_type = (
data_model["sources"].get(source_type, {}).get("parameters", {}).get(parameter, dict(type="password"))["type"]
)
return str(parameter_type) == "password"
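# A small illustration with a hand-rolled data model fragment (its shape is
# inferred from the lookups above):
#
#   data_model = {"sources": {"sonarqube": {"parameters": {
#       "private_token": {"type": "password"},
#       "severities": {"type": "multiple_choice"}}}}}
#   is_password_parameter(data_model, "sonarqube", "private_token")  # True
#   is_password_parameter(data_model, "sonarqube", "severities")     # False
#   is_password_parameter(data_model, "sonarqube", "gone")           # True (errs on the safe side)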
|
ICTU/quality-time
|
components/server/src/external/model/queries.py
|
Python
|
apache-2.0
| 542
|
from amara import parse
from amara.xpath.util import abspath
import unittest
#Samuel L Bayer-inspired test for default namespace handling
#http://lists.fourthought.com/pipermail/4suite/2006-February/007757.html
XML1 = '<foo xmlns:bar="http://bar.com"><baz/><bar:baz/><baz/></foo>'
XML2 = '<foo xmlns="http://bax.com" xmlns:bar="http://bar.com"><baz/><bar:baz/><dup/><dup/></foo>'
def test_abspath_with_ns():
doc = parse(XML1)
baz2 = doc.xml_first_child.xml_first_child
ap = abspath(baz2)
assert ap == u'/foo/baz'
ap = abspath(baz2.xml_following_sibling)
assert ap == u'/foo/bar:baz'
ap = abspath(baz2.xml_following_sibling.xml_following_sibling)
assert ap == u'/foo/baz[2]'
def test_abspath_with_default_ns():
doc = parse(XML2)
baz = doc.xml_first_child.xml_first_child
ap = abspath(baz)
assert ap == u'/*[namespace-uri()="http://bax.com" and local-name()="foo"]/*[namespace-uri()="http://bax.com" and local-name()="baz"]'
assert [baz] == list(doc.xml_select(ap))
ap = abspath(baz.xml_following_sibling)
assert ap == u'/*[namespace-uri()="http://bax.com" and local-name()="foo"]/bar:baz'
assert [baz.xml_following_sibling] == list(doc.xml_select(ap))
ap = abspath(baz, {u'bax': u'http://bax.com'})
assert ap == u'/bax:foo/bax:baz'
ap = abspath(baz.xml_following_sibling, {u'bax': u'http://bax.com'})
assert ap == u'/bax:foo/bar:baz'
dup1 = baz.xml_following_sibling.xml_following_sibling
dup2 = dup1.xml_following_sibling
ap = abspath(dup2, {u'bax': u'http://bax.com'})
assert ap == u'/bax:foo/bax:dup[2]'
if __name__ == "__main__":
raise SystemExit("use nosetests")
|
zepheira/amara
|
test/xpath/test_abs_path.py
|
Python
|
apache-2.0
| 1,676
|
import collections
Set = set
try:
from collections import OrderedDict
except ImportError:
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.values():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
KEY, PREV, NEXT = range(3)
class OrderedSet(collections.MutableSet):
"""
From: http://code.activestate.com/recipes/576694/
"""
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[PREV]
curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[NEXT] = next
next[PREV] = prev
def __iter__(self):
end = self.end
curr = end[NEXT]
while curr is not end:
yield curr[KEY]
curr = curr[NEXT]
def __reversed__(self):
end = self.end
curr = end[PREV]
while curr is not end:
yield curr[KEY]
curr = curr[PREV]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
def __del__(self):
self.clear() # remove circular references
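# Quick usage sketch (not part of the original backport module): both containers keep
# insertion order, which is the property the docs build relies on.
if __name__ == "__main__":  # pragma: no cover
    od = OrderedDict()
    od["b"] = 1
    od["a"] = 2
    assert list(od.keys()) == ["b", "a"]               # insertion order, not sorted order
    letters = OrderedSet("abracadabra")
    assert list(letters) == ["a", "b", "r", "c", "d"]  # duplicates dropped, first-seen order kept
    letters.discard("r")
    assert list(letters) == ["a", "b", "c", "d"]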
|
GeoscienceAustralia/sifra
|
docs/source/extensions/backports.py
|
Python
|
apache-2.0
| 11,182
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteEnvironment
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_Environments_DeleteEnvironment_sync]
from google.cloud import dialogflow_v2
def sample_delete_environment():
# Create a client
client = dialogflow_v2.EnvironmentsClient()
# Initialize request argument(s)
request = dialogflow_v2.DeleteEnvironmentRequest(
name="name_value",
)
# Make the request
client.delete_environment(request=request)
# [END dialogflow_generated_dialogflow_v2_Environments_DeleteEnvironment_sync]
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_generated_dialogflow_v2_environments_delete_environment_sync.py
|
Python
|
apache-2.0
| 1,444
|
import asyncio
import gc
import hashlib
import socket
import unittest
from unittest import mock
from yarl import URL
import aiohttp
from aiohttp.client_reqrep import ClientRequest, ClientResponse
from aiohttp.test_utils import make_mocked_coro
class TestProxy(unittest.TestCase):
fingerprint = hashlib.sha256(b"foo").digest()
response_mock_attrs = {
'status': 200,
}
mocked_response = mock.Mock(**response_mock_attrs)
clientrequest_mock_attrs = {
'return_value._hashfunc.return_value.digest.return_value': fingerprint,
'return_value.fingerprint': fingerprint,
'return_value.send.return_value.start':
make_mocked_coro(mocked_response),
}
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
# just in case if we have transport close callbacks
self.loop.stop()
self.loop.run_forever()
self.loop.close()
gc.collect()
@mock.patch('aiohttp.connector.ClientRequest')
def test_connect(self, ClientRequestMock):
req = ClientRequest(
'GET', URL('http://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop
)
self.assertEqual(str(req.proxy), 'http://proxy.example.com')
# mock all the things!
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro([mock.MagicMock()])
proto = mock.Mock(**{
'transport.get_extra_info.return_value': False,
})
self.loop.create_connection = make_mocked_coro(
(proto.transport, proto))
conn = self.loop.run_until_complete(connector.connect(req))
self.assertEqual(req.url, URL('http://www.python.org'))
self.assertIs(conn._protocol, proto)
self.assertIs(conn.transport, proto.transport)
ClientRequestMock.assert_called_with(
'GET', URL('http://proxy.example.com'),
auth=None,
fingerprint=None,
headers={'Host': 'www.python.org'},
loop=self.loop,
ssl_context=None,
verify_ssl=None)
@mock.patch('aiohttp.connector.ClientRequest')
def test_proxy_headers(self, ClientRequestMock):
req = ClientRequest(
'GET', URL('http://www.python.org'),
proxy=URL('http://proxy.example.com'),
proxy_headers={'Foo': 'Bar'},
loop=self.loop)
self.assertEqual(str(req.proxy), 'http://proxy.example.com')
# mock all the things!
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro([mock.MagicMock()])
proto = mock.Mock(**{
'transport.get_extra_info.return_value': False,
})
self.loop.create_connection = make_mocked_coro(
(proto.transport, proto))
conn = self.loop.run_until_complete(connector.connect(req))
self.assertEqual(req.url, URL('http://www.python.org'))
self.assertIs(conn._protocol, proto)
self.assertIs(conn.transport, proto.transport)
ClientRequestMock.assert_called_with(
'GET', URL('http://proxy.example.com'),
auth=None,
fingerprint=None,
headers={'Host': 'www.python.org', 'Foo': 'Bar'},
loop=self.loop,
ssl_context=None,
verify_ssl=None)
@mock.patch('aiohttp.connector.ClientRequest', **clientrequest_mock_attrs)
def test_connect_req_verify_ssl_true(self, ClientRequestMock):
req = ClientRequest(
'GET', URL('https://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
verify_ssl=True,
)
proto = mock.Mock()
connector = aiohttp.TCPConnector(loop=self.loop)
connector._create_proxy_connection = mock.MagicMock(
side_effect=connector._create_proxy_connection)
connector._create_direct_connection = mock.MagicMock(
side_effect=connector._create_direct_connection)
connector._resolve_host = make_mocked_coro([mock.MagicMock()])
self.loop.create_connection = make_mocked_coro(
(proto.transport, proto))
self.loop.run_until_complete(connector.connect(req))
connector._create_proxy_connection.assert_called_with(req)
((proxy_req,), _) = connector._create_direct_connection.call_args
proxy_req.send.assert_called_with(mock.ANY)
@mock.patch('aiohttp.connector.ClientRequest', **clientrequest_mock_attrs)
def test_connect_req_verify_ssl_false(self, ClientRequestMock):
req = ClientRequest(
'GET', URL('https://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
verify_ssl=False,
)
proto = mock.Mock()
connector = aiohttp.TCPConnector(loop=self.loop)
connector._create_proxy_connection = mock.MagicMock(
side_effect=connector._create_proxy_connection)
connector._create_direct_connection = mock.MagicMock(
side_effect=connector._create_direct_connection)
connector._resolve_host = make_mocked_coro([mock.MagicMock()])
self.loop.create_connection = make_mocked_coro(
(proto.transport, proto))
self.loop.run_until_complete(connector.connect(req))
connector._create_proxy_connection.assert_called_with(req)
((proxy_req,), _) = connector._create_direct_connection.call_args
proxy_req.send.assert_called_with(mock.ANY)
@mock.patch('aiohttp.connector.ClientRequest', **clientrequest_mock_attrs)
def test_connect_req_fingerprint_ssl_context(self, ClientRequestMock):
ssl_context = mock.Mock()
attrs = {
'return_value.ssl_context': ssl_context,
}
ClientRequestMock.configure_mock(**attrs)
req = ClientRequest(
'GET', URL('https://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
verify_ssl=True,
fingerprint=self.fingerprint,
ssl_context=ssl_context,
)
proto = mock.Mock()
connector = aiohttp.TCPConnector(loop=self.loop)
connector._create_proxy_connection = mock.MagicMock(
side_effect=connector._create_proxy_connection)
connector._create_direct_connection = mock.MagicMock(
side_effect=connector._create_direct_connection)
connector._resolve_host = make_mocked_coro([mock.MagicMock()])
transport_attrs = {
'get_extra_info.return_value.getpeercert.return_value': b"foo"
}
transport = mock.Mock(**transport_attrs)
self.loop.create_connection = make_mocked_coro(
(transport, proto))
self.loop.run_until_complete(connector.connect(req))
connector._create_proxy_connection.assert_called_with(req)
((proxy_req,), _) = connector._create_direct_connection.call_args
self.assertTrue(proxy_req.verify_ssl)
self.assertEqual(proxy_req.fingerprint, req.fingerprint)
self.assertIs(proxy_req.ssl_context, req.ssl_context)
def test_proxy_auth(self):
with self.assertRaises(ValueError) as ctx:
ClientRequest(
'GET', URL('http://python.org'),
proxy=URL('http://proxy.example.com'),
proxy_auth=('user', 'pass'),
loop=mock.Mock())
self.assertEqual(
ctx.exception.args[0],
"proxy_auth must be None or BasicAuth() tuple",
)
@mock.patch('aiohttp.client_reqrep.PayloadWriter')
def _test_connect_request_with_unicode_host(self, Request_mock):
loop = mock.Mock()
request = ClientRequest("CONNECT", URL("http://éé.com/"),
loop=loop)
request.response_class = mock.Mock()
request.write_bytes = mock.Mock()
request.write_bytes.return_value = asyncio.Future(loop=loop)
request.write_bytes.return_value.set_result(None)
request.send(mock.Mock())
Request_mock.assert_called_with(mock.ANY, mock.ANY, "xn--9caa.com:80",
mock.ANY, loop=loop)
def test_proxy_connection_error(self):
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro(
raise_exception=OSError('dont take it serious'))
req = ClientRequest(
'GET', URL('http://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
)
expected_headers = dict(req.headers)
with self.assertRaises(aiohttp.ClientConnectorError):
self.loop.run_until_complete(connector.connect(req))
self.assertEqual(req.url.path, '/')
self.assertEqual(dict(req.headers), expected_headers)
@mock.patch('aiohttp.connector.ClientRequest')
def test_auth(self, ClientRequestMock):
proxy_req = ClientRequest(
'GET', URL('http://proxy.example.com'),
auth=aiohttp.helpers.BasicAuth('user', 'pass'),
loop=self.loop
)
ClientRequestMock.return_value = proxy_req
self.assertIn('AUTHORIZATION', proxy_req.headers)
self.assertNotIn('PROXY-AUTHORIZATION', proxy_req.headers)
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro([mock.MagicMock()])
tr, proto = mock.Mock(), mock.Mock()
self.loop.create_connection = make_mocked_coro((tr, proto))
req = ClientRequest(
'GET', URL('http://www.python.org'),
proxy=URL('http://proxy.example.com'),
proxy_auth=aiohttp.helpers.BasicAuth('user', 'pass'),
loop=self.loop,
)
self.assertNotIn('AUTHORIZATION', req.headers)
self.assertNotIn('PROXY-AUTHORIZATION', req.headers)
conn = self.loop.run_until_complete(connector.connect(req))
self.assertEqual(req.url, URL('http://www.python.org'))
self.assertNotIn('AUTHORIZATION', req.headers)
self.assertIn('PROXY-AUTHORIZATION', req.headers)
self.assertNotIn('AUTHORIZATION', proxy_req.headers)
self.assertNotIn('PROXY-AUTHORIZATION', proxy_req.headers)
ClientRequestMock.assert_called_with(
'GET', URL('http://proxy.example.com'),
auth=aiohttp.helpers.BasicAuth('user', 'pass'),
loop=mock.ANY, headers=mock.ANY, fingerprint=None,
ssl_context=None, verify_ssl=None)
conn.close()
def test_auth_utf8(self):
proxy_req = ClientRequest(
'GET', URL('http://proxy.example.com'),
auth=aiohttp.helpers.BasicAuth('юзер', 'пасс', 'utf-8'),
loop=self.loop)
self.assertIn('AUTHORIZATION', proxy_req.headers)
@mock.patch('aiohttp.connector.ClientRequest')
def test_auth_from_url(self, ClientRequestMock):
proxy_req = ClientRequest('GET',
URL('http://user:pass@proxy.example.com'),
loop=self.loop)
ClientRequestMock.return_value = proxy_req
self.assertIn('AUTHORIZATION', proxy_req.headers)
self.assertNotIn('PROXY-AUTHORIZATION', proxy_req.headers)
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro([mock.MagicMock()])
tr, proto = mock.Mock(), mock.Mock()
self.loop.create_connection = make_mocked_coro((tr, proto))
req = ClientRequest(
'GET', URL('http://www.python.org'),
proxy=URL('http://user:pass@proxy.example.com'),
loop=self.loop,
)
self.assertNotIn('AUTHORIZATION', req.headers)
self.assertNotIn('PROXY-AUTHORIZATION', req.headers)
conn = self.loop.run_until_complete(connector.connect(req))
self.assertEqual(req.url, URL('http://www.python.org'))
self.assertNotIn('AUTHORIZATION', req.headers)
self.assertIn('PROXY-AUTHORIZATION', req.headers)
self.assertNotIn('AUTHORIZATION', proxy_req.headers)
self.assertNotIn('PROXY-AUTHORIZATION', proxy_req.headers)
ClientRequestMock.assert_called_with(
'GET', URL('http://user:pass@proxy.example.com'),
auth=None, loop=mock.ANY, headers=mock.ANY, fingerprint=None,
ssl_context=None, verify_ssl=None)
conn.close()
@mock.patch('aiohttp.connector.ClientRequest')
def test_auth__not_modifying_request(self, ClientRequestMock):
proxy_req = ClientRequest('GET',
URL('http://user:pass@proxy.example.com'),
loop=self.loop)
ClientRequestMock.return_value = proxy_req
proxy_req_headers = dict(proxy_req.headers)
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro(
raise_exception=OSError('nothing personal'))
req = ClientRequest(
'GET', URL('http://www.python.org'),
proxy=URL('http://user:pass@proxy.example.com'),
loop=self.loop,
)
req_headers = dict(req.headers)
with self.assertRaises(aiohttp.ClientConnectorError):
self.loop.run_until_complete(connector.connect(req))
self.assertEqual(req.headers, req_headers)
self.assertEqual(req.url.path, '/')
self.assertEqual(proxy_req.headers, proxy_req_headers)
@mock.patch('aiohttp.connector.ClientRequest')
def test_https_connect(self, ClientRequestMock):
proxy_req = ClientRequest('GET', URL('http://proxy.example.com'),
loop=self.loop)
ClientRequestMock.return_value = proxy_req
proxy_resp = ClientResponse('get', URL('http://proxy.example.com'))
proxy_resp._loop = self.loop
proxy_req.send = send_mock = mock.Mock()
send_mock.return_value = proxy_resp
proxy_resp.start = make_mocked_coro(mock.Mock(status=200))
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro(
[{'hostname': 'hostname', 'host': '127.0.0.1', 'port': 80,
'family': socket.AF_INET, 'proto': 0, 'flags': 0}])
tr, proto = mock.Mock(), mock.Mock()
self.loop.create_connection = make_mocked_coro((tr, proto))
req = ClientRequest(
'GET', URL('https://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
)
self.loop.run_until_complete(connector._create_connection(req))
self.assertEqual(req.url.path, '/')
self.assertEqual(proxy_req.method, 'CONNECT')
self.assertEqual(proxy_req.url, URL('https://www.python.org'))
tr.close.assert_called_once_with()
tr.get_extra_info.assert_called_with('socket', default=None)
self.loop.run_until_complete(proxy_req.close())
proxy_resp.close()
self.loop.run_until_complete(req.close())
@mock.patch('aiohttp.connector.ClientRequest')
def test_https_connect_runtime_error(self, ClientRequestMock):
proxy_req = ClientRequest('GET', URL('http://proxy.example.com'),
loop=self.loop)
ClientRequestMock.return_value = proxy_req
proxy_resp = ClientResponse('get', URL('http://proxy.example.com'))
proxy_resp._loop = self.loop
proxy_req.send = send_mock = mock.Mock()
send_mock.return_value = proxy_resp
proxy_resp.start = make_mocked_coro(mock.Mock(status=200))
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro(
[{'hostname': 'hostname', 'host': '127.0.0.1', 'port': 80,
'family': socket.AF_INET, 'proto': 0, 'flags': 0}])
tr, proto = mock.Mock(), mock.Mock()
tr.get_extra_info.return_value = None
self.loop.create_connection = make_mocked_coro((tr, proto))
req = ClientRequest(
'GET', URL('https://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
)
with self.assertRaisesRegex(
RuntimeError, "Transport does not expose socket instance"):
self.loop.run_until_complete(connector._create_connection(req))
self.loop.run_until_complete(proxy_req.close())
proxy_resp.close()
self.loop.run_until_complete(req.close())
@mock.patch('aiohttp.connector.ClientRequest')
def test_https_connect_http_proxy_error(self, ClientRequestMock):
proxy_req = ClientRequest('GET', URL('http://proxy.example.com'),
loop=self.loop)
ClientRequestMock.return_value = proxy_req
proxy_resp = ClientResponse('get', URL('http://proxy.example.com'))
proxy_resp._loop = self.loop
proxy_req.send = send_mock = mock.Mock()
send_mock.return_value = proxy_resp
proxy_resp.start = make_mocked_coro(
mock.Mock(status=400, reason='bad request'))
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro(
[{'hostname': 'hostname', 'host': '127.0.0.1', 'port': 80,
'family': socket.AF_INET, 'proto': 0, 'flags': 0}])
tr, proto = mock.Mock(), mock.Mock()
tr.get_extra_info.return_value = None
self.loop.create_connection = make_mocked_coro((tr, proto))
req = ClientRequest(
'GET', URL('https://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
)
with self.assertRaisesRegex(
aiohttp.ClientHttpProxyError, "400, message='bad request'"):
self.loop.run_until_complete(connector._create_connection(req))
self.loop.run_until_complete(proxy_req.close())
proxy_resp.close()
self.loop.run_until_complete(req.close())
@mock.patch('aiohttp.connector.ClientRequest')
def test_https_connect_resp_start_error(self, ClientRequestMock):
proxy_req = ClientRequest('GET', URL('http://proxy.example.com'),
loop=self.loop)
ClientRequestMock.return_value = proxy_req
proxy_resp = ClientResponse('get', URL('http://proxy.example.com'))
proxy_resp._loop = self.loop
proxy_req.send = send_mock = mock.Mock()
send_mock.return_value = proxy_resp
proxy_resp.start = make_mocked_coro(
raise_exception=OSError("error message"))
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro(
[{'hostname': 'hostname', 'host': '127.0.0.1', 'port': 80,
'family': socket.AF_INET, 'proto': 0, 'flags': 0}])
tr, proto = mock.Mock(), mock.Mock()
tr.get_extra_info.return_value = None
self.loop.create_connection = make_mocked_coro((tr, proto))
req = ClientRequest(
'GET', URL('https://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
)
with self.assertRaisesRegex(OSError, "error message"):
self.loop.run_until_complete(connector._create_connection(req))
@mock.patch('aiohttp.connector.ClientRequest')
def test_request_port(self, ClientRequestMock):
proxy_req = ClientRequest('GET', URL('http://proxy.example.com'),
loop=self.loop)
ClientRequestMock.return_value = proxy_req
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro(
[{'hostname': 'hostname', 'host': '127.0.0.1', 'port': 80,
'family': socket.AF_INET, 'proto': 0, 'flags': 0}])
tr, proto = mock.Mock(), mock.Mock()
tr.get_extra_info.return_value = None
self.loop.create_connection = make_mocked_coro((tr, proto))
req = ClientRequest(
'GET', URL('http://localhost:1234/path'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
)
self.loop.run_until_complete(connector._create_connection(req))
self.assertEqual(req.url, URL('http://localhost:1234/path'))
def test_proxy_auth_property(self):
req = aiohttp.ClientRequest(
'GET', URL('http://localhost:1234/path'),
proxy=URL('http://proxy.example.com'),
proxy_auth=aiohttp.helpers.BasicAuth('user', 'pass'),
loop=self.loop)
self.assertEqual(('user', 'pass', 'latin1'), req.proxy_auth)
def test_proxy_auth_property_default(self):
req = aiohttp.ClientRequest(
'GET', URL('http://localhost:1234/path'),
proxy=URL('http://proxy.example.com'),
loop=self.loop)
self.assertIsNone(req.proxy_auth)
@mock.patch('aiohttp.connector.ClientRequest')
def test_https_connect_pass_ssl_context(self, ClientRequestMock):
proxy_req = ClientRequest('GET', URL('http://proxy.example.com'),
loop=self.loop)
ClientRequestMock.return_value = proxy_req
proxy_resp = ClientResponse('get', URL('http://proxy.example.com'))
proxy_resp._loop = self.loop
proxy_req.send = send_mock = mock.Mock()
send_mock.return_value = proxy_resp
proxy_resp.start = make_mocked_coro(mock.Mock(status=200))
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro(
[{'hostname': 'hostname', 'host': '127.0.0.1', 'port': 80,
'family': socket.AF_INET, 'proto': 0, 'flags': 0}])
tr, proto = mock.Mock(), mock.Mock()
self.loop.create_connection = make_mocked_coro((tr, proto))
req = ClientRequest(
'GET', URL('https://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop,
)
self.loop.run_until_complete(connector._create_connection(req))
self.loop.create_connection.assert_called_with(
mock.ANY,
ssl=connector.ssl_context,
sock=mock.ANY,
server_hostname='www.python.org')
self.assertEqual(req.url.path, '/')
self.assertEqual(proxy_req.method, 'CONNECT')
self.assertEqual(proxy_req.url, URL('https://www.python.org'))
tr.close.assert_called_once_with()
tr.get_extra_info.assert_called_with('socket', default=None)
self.loop.run_until_complete(proxy_req.close())
proxy_resp.close()
self.loop.run_until_complete(req.close())
@mock.patch('aiohttp.connector.ClientRequest')
def test_https_auth(self, ClientRequestMock):
proxy_req = ClientRequest('GET', URL('http://proxy.example.com'),
auth=aiohttp.helpers.BasicAuth('user',
'pass'),
loop=self.loop)
ClientRequestMock.return_value = proxy_req
proxy_resp = ClientResponse('get', URL('http://proxy.example.com'))
proxy_resp._loop = self.loop
proxy_req.send = send_mock = mock.Mock()
send_mock.return_value = proxy_resp
proxy_resp.start = make_mocked_coro(mock.Mock(status=200))
connector = aiohttp.TCPConnector(loop=self.loop)
connector._resolve_host = make_mocked_coro(
[{'hostname': 'hostname', 'host': '127.0.0.1', 'port': 80,
'family': socket.AF_INET, 'proto': 0, 'flags': 0}])
tr, proto = mock.Mock(), mock.Mock()
self.loop.create_connection = make_mocked_coro((tr, proto))
self.assertIn('AUTHORIZATION', proxy_req.headers)
self.assertNotIn('PROXY-AUTHORIZATION', proxy_req.headers)
req = ClientRequest(
'GET', URL('https://www.python.org'),
proxy=URL('http://proxy.example.com'),
loop=self.loop
)
self.assertNotIn('AUTHORIZATION', req.headers)
self.assertNotIn('PROXY-AUTHORIZATION', req.headers)
self.loop.run_until_complete(connector._create_connection(req))
self.assertEqual(req.url.path, '/')
self.assertNotIn('AUTHORIZATION', req.headers)
self.assertNotIn('PROXY-AUTHORIZATION', req.headers)
self.assertNotIn('AUTHORIZATION', proxy_req.headers)
self.assertIn('PROXY-AUTHORIZATION', proxy_req.headers)
connector._resolve_host.assert_called_with('proxy.example.com', 80)
self.loop.run_until_complete(proxy_req.close())
proxy_resp.close()
self.loop.run_until_complete(req.close())
|
playpauseandstop/aiohttp
|
tests/test_proxy.py
|
Python
|
apache-2.0
| 24,979
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-09-10 19:10
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20170909_1749'),
]
operations = [
migrations.AlterField(
model_name='emailverifyrecord',
name='send_time',
field=models.DateTimeField(default=datetime.datetime.now, verbose_name='\u53d1\u9001\u65f6\u95f4'),
),
migrations.AlterField(
model_name='emailverifyrecord',
name='send_type',
field=models.CharField(choices=[('register', '\u6ce8\u518c'), ('forget', '\u627e\u56de\u5bc6\u7801')], max_length=10, verbose_name='\u9a8c\u8bc1\u7801\u7c7b\u578b'),
),
]
|
LennonChin/Django-Practices
|
MxOnline/apps/users/migrations/0003_auto_20170910_1910.py
|
Python
|
apache-2.0
| 833
|
#!/usr/bin/env python
import sys, os, errno
from subprocess import Popen, PIPE
f = open('pretrain.scp','w')
with open('codetrain.scp','r') as codetrain_file:
for codetrain_line in codetrain_file:
wav_file, mfcc_file = codetrain_line.split()
f.write("{0}\n".format(mfcc_file))
f.close()
|
techiaith/seilwaith
|
srdk/htk/make_pretrain_scp.py
|
Python
|
apache-2.0
| 295
|
from functools import wraps
from os import getenv
from sqlalchemy.orm import sessionmaker
from cafe.abc.compat import abstractclassmethod
from cafe.patterns.context import SessionManager
class SQLAlchemySessionManager(SessionManager):
ENGINE = None
@classmethod
def default(cls):
return cls.instance()
@classmethod
def instance(cls, engine=None, **kwargs):
"""
:type engine: sqlalchemy.engine.Engine or None
:rtype: cafe.database.sqlalchemy.session.SQLAlchemySessionManager
"""
return cls(cls.factory(engine=engine, **kwargs))
@staticmethod
def _determine_echo():
"""
SQLAlchemy echo level, using DATABASE_ECHO environment variable
Possible values: True, False, 'debug'
:return: True | False | basestring
"""
echo = getenv('DATABASE_ECHO', 'false')
if echo.lower() == 'true':
return True
if echo.lower() == 'debug':
return 'debug'
return False
@classmethod
def factory(cls, engine=None, **kwargs):
if engine is None:
engine = cls.engine()
engine.echo = cls._determine_echo()
return sessionmaker(bind=engine, **kwargs)
@classmethod
def engine(cls, *args, **kwargs):
if cls.ENGINE is None:
cls.ENGINE = cls.get_engine(*args, **kwargs)
return cls.ENGINE
@abstractclassmethod
def get_engine(cls, *args, **kwargs):
"""
Default engine for this session manager.
:rtype: sqlalchemy.engine.Engine
"""
raise NotImplementedError
def __enter__(self):
"""
:rtype: sqlalchemy.orm.session.Session
"""
return super(SQLAlchemySessionManager, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
rvalue = True
if exc_type is not None:
self.session.rollback()
rvalue = False
else:
self.session.commit()
super(SQLAlchemySessionManager, self).__exit__(exc_type, exc_val, exc_tb)
return rvalue
def session_query_wrapper_generator(session_manager=None, engine=None, context=False):
"""
Decorator which wraps a function in a SQLAlchemy session
:param context: execute the wrapped function inside a session context
:type context: bool
:param session_manager: SessionManager to use
:type session_manager: cafe.database.sqlalchemy.session.SQLAlchemySessionManager
:param engine: Engine to use to connect to Mimir
:type engine: sqlalchemy.engine.Engine
"""
session_keyword_arg = 'session'
def session_decorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
execute_in_context = context
if session_keyword_arg not in kwargs or kwargs[session_keyword_arg] is None:
if session_manager is None:
raise TypeError(
'sessioned query functions should be called with an SQLAlchemySessionManager '
'or Session instance when a default session manager is not configured.')
kwargs[session_keyword_arg] = session_manager.instance(engine=engine)
execute_in_context = True
instance = kwargs.pop(session_keyword_arg)
if isinstance(instance, SQLAlchemySessionManager):
instance = instance.instance(engine=engine)
execute_in_context = True
if execute_in_context:
with instance as session:
return function(*args, session=session, **kwargs)
else:
return function(*args, session=instance, **kwargs)
return wrapper
return session_decorator
session_query = session_query_wrapper_generator()
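# Illustrative usage sketch (not part of the original module). It assumes cafe's
# SessionManager opens a session from the bound factory on __enter__, as the __exit__
# override above implies; the SQLite engine and function names are invented for this example.
if __name__ == "__main__":  # pragma: no cover
    import sqlalchemy

    class _SQLiteSessionManager(SQLAlchemySessionManager):
        @classmethod
        def get_engine(cls, *args, **kwargs):
            return sqlalchemy.create_engine("sqlite://")

    _sessioned = session_query_wrapper_generator(session_manager=_SQLiteSessionManager)

    @_sessioned
    def scalar_one(session=None):
        # Runs inside whatever session the decorator handed us.
        return session.execute(sqlalchemy.text("SELECT 1")).scalar()

    print(scalar_one())  # the decorator opens, commits and closes its own session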
|
betsybookwyrm/python-cafe-sqlalchemy
|
cafe/database/sqlalchemy/session.py
|
Python
|
apache-2.0
| 3,849
|
def isPalindrome(num):
num = str(num)
return num[::-1] == num
def addReverse(num):
rev = int(str(num)[::-1])
return num + rev
def isLychrel(num):
counter = 50
while counter > 0:
num = addReverse(num)
        if isPalindrome(num): return False
counter -= 1
return True
lst = [x for x in range(1, 10000) if isLychrel(x)]
print len(lst)
print 349, 349 in lst, '\n', 196, 196 in lst, '\n', 4994, 4994 in lst
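# Worked example (not part of the original solution): 349 reaches a palindrome in three
# reverse-and-add steps, so it is not a Lychrel number, while 196 never converges within
# the 50-iteration cap used above and is therefore assumed to be Lychrel.
#     349  + 943  = 1292
#     1292 + 2921 = 4213
#     4213 + 3124 = 7337   -> palindrome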
|
jialing3/corner_cases
|
Relue/Eu55.py
|
Python
|
apache-2.0
| 452
|
#!/usr/bin/env python3
from pycaw.pycaw import AudioUtilities, ISimpleAudioVolume
from pydub import AudioSegment
from pydub.playback import play
# https://stackoverflow.com/a/43727046
def main():
song = AudioSegment.from_mp3("beep.mp3")
global sess, vol
play(song)
sessions = AudioUtilities.GetAllSessions()
for session in sessions:
# if session.Process:
# print(session.Process.name())
if session.Process and session.Process.name() == "python.exe":
sess = session
vol = sess._ctl.QueryInterface(ISimpleAudioVolume)
print("volume.GetMasterVolume(): %s" % vol.GetMasterVolume())
vol.SetMasterVolume(0, None)
play(song)
vol.SetMasterVolume(1, None)
play(song)
if __name__ == "__main__":
main()
|
sunjerry019/photonLauncher
|
micron/tests/pycaw.test.py
|
Python
|
apache-2.0
| 809
|
"""Jinja filters to streamline openstack versions for numbers and names"""
def _calc_version_from_release(release):
GRIZZLY = ord("G".lower())
GRIZZLY_NUMERIC = 3
return GRIZZLY_NUMERIC - GRIZZLY + ord(release[0].lower())
def _discover_version(value):
try:
return int(value), "OSP"
except ValueError:
return _calc_version_from_release(value), "RDO"
def openstack_release(value):
"""Convert release name or number to a numeric value
{{ 7 | openstack_release }}
-> 7
{{ "8" | openstack_release }}
-> 8
{{ "Liberty" | openstack_release }}
-> 8
>>> openstack_release(7)
7
>>> openstack_release("7")
7
>>> openstack_release("Liberty")
8
"""
return _discover_version(value)[0]
def openstack_distribution(value):
"""Discover distribution from release name/number
{{ 7 | openstack_distribution }}
-> OSP
{{ "8" | openstack_distribution }}
-> OSP
{{ "Liberty" | openstack_distribution }}
-> RDO
>>> openstack_distribution(7)
'OSP'
>>> openstack_distribution("7")
'OSP'
>>> openstack_distribution("Liberty")
'RDO'
"""
return _discover_version(value)[1]
class FilterModule(object):
def filters(self):
return {
'openstack_distribution': openstack_distribution,
'openstack_release': openstack_release,
}
|
okolisny/InfraRed
|
infrared/common/filter_plugins/openstack_release.py
|
Python
|
apache-2.0
| 1,408
|
#!/usr/bin/env python
import argparse
import os
import re
from watchmaker import Prepare
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--noreboot', dest='noreboot', action='store_true',
help='No reboot after provisioning.')
parser.add_argument('--sourceiss3bucket', dest='sourceiss3bucket', action='store_true',
help='Use S3 buckets instead of internet locations for files.')
parser.add_argument('--config', dest='config', default='config.yaml',
help='Path or URL to the config.yaml file.')
parser.add_argument('--logger', dest='logger', action='store_true', default=False,
help='Use stream logger for debugging.')
parser.add_argument('--log-path', dest='log_path', default=None,
help='Path to the logfile for stream logging.')
parser.add_argument('--saltstates', dest='saltstates', default=None,
help='Define the saltstates to use. Must be None, Highstate, or comma-separated-string')
if parser.parse_args().saltstates:
if parser.parse_args().saltstates.lower() not in ['None'.lower(),
'Highstate'.lower(),
'comma-separated-string'.lower()]:
parser.print_help()
systemprep = Prepare(parser.parse_args())
systemprep.install_system()
def append_file(build, file_paths):
    # NOTE: PROJECT_ROOT and replace() are assumed to be defined elsewhere in this script.
    # The version line must have the form
    # __version__ = 'ver'
pattern = r"^(__version__ = ['\"])([^'\"]*)(['\"])"
repl = r"\g<1>\g<2>.dev{0}\g<3>".format(build)
version_file = os.path.join(PROJECT_ROOT, *file_paths)
print(
'Updating version in version_file "{0}" with build "{1}"'
.format(version_file, build)
)
replace(version_file, pattern, repl, flags=re.M)
def main(args):
skip = args.skip
build = args.build
file_paths = args.file_paths
if skip:
print(
'Not updating version for this build, `skip` set to "{0}"'
.format(skip)
)
else:
append_file(build, file_paths)
|
MarionTheBull/watchmaker
|
scripts/Watchmaker.py
|
Python
|
apache-2.0
| 2,199
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from future.utils import python_2_unicode_compatible
from fluent_contents.models import ContentItem, ContentItemManager
@python_2_unicode_compatible
class DisqusCommentsAreaItem(ContentItem):
allow_new = models.BooleanField(_("Allow posting new comments"), default=True)
objects = ContentItemManager() # Avoid Django 1.10 migrations
class Meta:
verbose_name = _("Disqus comments area")
verbose_name_plural = _("Disqus comments areas")
def __str__(self):
return u""
|
edoburu/django-fluent-contents
|
fluent_contents/plugins/disquswidgets/models.py
|
Python
|
apache-2.0
| 597
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import Solution
nums = [10, 9, 2, 5, 3, 7, 101, 18]
nums = [10,9,2,5,3,4]
sol = Solution()
res = sol.lengthOfLIS(nums)
print(res)
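# The Solution class lives in solution.py and is not shown here. For reference, a minimal
# O(n log n) patience-sorting sketch of lengthOfLIS (an assumption about the intended
# behaviour, not necessarily the author's implementation) would look like:
#
#     import bisect
#
#     def length_of_lis(nums):
#         tails = []                        # tails[k] = smallest tail of an increasing
#         for x in nums:                    # subsequence of length k + 1
#             i = bisect.bisect_left(tails, x)
#             if i == len(tails):
#                 tails.append(x)
#             else:
#                 tails[i] = x
#         return len(tails)
#
# For nums = [10, 9, 2, 5, 3, 4] this yields 3 (e.g. the subsequence [2, 3, 4]).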
|
zhlinh/leetcode
|
0300.Longest Increasing Subsequence/test.py
|
Python
|
apache-2.0
| 192
|
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from profiles.models import UserProfile
# BEGIN: Merge User and UserProfile in the admin
class UserProfileInline(admin.StackedInline):
model = UserProfile
max_num = 1
can_delete = False
class UserProfileAdmin(UserAdmin):
inlines = [ UserProfileInline, ]
admin.site.unregister(User)
admin.site.register(User, UserProfileAdmin)
# END
|
cpatrick/comic-django
|
django/profiles/admin.py
|
Python
|
apache-2.0
| 475
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.common import template_utils
from heatclient import exc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from magnum.common import clients
from magnum.common import exception
from magnum.common import short_id
from magnum.conductor import scale_manager
from magnum.conductor.template_definition import TemplateDefinition as TDef
from magnum.conductor import utils as conductor_utils
from magnum.i18n import _
from magnum.i18n import _LE
from magnum.i18n import _LI
from magnum import objects
from magnum.objects.bay import Status as bay_status
bay_heat_opts = [
cfg.IntOpt('max_attempts',
default=2000,
help=('Number of attempts to query the Heat stack for '
'finding out the status of the created stack and '
'getting template outputs. This value is ignored '
'during bay creation if timeout is set as the poll '
'will continue until bay creation either ends '
'or times out.')),
cfg.IntOpt('wait_interval',
default=1,
help=('Sleep time interval between two attempts of querying '
'the Heat stack. This interval is in seconds.')),
cfg.IntOpt('bay_create_timeout',
default=None,
help=('The length of time to let bay creation continue. This '
'interval is in minutes. The default is no timeout.'))
]
cfg.CONF.register_opts(bay_heat_opts, group='bay_heat')
LOG = logging.getLogger(__name__)
def _extract_template_definition(context, bay, scale_manager=None):
baymodel = conductor_utils.retrieve_baymodel(context, bay)
cluster_distro = baymodel.cluster_distro
cluster_coe = baymodel.coe
definition = TDef.get_template_definition('vm', cluster_distro,
cluster_coe)
return definition.extract_definition(context, baymodel, bay,
scale_manager=scale_manager)
def _create_stack(context, osc, bay, bay_create_timeout):
template_path, heat_params = _extract_template_definition(context, bay)
tpl_files, template = template_utils.get_template_contents(template_path)
# Make sure no duplicate stack name
stack_name = '%s-%s' % (bay.name, short_id.generate_id())
if bay_create_timeout:
heat_timeout = bay_create_timeout
elif bay_create_timeout == 0:
heat_timeout = None
else:
# no bay_create_timeout value was passed in to the request
# so falling back on configuration file value
heat_timeout = cfg.CONF.bay_heat.bay_create_timeout
fields = {
'stack_name': stack_name,
'parameters': heat_params,
'template': template,
'files': dict(list(tpl_files.items())),
'timeout_mins': heat_timeout
}
created_stack = osc.heat().stacks.create(**fields)
return created_stack
def _update_stack(context, osc, bay, scale_manager=None):
template_path, heat_params = _extract_template_definition(
context, bay, scale_manager=scale_manager)
tpl_files, template = template_utils.get_template_contents(template_path)
fields = {
'parameters': heat_params,
'template': template,
'files': dict(list(tpl_files.items()))
}
return osc.heat().stacks.update(bay.stack_id, **fields)
def _update_stack_outputs(context, stack, bay):
baymodel = conductor_utils.retrieve_baymodel(context, bay)
cluster_distro = baymodel.cluster_distro
cluster_coe = baymodel.coe
definition = TDef.get_template_definition('vm', cluster_distro,
cluster_coe)
return definition.update_outputs(stack, bay)
class Handler(object):
_update_allowed_properties = set(['node_count'])
def __init__(self):
super(Handler, self).__init__()
# Bay Operations
def bay_create(self, context, bay, bay_create_timeout):
LOG.debug('bay_heat bay_create')
osc = clients.OpenStackClients(context)
try:
created_stack = _create_stack(context, osc, bay,
bay_create_timeout)
except exc.HTTPBadRequest as e:
raise exception.InvalidParameterValue(message=str(e))
except Exception:
raise
bay.stack_id = created_stack['stack']['id']
bay.create()
self._poll_and_check(osc, bay)
return bay
def _validate_properties(self, delta):
update_disallowed_properties = delta - self._update_allowed_properties
if update_disallowed_properties:
err = (_("cannot change bay property(ies) %s.") %
", ".join(update_disallowed_properties))
raise exception.InvalidParameterValue(err=err)
def bay_update(self, context, bay):
LOG.debug('bay_heat bay_update')
osc = clients.OpenStackClients(context)
stack = osc.heat().stacks.get(bay.stack_id)
if (stack.stack_status != bay_status.CREATE_COMPLETE and
stack.stack_status != bay_status.UPDATE_COMPLETE):
operation = _('Updating a bay when stack status is '
'"%s"') % stack.stack_status
raise exception.NotSupported(operation=operation)
delta = bay.obj_what_changed()
if not delta:
return bay
self._validate_properties(delta)
manager = scale_manager.ScaleManager(context, osc, bay)
_update_stack(context, osc, bay, manager)
self._poll_and_check(osc, bay)
return bay
def bay_delete(self, context, uuid):
LOG.debug('bay_heat bay_delete')
osc = clients.OpenStackClients(context)
bay = objects.Bay.get_by_uuid(context, uuid)
stack_id = bay.stack_id
# NOTE(sdake): This will execute a stack_delete operation. This will
# Ignore HTTPNotFound exceptions (stack wasn't present). In the case
# that Heat couldn't find the stack representing the bay, likely a user
# has deleted the stack outside the context of Magnum. Therefore the
# contents of the bay are forever lost.
#
# If the exception is unhandled, the original exception will be raised.
try:
osc.heat().stacks.delete(stack_id)
except exc.HTTPNotFound:
            LOG.info(_LI('The stack %s was not found during bay'
                         ' deletion.') % stack_id)
try:
bay.destroy()
except exception.BayNotFound:
LOG.info(_LI('The bay %s has been deleted by others.') % uuid)
return None
except Exception:
raise
self._poll_and_check(osc, bay)
return None
def _poll_and_check(self, osc, bay):
poller = HeatPoller(osc, bay)
lc = loopingcall.FixedIntervalLoopingCall(f=poller.poll_and_check)
lc.start(cfg.CONF.bay_heat.wait_interval, True)
class HeatPoller(object):
def __init__(self, openstack_client, bay):
self.openstack_client = openstack_client
self.context = self.openstack_client.context
self.bay = bay
self.attempts = 0
def poll_and_check(self):
# TODO(yuanying): temporary implementation to update api_address,
# node_addresses and bay status
stack = self.openstack_client.heat().stacks.get(self.bay.stack_id)
self.attempts += 1
# poll_and_check is detached and polling long time to check status,
# so another user/client can call delete bay/stack.
if stack.stack_status == bay_status.DELETE_COMPLETE:
LOG.info(_LI('Bay has been deleted, stack_id: %s')
% self.bay.stack_id)
try:
self.bay.destroy()
except exception.BayNotFound:
LOG.info(_LI('The bay %s has been deleted by others.')
% self.bay.uuid)
raise loopingcall.LoopingCallDone()
if (stack.stack_status in [bay_status.CREATE_COMPLETE,
bay_status.UPDATE_COMPLETE]):
_update_stack_outputs(self.context, stack, self.bay)
self.bay.status = stack.stack_status
self.bay.status_reason = stack.stack_status_reason
self.bay.node_count = stack.parameters['number_of_minions']
self.bay.save()
raise loopingcall.LoopingCallDone()
elif stack.stack_status != self.bay.status:
self.bay.status = stack.stack_status
self.bay.status_reason = stack.stack_status_reason
self.bay.node_count = stack.parameters['number_of_minions']
self.bay.save()
if stack.stack_status == bay_status.CREATE_FAILED:
LOG.error(_LE('Unable to create bay, stack_id: %(stack_id)s, '
'reason: %(reason)s') %
{'stack_id': self.bay.stack_id,
'reason': stack.stack_status_reason})
raise loopingcall.LoopingCallDone()
if stack.stack_status == bay_status.DELETE_FAILED:
LOG.error(_LE('Unable to delete bay, stack_id: %(stack_id)s, '
'reason: %(reason)s') %
{'stack_id': self.bay.stack_id,
'reason': stack.stack_status_reason})
raise loopingcall.LoopingCallDone()
if stack.stack_status == bay_status.UPDATE_FAILED:
LOG.error(_LE('Unable to update bay, stack_id: %(stack_id)s, '
'reason: %(reason)s') %
{'stack_id': self.bay.stack_id,
'reason': stack.stack_status_reason})
raise loopingcall.LoopingCallDone()
# only check max attempts when the stack is being created when
# the timeout hasn't been set. If the timeout has been set then
# the loop will end when the stack completes or the timeout occurs
if stack.stack_status == bay_status.CREATE_IN_PROGRESS:
if (stack.timeout_mins is None and
self.attempts > cfg.CONF.bay_heat.max_attempts):
                LOG.error(_LE('Bay check exit after %(attempts)s attempts, '
                              'stack_id: %(id)s, stack_status: %(status)s') %
{'attempts': cfg.CONF.bay_heat.max_attempts,
'id': self.bay.stack_id,
'status': stack.stack_status})
raise loopingcall.LoopingCallDone()
else:
if self.attempts > cfg.CONF.bay_heat.max_attempts:
                LOG.error(_LE('Bay check exit after %(attempts)s attempts, '
                              'stack_id: %(id)s, stack_status: %(status)s') %
{'attempts': cfg.CONF.bay_heat.max_attempts,
'id': self.bay.stack_id,
'status': stack.stack_status})
raise loopingcall.LoopingCallDone()
|
ddepaoli3/magnum
|
magnum/conductor/handlers/bay_conductor.py
|
Python
|
apache-2.0
| 11,732
|
#!/usr/bin/python3
def add_common_args(parser):
parser.add_argument("-g", "--greenscreen_server",
default="http://localhost:4994",
help="GreenScreen server:port")
parser.add_argument("-a", "--appid",
help="Chromecast Greenscreen App ID")
parser.add_argument("-c", "--channel",
help="GreenScreen channel to set")
parser.add_argument(
"-l", "--loglevel", default="ERROR", help="Logging level",
choices=["ERROR", "WARNING", "INFO", "DEBUG"])
parser.add_argument(
"-r", "--tries", type=int,
help="Chromecast connection tries. Default is infinite.")
parser.add_argument(
"-t", "--timeout", type=int, default=30,
help="Chromecast socket timeout seconds. Default is 30.");
parser.add_argument(
"-w", "--retry_wait", type=int, default=5,
help="Seconds to wait between Chromecast retries. Default is 5.");
return parser
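# Illustrative usage (a sketch, not part of the original module): callers create their own
# parser, run it through add_common_args(), then parse as usual.
if __name__ == "__main__":  # pragma: no cover
    import argparse
    demo_parser = add_common_args(argparse.ArgumentParser(description="demo"))
    demo_args = demo_parser.parse_args(["-c", "music", "--loglevel", "DEBUG"])
    print(demo_args.channel, demo_args.loglevel, demo_args.timeout)  # -> music DEBUG 30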
|
dermotduffy/greenscreen_control
|
greenscreen_control/common_args.py
|
Python
|
apache-2.0
| 965
|
s = 'azcbobobegghakl'
sub = 'bob'
count = 0
for i in range(0, len(s)):
if sub in s[i:i+3]:
count+=1
print "Number of times bob occurs is: " + str(count)
|
dpanayotov/CS1314
|
MITx6.00/pset1e2.py
|
Python
|
apache-2.0
| 166
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for loading Actions for the loaner project."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import logging
import os
import pkgutil
from loaner.web_app.backend.actions import base_action
_DUPLICATE_ACTION_MSG = (
'Cannot load Action %s: there is already an Action of that name.')
_INSTANTIATION_ERROR_MSG = 'Error instantiating class %s in module %s: %s'
_CACHED_ACTIONS = None
def load_actions(filters=None, log_exceptions=True):
"""Loads Actions from the actions directory, and instantiates them.
Args:
filters: list, strings with names of action classes to load. Loader will
skip classes not listed. In the absence of this list no filters are
applied.
    log_exceptions: bool, whether to suppress exceptions and log their messages
instead.
Returns:
    A dictionary of actions, with their names as keys and instantiated Action
classes as their values.
Raises:
AttributeError: if log_exceptions is False and Action classes are missing
ACTION_NAME or FRIENDLY_NAME attributes, or the run method.
"""
global _CACHED_ACTIONS
if _CACHED_ACTIONS:
return _CACHED_ACTIONS
actions = {base_action.ActionType.SYNC: {}, base_action.ActionType.ASYNC: {}}
importer = pkgutil.ImpImporter(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', 'actions')))
for module_name, module in importer.iter_modules():
del module # Not used.
if module_name.endswith('_test') or module_name.startswith('base_action'):
continue
try:
loaded_module = importer.find_module(module_name).load_module(module_name)
except ImportError:
logging.info('Error importing module %s', module_name)
continue
for obj_name, obj in inspect.getmembers(loaded_module):
if inspect.isclass(obj) and issubclass(obj, base_action.BaseAction):
if filters and obj.ACTION_NAME not in filters:
continue
# Defaults to async for backward compatibility.
action_type = getattr(obj, 'ACTION_TYPE', base_action.ActionType.ASYNC)
try:
action = obj()
except AttributeError as e:
error_message = _INSTANTIATION_ERROR_MSG % (
obj_name, module_name, e.message)
if log_exceptions:
logging.warning(error_message)
continue
else:
raise AttributeError(error_message)
if (
action.ACTION_NAME in actions[base_action.ActionType.SYNC] or
action.ACTION_NAME in actions[base_action.ActionType.ASYNC]):
logging.warning(_DUPLICATE_ACTION_MSG, obj.ACTION_NAME)
continue
actions[action_type][action.ACTION_NAME] = action
_CACHED_ACTIONS = actions
return actions
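# For reference, a minimal action module that this loader would pick up might look like the
# sketch below (based on the attributes checked above; the exact BaseAction contract lives
# in backend/actions/base_action.py, and the file/class names here are hypothetical):
#
#     # loaner/web_app/backend/actions/example_action.py
#     from loaner.web_app.backend.actions import base_action
#
#     class ExampleAction(base_action.BaseAction):
#       ACTION_NAME = 'example'
#       FRIENDLY_NAME = 'Example action'
#       ACTION_TYPE = base_action.ActionType.SYNC  # defaults to ASYNC when omitted
#
#       def run(self, **kwargs):
#         """Do the work for this action."""
#
#     # load_actions(filters=['example']) would then return it keyed by ACTION_NAME
#     # under the SYNC action type.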
|
google/loaner
|
loaner/web_app/backend/lib/action_loader.py
|
Python
|
apache-2.0
| 3,416
|
# Copyright 2013 Open Cloud Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' HTTP API definitions for compatibility with the Keystone OpenStack API
http://api.openstack.org/api-ref.html '''
import datetime
import flask
import json
import keystoneclient
import memcache
import sqlalchemy
import time
from flask import Blueprint
from keystoneclient.v2_0 import Client as keystone_client
from keystoneclient.apiclient.exceptions import AuthorizationFailure
from tukey_middleware import utils
from tukey_middleware.local_settings import vm_ip_auth as settings
from tukey_middleware.auth.token_store import TokenStore
from tukey_middleware.local_settings import LOCAL_PORT as LOCAL_PORT
rest = Blueprint('keystone', __name__)
@rest.after_request
def add_mimetype(response):
''' all responses will be application/json '''
response.headers["content-type"] = "application/json"
return response
def expiration(token_lifetime):
    '''Returns a timestamp token_lifetime seconds from now
'''
date_format = '%Y-%m-%dT%H:%M:%SZ'
current = time.time()
return str(datetime.datetime.fromtimestamp(
current + token_lifetime).strftime(date_format))
def service_catalog_entry(admin_url, region, internal_url, public_url,
service_type, name):
''' format an service catalog entry '''
return {
"endpoints": [
{
"adminURL": admin_url,
"region": region,
"internalURL": internal_url,
"publicURL": public_url
}
],
"endpoints_links": [],
"type": service_type,
"name": name}
def format_tenant(tenant_name, tenant_id):
''' format an enabled tenant with no description'''
return {
"enabled": True,
"description": None,
"name": tenant_name,
"id": tenant_id
}
@rest.route('/tokens', methods=('GET', 'POST'))
def token_request():
''' Intercept a token request and use that to talk to multiple clouds
based on values stored in the database.
request data format will be:
{"auth": {"passwordCredentials": {
"password": "tukeyPassword",
"username": "method identifier"
}}}
tukeyPassword is a password shared between the middleware and the portal to
prevent anyone from talking to the middleware and impersonating users.
method can be shibboleth or openid.
identifier is looked up in the tukey auth db which stores users openstack
credentials. Those credentials are used to talk to the Keystone service
for each cloud. The auth tokens from each keystone request are stored in
memcached with one of the tokens used as the key and returned back to
Horizon. '''
token_store = TokenStore(memcache.Client(['127.0.0.1:11211']))
logger = utils.get_logger()
try:
token_id = flask.request.headers["x-auth-token"]
token_info = token_store.get(str(token_id))
return json.dumps(token_info["__tukey_internal"])
except KeyError:
pass
pw_creds = json.loads(flask.request.data)["auth"]["passwordCredentials"]
# string equality in Python is probably a side-channel vector
if pw_creds["password"] != settings["shared_password"]:
return ("Wrong credentials", 401)
method, userid = pw_creds["username"].split()
user_info_query = '''
select username, password, cloud_name, display_name, auth_url, login_url,
instance_keypairs.cloud_id
from
login join
login_enabled on login.id = login_enabled.login_id join
login_identifier on login.userid = login_identifier.userid join
login_identifier_enabled on login_identifier.id =
login_identifier_enabled.login_identifier_id join
login_method on login_method.method_id = login_identifier.method_id
join
cloud on cloud.cloud_id = login.cloud_id
left outer join
instance_keypairs on instance_keypairs.cloud_id = cloud.cloud_id
where login_method.method_name='%(method)s'
and LOWER(login_identifier.identifier)=LOWER('%(id)s');
''' % {"method": method, "id": userid}
engine = sqlalchemy.create_engine(settings["auth_db_str"])
with engine.begin() as connection:
results = connection.execute(sqlalchemy.text(user_info_query))
roles = []
info_by_cloud = {}
tenant = None
endpoints = {}
for (_username, password, cloud, display_name, auth_url, login_url,
instance_keypairs) in results:
if auth_url:
try:
try:
ksc = keystone_client(auth_url=auth_url, username=_username,
password=password)
                    except Unauthorized:
                        # this should be a valid username so let Horizon know
                        logger.info("Cloud %s Keystone at %s "
                            "rejected username password: %s %s", cloud,
                            auth_url, _username, password)
                        # 403 rather than 401 so Horizon can tell a Keystone
                        # rejection apart from a failed shared-password check
                        flask.abort(403)
tenants = [t for t in ksc.tenants.list() if t.enabled]
if len(tenants) < 1:
logger.info("Cloud %s username: %s has no tenants", cloud,
_username)
continue
for tenant in tenants:
if tenant.name == _username:
break
token_response = ksc.get_raw_token_from_identity_service(
auth_url, username=_username, password=password,
tenant_name=tenant.name)
try:
# this should work if keystoneclient version <= 0.6.0
response, raw_token = token_response
response_status = response.status_code
except ValueError:
# this should work if keystoneclient version >= 0.7.0
raw_token = token_response
response_status = 200
# handle changes between 0.6.0 and 0.7.0
if "access" not in raw_token:
raw_token = {"access": raw_token}
                    if response_status != 200:
                        logger.info("Cloud %s Keystone at %s "
                            "rejected username: %s with status code: %s",
                            cloud, auth_url, _username, response_status)
                        flask.abort(403)
                    # add endpoints
for endpoint in raw_token["access"]["serviceCatalog"]:
endpoints[endpoint["type"]] = endpoint["name"]
token_id = ksc.auth_token
user_id = ksc.user_id
username = _username
raw_token["cloud"] = display_name
if instance_keypairs:
raw_token["instance_keypairs"] = True
info_by_cloud[cloud] = raw_token
info_by_cloud["login" + cloud] = login_url
roles += raw_token["access"]["user"]["roles"]
raw_token["cloud"] = display_name
except AuthorizationFailure:
logger.info("Keystone failed for %s", cloud)
else:
info_by_cloud[cloud] = {"username": _username,
"cloud": display_name,
"instance_keypairs": True if instance_keypairs else False}
info_by_cloud["login" + cloud] = login_url
if tenant is None:
logger.info("Login failed for %s using method %s", userid, method)
flask.abort(401)
region = "RegionOne"
host, port = "localhost", LOCAL_PORT
allowed_services = ['compute', 'image', 'volume', 'object-store']
# glance assumes that it is at /v1 so we will give it that
#service_paths = {k: K for k in allowed_services}
#service_paths["image"] = "v1"
services = [("http://%s:%s/%s/%s" % (host, port, service, tenant.id),
service, service_name)
for service, service_name in endpoints.items()
if service in allowed_services]
services += [("http://%s:%s/v2.0" % (host, port), "identity", "keystone")]
catalog = {
"access": {
"token": {
"expires": expiration(43200),
"id": token_id,
"tenant": format_tenant(tenant.name, tenant.id)
},
"serviceCatalog": [
service_catalog_entry(url, region, url, url, service_type, name)
for url, service_type, name in services] + [
service_catalog_entry(
"http://%s:%s/services/Admin" % (host, port), region,
"http://%s:%s/services/Cloud" % (host, port),
"http://%s:%s/services/Cloud" % (host, port), "ec2",
"ec2")],
"user": {
"username": username,
"roles_links": [],
"id": user_id,
"roles": roles,
"name": username
}},
"path": "", "host": host, "port": port}
info_by_cloud["__tukey_internal"] = catalog
# TODO:
# see what the shortest expiration is in the set of expirations
# then set the returned expiration to that and make sure that
# the memcache expiration is greater than that but has a value so
# that memcached entries don't fill everything up
token_store.set(str(token_id), info_by_cloud, 172800)
logger.info("Login succeeded for %s using method %s" % (userid, method))
return json.dumps(catalog)
@rest.route('/tenants', methods=('GET', 'POST'))
def tenant_request():
''' Request for just the tenant info. This request assumes that /tokens
was accessed and created the entry in memcached '''
try:
token_id = flask.request.headers["x-auth-token"]
except KeyError:
flask.abort(401)
toks = TokenStore(memcache.Client(['127.0.0.1:11211']))
token_info = toks.get(str(token_id))
tenants = {
"tenants_links": [],
"tenants": [
token_info["__tukey_internal"]["access"]["token"]["tenant"]
]
}
return json.dumps(tenants)
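# Usage sketch (illustrative; the URL prefix depends on how this blueprint is
# registered, and url_of_this_service is a placeholder):
#   import requests
#   resp = requests.get(url_of_this_service + "/tenants",
#                       headers={"x-auth-token": token})
#   tenant = resp.json()["tenants"][0]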
|
LabAdvComp/tukey_middleware
|
tukey_middleware/api/keystone.py
|
Python
|
apache-2.0
| 10,922
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_DATAFLOW_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_DATAFLOW_KEY)
class CloudDataflowExampleDagsSystemTest(GoogleSystemTest):
@provide_gcp_context(GCP_DATAFLOW_KEY)
def test_run_example_dag_function(self):
self.run_dag('example_gcp_dataflow', CLOUD_DAG_FOLDER)
|
wooga/airflow
|
tests/providers/google/cloud/operators/test_dataflow_system.py
|
Python
|
apache-2.0
| 1,290
|
# -*- coding: utf-8 -*-
"""JSON serializes for XMS API object classes.
Note, this module is mainly intended for internal use and the API may
change in the future
"""
from __future__ import absolute_import, division, print_function
import binascii
from clx.xms.api import RESET
def _write_datetime(value):
"""Helper that validates a date time object."""
if value.utcoffset() is None:
raise ValueError("Expected datetime with time zone")
return value.isoformat()
def _write_base64(value):
return binascii.b2a_base64(value).decode('ascii')
def _write_hex(value):
return binascii.hexlify(value).decode('ascii')
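# Examples (Python 3):
#   _write_hex(b'\x01\x02\xff') == '0102ff'
#   _write_base64(b'hello') == 'aGVsbG8=\n'   # b2a_base64 appends a newline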
def _create_batch_helper(batch):
"""Helper that prepares the fields of a batch for JSON serialization.
:param MtBatchSmsCreate batch: the batch to serialize
:return: dictionary for JSON serialization
"""
fields = {
'from': batch.sender,
'to': sorted(batch.recipients)
}
if batch.delivery_report:
fields['delivery_report'] = batch.delivery_report
if batch.send_at:
fields['send_at'] = _write_datetime(batch.send_at)
if batch.expire_at:
fields['expire_at'] = _write_datetime(batch.expire_at)
if batch.tags:
fields['tags'] = sorted(batch.tags)
if batch.callback_url:
fields['callback_url'] = batch.callback_url
return fields
def text_batch(batch):
"""Serializes the given text batch into JSON.
:param MtBatchTextSmsCreate batch: the batch to serialize
:return: dictionary suitable for JSON serialization
"""
fields = _create_batch_helper(batch)
fields['type'] = 'mt_text'
fields['body'] = batch.body
if batch.parameters:
fields['parameters'] = batch.parameters
return fields
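# Illustrative result (sketch, assuming a batch with sender '12345', a single
# recipient, body 'Hi there!' and no optional fields set):
#   {'from': '12345', 'to': ['987654321'], 'type': 'mt_text', 'body': 'Hi there!'}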
def binary_batch(batch):
"""Serializes the given binary batch into JSON.
:param MtBatchBinarySmsCreate batch: the batch to serialize
:return: dictionary suitable for JSON serialization
"""
fields = _create_batch_helper(batch)
fields['type'] = 'mt_binary'
fields['body'] = _write_base64(batch.body)
fields['udh'] = _write_hex(batch.udh)
return fields
def _batch_update_helper(batch):
"""Helper that prepares the given batch for serialization.
:param MtBatchSmsUpdate batch: the batch to serialize
:return: dictionary suitable for JSON serialization
:rtype: dict
"""
fields = {}
if batch.recipient_insertions:
fields['to_add'] = sorted(batch.recipient_insertions)
if batch.recipient_removals:
fields['to_remove'] = sorted(batch.recipient_removals)
if batch.sender:
fields['from'] = batch.sender
if batch.delivery_report == RESET:
fields['delivery_report'] = None
elif batch.delivery_report:
fields['delivery_report'] = batch.delivery_report
if batch.send_at == RESET:
fields['send_at'] = None
elif batch.send_at:
fields['send_at'] = _write_datetime(batch.send_at)
if batch.expire_at == RESET:
fields['expire_at'] = None
elif batch.expire_at:
fields['expire_at'] = _write_datetime(batch.expire_at)
if batch.callback_url == RESET:
fields['callback_url'] = None
elif batch.callback_url:
fields['callback_url'] = batch.callback_url
return fields
def text_batch_update(batch):
"""Serializes the given text batch update into JSON.
:param MtBatchTextSmsUpdate batch: the batch update to serialize
:return: dictionary suitable for JSON serialization
:rtype: dict
"""
fields = _batch_update_helper(batch)
fields['type'] = 'mt_text'
if batch.body:
fields['body'] = batch.body
if batch.parameters == RESET:
fields['parameters'] = None
elif batch.parameters:
fields['parameters'] = batch.parameters
return fields
def binary_batch_update(batch):
"""Serializes the given binary batch update into JSON.
:param MtBatchBinarySmsUpdate batch: the batch update to serialize
:return: dictionary suitable for JSON serialization
:rtype: dict
"""
fields = _batch_update_helper(batch)
fields['type'] = 'mt_binary'
if batch.body:
fields['body'] = _write_base64(batch.body)
if batch.udh:
fields['udh'] = _write_hex(batch.udh)
return fields
def _group_auto_update_helper(auto_update):
"""Helper that prepares the given group auto update for JSON
serialization.
:param GroupAutoUpdate auto_update: the auto update to serialize
:return: dictionary suitable for JSON serialization
:rtype: dict
"""
fields = {
'to': auto_update.recipient
}
if auto_update.add_word_pair[0]:
fields.setdefault('add', {})['first_word'] = \
auto_update.add_word_pair[0]
if auto_update.add_word_pair[1]:
fields.setdefault('add', {})['second_word'] = \
auto_update.add_word_pair[1]
if auto_update.remove_word_pair[0]:
fields.setdefault('remove', {})['first_word'] = \
auto_update.remove_word_pair[0]
if auto_update.remove_word_pair[1]:
fields.setdefault('remove', {})['second_word'] = \
auto_update.remove_word_pair[1]
return fields
def group_create(group):
"""Serializes the given group create object to JSON.
:param GroupCreate group: the group to serialize
:return: dictionary suitable for JSON serialization
"""
fields = {}
if group.name:
fields['name'] = group.name
if group.members:
fields['members'] = sorted(group.members)
if group.child_groups:
fields['child_groups'] = sorted(group.child_groups)
if group.auto_update:
fields['auto_update'] = _group_auto_update_helper(group.auto_update)
if group.tags:
fields['tags'] = sorted(group.tags)
return fields
def group_update(obj):
"""Serializes the given group update object to JSON.
:param GroupUpdate obj: the group update to serialize
:return: a dictionary suitable for JSON serialization
:rtype: dict
"""
fields = {}
if obj.name == RESET:
fields['name'] = None
elif obj.name:
fields['name'] = obj.name
if obj.member_insertions:
fields['add'] = sorted(obj.member_insertions)
if obj.member_removals:
fields['remove'] = sorted(obj.member_removals)
if obj.child_group_insertions:
fields['child_groups_add'] = sorted(obj.child_group_insertions)
if obj.child_group_removals:
fields['child_groups_remove'] = sorted(obj.child_group_removals)
if obj.add_from_group:
fields['add_from_group'] = obj.add_from_group
if obj.remove_from_group:
fields['remove_from_group'] = obj.remove_from_group
if obj.auto_update == RESET:
fields['auto_update'] = None
elif obj.auto_update:
fields['auto_update'] = _group_auto_update_helper(obj.auto_update)
return fields
def tags(tag_coll):
"""Serializes the given tags to a JSON string.
:param set[str] tag_coll: a set of tags
:return: a dictionary suitable for JSON serialization
:rtype: dict
"""
return {'tags': sorted(tag_coll)}
def tags_update(tags_to_add, tags_to_remove):
"""Serializes the given tag updates to a JSON string.
    :param set[str] tags_to_add: a set of tags to add
    :param set[str] tags_to_remove: a set of tags to remove
:return: a dictionary suitable for JSON serialization
:rtype: dict
"""
return {
'add': sorted(tags_to_add),
'remove': sorted(tags_to_remove)
}
|
clxcommunications/sdk-xms-python
|
clx/xms/serialize.py
|
Python
|
apache-2.0
| 7,607
|
from django.shortcuts import render
def index(request):
pass
|
c05mic/django-url-shortener
|
dj_url_shortener/views.py
|
Python
|
apache-2.0
| 62
|
# Copyright 2014 Sardar Yumatov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import binascii
import datetime
from struct import unpack
# python 3 support
import six
__all__ = ('Session', 'Connection')
# module level logger
LOG = logging.getLogger(__name__)
class Session(object):
""" The front-end for the underlying connection pool. """
# Default APNs addresses.
ADDRESSES = {
"push_sandbox": ("gateway.sandbox.push.apple.com", 2195),
"push_production": ("gateway.push.apple.com", 2195),
"feedback_sandbox": ("feedback.sandbox.push.apple.com", 2196),
"feedback_production": ("feedback.push.apple.com", 2196),
}
# Default timeout for attempting a new connection.
DEFAULT_CONNECT_TIMEOUT = 10
# Default write buffer size. Should be close to MTU size.
DEFAULT_WRITE_BUFFER_SIZE = 2048
# Default timeout for write operations.
DEFAULT_WRITE_TIMEOUT = 20
# Default read buffer size, used by feedback.
DEFAULT_READ_BUFFER_SIZE = 2048
# Default timeout for read operations.
DEFAULT_READ_TIMEOUT = 20
    # Default timeout waiting for an error response at the end of a message send operation.
DEFAULT_READ_TAIL_TIMEOUT = 3
def __init__(self, pool="vapnsclient.backends.stdio",
connect_timeout=DEFAULT_CONNECT_TIMEOUT,
write_buffer_size=DEFAULT_WRITE_BUFFER_SIZE,
write_timeout=DEFAULT_WRITE_TIMEOUT,
read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
read_timeout=DEFAULT_READ_TIMEOUT,
read_tail_timeout=DEFAULT_READ_TAIL_TIMEOUT,
**pool_options):
""" The front-end to the underlying connection pool. The purpose of this
class is to hide the transport implementation that is being used for
networking. Default implementation uses built-in python sockets and
``select`` for asynchronous IO.
:Arguments:
- pool (str, type or object): networking layer implementation.
- connect_timeout (float): timeout for new connections.
- write_buffer_size (int): chunk size for sending the message.
- write_timeout (float): maximum time to send single chunk in seconds.
- read_buffer_size (int): feedback buffer size for reading.
- read_timeout (float): timeout for reading single feedback block.
- read_tail_timeout (float): timeout for reading status frame after message is sent.
- pool_options (kwargs): passed as-is to the pool class on instantiation.
"""
        # IO defaults
self.connect_timeout = connect_timeout
self.write_buffer_size = write_buffer_size
self.write_timeout = write_timeout
self.read_buffer_size = read_buffer_size
self.read_timeout = read_timeout
self.read_tail_timeout = read_tail_timeout
# class name given by qualified name
if isinstance(pool, six.string_types):
pool_module = __import__(pool)
for name in pool.split('.')[1:]:
try:
pool_module = getattr(pool_module, name)
except AttributeError:
raise ImportError("Can't load pool backend", pool)
try:
pool = getattr(pool_module, "Backend")
except AttributeError:
raise ImportError("Can't find Backend class in pool module", pool)
# resolved or given as class
if isinstance(pool, type):
pool = pool(**pool_options)
self.pool = pool
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("New session, WB: %sb/%ss, RB: %sb/%ss, TT: %ss, Pool: %s",
write_buffer_size, write_timeout,
read_buffer_size, read_timeout,
read_tail_timeout,
pool.__class__.__module__)
@classmethod
def get_address(cls, address):
""" Maps address to (host, port) tuple. """
if not isinstance(address, (list, tuple)):
addr = cls.ADDRESSES.get(address)
if addr is None:
raise ValueError("Unknown address mapping: {0}".format(address))
address = addr
return address
def new_connection(self, address="feedback_sandbox", certificate=None, **cert_params):
""" Obtain new connection to APNs. This method will not re-use existing
connection from the pool. The connection will be closed after use.
Unlike :func:`get_connection` this method does not cache the
connection. Use it to fetch feedback from APNs and then close when
you are done.
:Arguments:
- address (str or tuple): target address.
- certificate (:class:`BaseCertificate`): provider's certificate instance.
- cert_params (kwargs): :class:`BaseCertificate` arguments, used if ``certificate`` instance is not given.
"""
if certificate is not None:
cert = certificate
else:
cert = self.pool.get_certificate(cert_params)
address = self.get_address(address)
return Connection(address, cert, self, use_cache=False)
def get_connection(self, address="push_sanbox", certificate=None, **cert_params):
""" Obtain cached connection to APNs.
        Session caches connection descriptors, which remain open after use.
Caching saves SSL handshaking time. Handshaking is lazy, it will be
performed on first message send.
You can provide APNs address as ``(hostname, port)`` tuple or as
one of the strings:
        - push_sandbox -- ``("gateway.sandbox.push.apple.com", 2195)``, the default.
- push_production -- ``("gateway.push.apple.com", 2195)``
- feedback_sandbox -- ``("feedback.sandbox.push.apple.com", 2196)``
- feedback_production -- ``("feedback.push.apple.com", 2196)``
:Arguments:
- address (str or tuple): target address.
- certificate (:class:`BaseCertificate`): provider's certificate instance.
- cert_params (kwargs): :class:`BaseCertificate` arguments, used if ``certificate`` instance is not given.
"""
if certificate is not None:
cert = certificate
else:
cert = self.pool.get_certificate(cert_params)
address = self.get_address(address)
return Connection(address, cert, self, use_cache=True)
def outdate(self, delta):
""" Close open unused connections in the pool that are left untouched
for more than ``delta`` time.
You may call this method in a separate thread or run it in some
periodic task. If you don't, then all connections will remain open
until session is shut down. It might be an issue if you care about
your open server connections.
:Arguments:
delta (``timedelta``): maximum age of unused connection.
"""
if LOG.isEnabledFor(logging.DEBUG):
if delta.total_seconds() == 0.0:
LOG.debug("Shutdown session")
else:
LOG.debug("Outdating session with delta: %s", delta)
self.pool.outdate(delta)
def shutdown(self):
""" Shutdown all connections in the pool. This method does will not close
connections being use at the calling time.
"""
# NOTE: global package datetime can become None if session is stored in
# a global variable and being garbage collected with the rest of the module.
if datetime is not None:
self.pool.outdate(datetime.timedelta())
def __del__(self):
""" Last chance to shutdown() """
self.shutdown()
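# Usage sketch (illustrative; the certificate keyword arguments are
# placeholders whose names depend on the configured pool backend):
#   session = Session()
#   con = session.get_connection("push_production", cert_file="apns.pem")
#   error = con.send(message)   # None on success, otherwise a status tuple
#   feedback_con = session.new_connection("feedback_production",
#                                         cert_file="apns.pem")
#   for token, timestamp in feedback_con.feedback():
#       pass  # mark the token as inactive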
class Connection(object):
""" Connection wrapper. """
def __init__(self, address, certificate, session, use_cache=False):
""" New connection wrapper.
:Arguments:
- address (tuple) - (host, port) to connect to.
- certificate (:class:`BaseCertificate`) - provider certificate.
- session (object) - parent session.
- use_cache (bool) - True if connections may be cached in the pool.
"""
self.address = address
self.certificate = certificate
self.session = session
self.use_cache = use_cache
self._reused = False
self._connection = None
self._lock = self.session.pool.create_lock()
def __enter__(self):
try:
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Entering networking session")
self._lock.acquire() # block until lock is given
            self._open_connection()  # can raise an exception; bubble it up to the caller
except:
self._lock.release()
raise
def __exit__(self, exc_type, exc_value, traceback):
try:
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Leaving networking session%s%s",
" with still open connection" if self._connection else "",
" because of failure" if exc_type else "",
exc_info=(exc_type is not None))
# the only possible scenario when connection is left open while
# we are here is when some totally unexpected exception bubbles up.
assert exc_type is not None or self._connection is None
if self._connection:
# terminate connection in case of an error
self._close(terminate=True)
finally:
self._lock.release()
        # returning None (which is falsy) tells Python to re-raise any pending exception
def send(self, message):
""" Send message. """
# will raise exception if non-cached connection has been used
with self:
batch = message.batch(self.session.write_buffer_size)
failed_after = None
status = None
total_sent = 0
decoder = ResponseDecoder()
for iteration, (sent, chunk) in enumerate(batch):
assert len(chunk) > 0
total_sent += len(chunk)
if iteration == 0:
                    # can raise an exception if the new connection fails to open.
# write doesn't return anything, but we are interested in IO failures.
_, io_exception = self._ensuring_io(lambda con: con.write(chunk, self.session.write_timeout))
if io_exception is not None:
# IO failure on first write, sent is 0 here, retry with
# the whole message
failed_after = sent
break
else:
# other writes, that fail naturally
try:
ret = self._connection.write(chunk, self.session.write_timeout)
except:
# IO failure on subsequent writes, some of the tokens are
# sent, break on the beginning of this batch
failed_after = sent
break
# check for possibly arriving failure frame
try:
# should either return sequence of bytes or None if read buffer
# is empty.
ret = self._connection.peek(256) # status frame is 6 bytes
except:
                    # Peek failed, which means our read operations are failing
                    # abnormally. The final read will probably fail too, so fail
                    # early, possibly messing up the first batch but not
                    # everything.
failed_after = sent
break
else:
if ret is not None:
decoder.feed(ret)
                        # status is not None only if a previous iteration got a
                        # successful status even though it was not the last
                        # chunk. this should not happen under the APNs protocol,
                        # but we handle it anyway. the easy solution: ignore the
                        # previous status (probably garbage in the read buffer)
                        # with a warning.
                        if status is not None:
                            LOG.warning("Got success frame while batch is not completed. Frame ignored.")
# NOTE: it is possible we get None here because not all
# bytes could be read without blocking. on next iteration
# or final blocking read we will get the rest of the bytes.
status = decoder.decode()
if status is not None and status[0] != 0: # error detected
if LOG.isEnabledFor(logging.INFO):
LOG.info("Message send failed midway with status %r to address %r. Sent tokens: %s, bytes: %s",
status, self.address, sent, total_sent)
                            # APNs sent an error response; bail out and prepare for a retry
self._close(terminate=True)
return status
# else: nothing in the read buffer, keep sending
# by this time we either stopped prematurely on IO error with
# failed_after set or we finished all batches, possibly having
# status read with non-blocking IO.
# the write stream possibly failed, but the read stream might be still
# open with status frame precisely pointing to failed token.
if status is None:
                # read the status frame; this can take two iterations if the first
                # one returns only a few buffered bytes, not the whole status frame.
while True:
try:
ret = self._connection.read(256, self.session.read_tail_timeout)
except:
                        # one of two things has happened:
                        # - everything went fine: we waited the tail timeout for
                        #   the final status frame and got nothing (timeout).
                        #   this is a success condition according to the APNs
                        #   documentation, since a status frame with code 0 is
                        #   never actually sent.
                        # - reading failed with some other exception. we don't
                        #   know from which token onward the batch failed, and
                        #   we can't try to read the status frame again because
                        #   the read stream is probably closed by now. there is
                        #   nothing to do except treat everything as OK: failed
                        #   tokens will be reported by feedback, and tokens that
                        #   never got the message simply cannot be detected here.
                        #
                        # This is a limitation of the APNs protocol itself.
                        #
                        # We still have to check failed_after, which tells us
                        # when an IO write failed. If failed_after is not None,
                        # we most likely got here because the connection was
                        # closed for read and write after the write failure.
break
else:
if ret is not None:
decoder.feed(ret)
status = decoder.decode()
if status is None:
                                # we got bytes, but not enough for the status frame.
continue
# complete status frame read, evaluate
if status[0] != 0:
if LOG.isEnabledFor(logging.INFO):
LOG.info("Message send failed with status %r to address %r. Sent tokens: %s, bytes: %s",
status, self.address, len(message.tokens), total_sent)
                            # APNs did indeed report an error
self._close(terminate=True)
return status
# got a successful status or read ended with closed connection
break
            # by this time we have either a successful status frame (code 0) or
            # we failed to obtain a status frame at all. failed_after is not
            # None if an IO write failed earlier.
            # if some bytes were read but we failed to read a complete status
            # frame, then all timeouts are exceeded or the read stream is
            # broken, so we can't wait and read again. let the user know this
            # happened and treat the situation as if no frame was received.
            if status is None and decoder._buf:
                LOG.warning("Failed to read complete status frame from %r, but some bytes were read before. The read timeout %s is probably too short.",
self.address, self.session.read_tail_timeout)
# close connection, it is failing
self._close(terminate=True)
            # normal success scenario with a success frame provided. this never
            # happens according to the APNs documentation (no status frame is
            # sent on success), but it can happen logically.
if failed_after is None and status is not None and status[0] == 0:
# success, release connection for re-use if it was meant for reuse
self._close(terminate=not self._reused)
return status
            # everything looks like success, but it might be because the read
            # stream was closed or simply timed out. check for a write IO failure.
if failed_after is not None:
if LOG.isEnabledFor(logging.INFO):
LOG.info("Message send failed midway with status %r to address %r. Sent tokens: %s, bytes: %s",
status, self.address, failed_after, total_sent)
# close connection, it is failing
self._close(terminate=True)
return (255, failed_after + 1)
            # we have sent the message to all target tokens and waited
            # tail_timeout for any error response to arrive. Nothing arrived
            # (hopefully not because of a read error) and we did not hit a
            # write failure midway, so according to Apple's manual everything
            # went OK.
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Message sent successfully to address %r. Sent tokens: %s, bytes: %s",
self.address, len(message.tokens), total_sent)
# success, release connection for re-use if it was meant for reuse
self._close(terminate=not self._reused)
return None
def feedback(self):
""" Read and parse feedback information. """
if self.use_cache:
# sanity check
LOG.warning("Don't use cached connections for feedback, you might get stale data.")
# will raise exception if non-cached connection has been used
with self:
# on connection failure we bubble up the exceptions. on IO failure
# we get the exception as return value, stopping the iteration normally.
data, io_exception = self._ensuring_io(lambda con: con.read(self.session.read_buffer_size, self.session.read_timeout))
# data is non empty sequence of bytes on success, None if connection
# has been closed or on failure. io_exception is not None on IO errors.
feedback = FeedbackDecoder()
total_records = 0
failed = io_exception is not None
while data is not None:
feedback.feed(data)
# TODO: use yield from
for record in feedback.decoded():
total_records += 1
yield record
try:
# read next chunk, leaving again either sequence of bytes or
# None if connection has been closed.
data = self._connection.read(self.session.read_buffer_size, self.session.read_timeout)
except:
# IO failure, probably because of a timeout. break the loop,
# we will fetch the rest during the next session.
failed = True
break
# there is no point to keep this connection open
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Feedback received %s records from address %r. Stopped %s",
total_records, self.address, "by failure" if failed else "successfully")
# always close feedback connection, preventing stale data
self._close(terminate=True)
def _ensuring_io(self, func):
""" Re-opens connection if read or write has failed. Used to re-initialize
connections from the pool with a transport not supporting reliable
socket closed condition.
"""
failed = False
if self._reused:
# if connection is reused, then there might be left over bytes in the
# read buffer. flush them.
try:
self._connection.reset()
except:
LOG.info("Failed to reset connection to %r", self.address, exc_info=True)
# close the connection, prepare for re-connect
self._close(terminate=True)
failed = True
if not failed:
            # OK, reset succeeded or this is a fresh new connection
try:
return func(self._connection), None
except Exception as exc:
if self.session.pool.can_detect_close or not self._reused:
# bubble up IO related problem on non-cached connection
return None, exc
# Either failed by reset or failed by IO operation. If
# pool.can_detect_close is False and we are reusing the connection from
# the cache pool, then it was probably already failing when we got it.
# Re-get the connection from the pool again.
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Re-opening connection to address %r", self.address)
# ensure failing connection is closed
self._close(terminate=True)
# open new connection. this operation might raise exceptions, which
# will propagate to the outer most caller indicating severe network
# errors.
self._open_connection(by_failure=True)
return self._ensuring_io(func)
def _open_connection(self, by_failure=False):
""" Request new connection handle from underlying pool. """
# use pool if caching is requested or we are ensuring connection with
# cache enabled.
if self.use_cache and (not by_failure or self.session.pool.use_cache_for_reconnects):
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Open cached connection to %r%s.", self.address, " by failure" if by_failure else "")
self._connection = self.session.pool.get_cached_connection(
self.address,
self.certificate,
timeout=self.session.connect_timeout
)
self._reused = True
else:
if LOG.isEnabledFor(logging.DEBUG):
LOG.debug("Open new connection to %r%s.", self.address, " by failure" if by_failure else "")
self._connection = self.session.pool.get_new_connection(
self.address,
self.certificate,
timeout=self.session.connect_timeout
)
self._reused = False
def _close(self, terminate=False):
""" Close connection. """
if self._connection:
if terminate:
self._connection.close()
else:
self.session.pool.release(self._connection)
self._connection = None
# private
class ResponseDecoder(object):
""" Response frame decoder. """
# Response command byte
COMMAND = 8
def __init__(self):
self._buf = []
def feed(self, data):
""" Feed next frame with data. """
self._buf.append(data)
def decode(self):
""" Returns reconstructed response frame. """
buf = six.binary_type().join(self._buf)
if len(buf) >= 6:
ret = unpack(">BBI", buf[0:6])
self._buf = []
if len(buf) > 6:
# should normally not happen as there is always a single frame
self._buf.append(buf[6:])
assert ret[0] == self.COMMAND, "Got unknown command from APNs: {0}. Looks like protocol has been changed.".format(ret[0])
return (ret[1], ret[2])
else:
self._buf = [buf]
return None
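    # Example (sketch): feeding the 6-byte frame b'\x08\x01\x00\x00\x00\x07'
    # and calling decode() yields (1, 7) -- status code 1 for the notification
    # whose identifier is 7.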
# private
class FeedbackDecoder(object):
""" Feedback decoder. """
def __init__(self):
self._buf = []
def feed(self, data):
""" Feed next frame with raw data. """
self._buf.append(data)
def decoded(self):
""" Returns generator over next set of decoded records. """
buf = six.binary_type().join(self._buf)
pos = 0
while (pos + 6) < len(buf):
timestamp, length = unpack(">IH", buf[pos:(pos + 6)])
assert length > 0
if (pos + 6 + length) <= len(buf):
token = binascii.hexlify(buf[(pos + 6):(pos + 6 + length)])
pos += 6 + length
yield token, timestamp
if pos == len(buf):
break
else:
break
# consume everything except suffix
        self._buf = [buf[pos:]]
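    # Example (sketch, Python 3): after feeding b'\x00\x00\x00\x01\x00\x02\xab\xcd',
    # decoded() yields (b'abcd', 1) -- the hex token 'abcd' that became invalid
    # at UNIX timestamp 1.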
|
vine/apns-client
|
vapnsclient/transport.py
|
Python
|
apache-2.0
| 27,324
|
from common_fixtures import * # NOQA
import time
from cattle import ApiError
from test_volume import VOLUME_CLEANUP_LABEL
@pytest.fixture(scope='module', autouse=True)
def update_event_settings(request, super_client):
settings = super_client.list_setting()
originals = []
    def update_setting(new_value, s):
        originals.append((s, {'value': s.value}))
s = super_client.update(s, {'value': new_value})
wait_setting_active(super_client, s)
for setting in settings:
if setting.name == 'manage.nonrancher.containers' \
and setting.value != 'true':
update_setting('true', setting)
def revert_settings():
for s in originals:
super_client.update(s[0], s[1])
request.addfinalizer(revert_settings)
@pytest.fixture(scope='module')
def host(super_client, context):
return super_client.reload(context.host)
@pytest.fixture(scope='module')
def agent_cli(context):
return context.agent_client
@pytest.fixture(scope='module')
def user_id(context):
return context.project.id
def test_container_event_create(client, host, agent_cli, user_id):
# Submitting a 'start' containerEvent should result in a container
# being created.
external_id = random_str()
container = create_native_container(client, host, external_id,
agent_cli, user_id)
assert container.nativeContainer is True
assert container.state == 'running'
def test_volume_cleanup_strategy_label(client, host, agent_cli, user_id):
external_id = random_str()
container = create_native_container(client, host, external_id,
agent_cli, user_id)
assert VOLUME_CLEANUP_LABEL not in container.labels
external_id = random_str()
inspect = {'Config': {'Labels': {VOLUME_CLEANUP_LABEL: 'all'}}}
container = \
create_native_container(client, host, external_id, agent_cli, user_id,
inspect=inspect)
assert container.labels[VOLUME_CLEANUP_LABEL] == 'all'
def test_container_event_start_stop(client, host, agent_cli, user_id):
# Submitting a 'stop' or 'die' containerEvent should result in a
# container resource being stopped.
external_id = random_str()
container = create_native_container(client, host, external_id,
agent_cli, user_id)
assert container.state == 'running'
create_event(host, external_id, agent_cli, client, user_id, 'stop')
container = client.wait_success(container)
assert container.state == 'stopped'
create_event(host, external_id, agent_cli, client, user_id, 'start')
container = client.wait_success(container)
assert container.state == 'running'
# Sending a start event on a running container should have no effect
create_event(host, external_id, agent_cli, client, user_id, 'start')
container = client.wait_success(container)
assert container.state == 'running'
create_event(host, external_id, agent_cli, client, user_id, 'die')
container = client.wait_success(container)
assert container.state == 'stopped'
# Sending a stop event on a stopped container should have no effect
create_event(host, external_id, agent_cli, client, user_id, 'stop')
container = client.wait_success(container)
assert container.state == 'stopped'
def test_container_event_start(client, host, agent_cli, user_id):
# Submitting a 'start' containerEvent should result in a container
# being started.
external_id = random_str()
container = create_native_container(client, host, external_id,
agent_cli, user_id)
assert container.state == 'running'
create_event(host, external_id, agent_cli, client, user_id, 'stop')
containers = client.list_container(externalId=external_id)
assert len(containers) == 1
container = client.wait_success(containers[0])
assert container.state != 'removed'
def test_container_event_remove_start(client, host, agent_cli, user_id):
# If a container is removed and then an event comes in for the container,
# the event should be ignored.
external_id = random_str()
container = create_native_container(client, host, external_id,
agent_cli, user_id)
assert container.state == 'running'
container = client.wait_success(container.stop())
assert container.state == 'stopped'
container = client.wait_success(container.remove())
assert container.state == 'removed'
create_event(host, external_id, agent_cli, client, user_id, 'start')
container = client.wait_success(container)
assert container.state == 'removed'
containers = client.list_container(externalId=external_id)
assert len(containers) == 0
def test_container_event_destroy(client, host, agent_cli, user_id):
# Submitting a 'destroy' containerEvent should result in a container
# being removed.
external_id = random_str()
container = create_native_container(client, host, external_id,
agent_cli, user_id)
assert container.state == 'running'
create_event(host, external_id, agent_cli, client, user_id, 'destroy')
container = client.wait_success(container)
assert container.state == 'removed'
# Sending a destroy event to a removed container should have no effect
create_event(host, external_id, agent_cli, client, user_id, 'destroy')
container = client.wait_success(container)
assert container.state == 'removed'
def test_rancher_container_events(client, context, host, agent_cli, user_id):
# A "normal" container (one created in Rancher) should also respond to
# non-rancher container events
container = context.create_container(name=random_str(),
startOnCreate=False)
assert container.state == 'stopped'
inspect = new_inspect(random_str())
inspect['Config']['Labels'] = {'io.rancher.container.uuid': container.uuid}
# pass random external id to prove look up by rancher uuid works
rand = random_str()
create_event(host, rand, agent_cli, client, user_id, 'start', inspect)
container = client.wait_success(container)
assert container.state == 'running'
create_event(host, rand, agent_cli, client, user_id, 'stop', inspect)
container = client.wait_success(container)
assert container.state == 'stopped'
    # Note that we don't pass inspect on destroy because it won't exist. In
    # this case, we have to pass the container's actual externalId.
ext_id = container.externalId
create_event(host, ext_id, agent_cli, client, user_id, 'destroy')
container = client.wait_success(container)
assert container.state == 'removed'
def test_bad_agent(super_client, new_context):
host, account, agent_client = register_simulated_host(new_context,
return_agent=True)
host = super_client.reload(host)
def post():
agent_client.create_container_event(
reportedHostUuid=host.data.fields['reportedUuid'],
externalId=random_str(),
externalFrom='busybox:latest',
externalTimestamp=int(time.time()),
externalStatus='start')
# Test it works
post()
# Test it fails with two agents
super_client.wait_success(super_client.create_agent(
uri='test://' + random_str(),
accountId=account.id))
with pytest.raises(ApiError) as e:
post()
assert e.value.error.code == 'MissingRequired'
# Test it fails with no agents
for agent in super_client.list_agent(accountId=account.id):
super_client.wait_success(agent.deactivate())
with pytest.raises(ApiError) as e:
post()
assert e.value.error.code == 'CantVerifyAgent'
def test_bad_host(host, new_context):
# If a host doesn't belong to agent submitting the event, the request
# should fail.
agent_cli = new_context.agent_client
with pytest.raises(ApiError) as e:
agent_cli.create_container_event(
reportedHostUuid=host.data.fields['reportedUuid'],
externalId=random_str(),
externalFrom='busybox:latest',
externalTimestamp=int(time.time()),
externalStatus='start')
assert e.value.error.code == 'InvalidReference'
def test_container_event_null_inspect(client, host, agent_cli, user_id):
# Assert that the inspect can be null.
external_id = random_str()
create_event(host, external_id, agent_cli, client, user_id,
'start', None)
def container_wait():
containers = client.list_container(externalId=external_id)
if len(containers) and containers[0].state != 'requested':
return containers[0]
container = wait_for(container_wait)
assert container is not None
def test_requested_ip_address(super_client, client, host, agent_cli, user_id):
external_id = random_str()
inspect = new_inspect(external_id)
inspect['NetworkSettings'] = {'IPAddress': '10.42.0.240'}
container = create_native_container(client, host, external_id,
agent_cli, user_id, inspect=inspect)
container = super_client.reload(container)
assert container['data']['fields']['requestedIpAddress'] == '10.42.0.240'
assert container.nics()[0].network().kind == 'dockerBridge'
assert container.primaryIpAddress is None
def test_requested_ip_address_with_managed(super_client, client, host,
agent_cli, user_id):
external_id = random_str()
inspect = new_inspect(external_id)
inspect['NetworkSettings'] = {'IPAddress': '10.42.0.240'}
inspect['Config'] = {
'Labels': {
'io.rancher.container.network': 'true'
}
}
container = create_native_container(client, host, external_id,
agent_cli, user_id, inspect=inspect)
container = super_client.reload(container)
assert container['data']['fields']['requestedIpAddress'] == '10.42.0.240'
assert container.nics()[0].network().kind == 'network'
assert container.primaryIpAddress == '10.42.0.240'
def test_container_event_net_none(client, host, agent_cli, user_id):
external_id = random_str()
inspect = new_inspect(external_id)
inspect['Config']['NetworkDisabled'] = True
container = create_native_container(client, host, external_id,
agent_cli, user_id, inspect=inspect)
assert container['networkMode'] == 'none'
def test_container_event_net_host(client, host, agent_cli, user_id):
external_id = random_str()
inspect = new_inspect(external_id)
inspect['HostConfig'] = {'NetworkMode': 'host'}
container = create_native_container(client, host, external_id,
agent_cli, user_id, inspect=inspect)
assert container['networkMode'] == 'host'
def test_container_event_net_bridge(client, host, agent_cli, user_id):
external_id = random_str()
inspect = new_inspect(external_id)
inspect['HostConfig'] = {'NetworkMode': 'bridge'}
container = create_native_container(client, host, external_id,
agent_cli, user_id, inspect=inspect)
assert container['networkMode'] == 'bridge'
def test_container_event_net_blank(client, host, agent_cli, user_id):
external_id = random_str()
inspect = new_inspect(external_id)
inspect['HostConfig'] = {'NetworkMode': ''}
container = create_native_container(client, host, external_id,
agent_cli, user_id, inspect=inspect)
assert container['networkMode'] == 'bridge'
def test_container_event_net_container(client, host, agent_cli, user_id):
target_external_id = random_str()
target = create_native_container(client, host, target_external_id,
agent_cli, user_id)
external_id = random_str()
inspect = new_inspect(external_id)
inspect['HostConfig'] = {'NetworkMode': 'container:%s' % target.externalId}
container = create_native_container(client, host, external_id,
agent_cli, user_id, inspect=inspect)
assert container['networkMode'] == 'container'
assert container['networkContainerId'] == target.id
def test_container_event_net_container_not_found(client, host, agent_cli,
user_id):
external_id = random_str()
inspect = new_inspect(external_id)
inspect['HostConfig'] = {'NetworkMode': 'container:wont-be-found'}
container = create_native_container(client, host, external_id,
agent_cli, user_id, inspect=inspect)
assert container['networkMode'] == 'none'
assert container['networkContainerId'] is None
def test_container_event_image_and_reg_cred(client, host, agent_cli, user_id,
super_client):
server = 'server{0}.io'.format(random_num())
registry = client.create_registry(serverAddress=server,
name=random_str())
registry = client.wait_success(registry)
reg_cred = client.create_registry_credential(
registryId=registry.id,
publicValue='rancher',
secretValue='rancher')
registry_credential = client.wait_success(reg_cred)
name = server + '/rancher/authorized:latest'
image_uuid = 'docker:' + name
external_id = random_str()
container = create_native_container(client, host, external_id,
agent_cli, user_id, image=image_uuid)
assert container.nativeContainer is True
assert container.state == 'running'
container = super_client.wait_success(container)
assert container.registryCredentialId == registry_credential.id
def create_native_container(client, host, external_id, user_agent_cli,
user_account_id, inspect=None, image=None):
if not inspect:
inspect = new_inspect(external_id)
create_event(host, external_id, user_agent_cli, client, user_account_id,
'start', inspect, image=image)
def container_wait():
containers = client.list_container(externalId=external_id)
if len(containers) and containers[0].state != 'requested':
return containers[0]
container = wait_for(container_wait)
container = client.wait_success(container)
return container
def create_event(host, external_id, agent_cli, client, user_account_id, status,
inspect=None, wait_and_assert=True,
image=None):
timestamp = int(time.time())
    if image is None:
image = 'sim:busybox:latest'
event = agent_cli.create_container_event(
reportedHostUuid=host.data.fields['reportedUuid'],
externalId=external_id,
externalFrom=image,
externalTimestamp=timestamp,
externalStatus=status,
dockerInspect=inspect)
if wait_and_assert:
assert event.reportedHostUuid == host.data.fields['reportedUuid']
assert event.externalId == external_id
assert event.externalFrom == image
assert event.externalStatus == status
assert event.externalTimestamp == timestamp
def event_wait():
created = client.reload(event)
if created is not None and created.state == 'created':
return event
wait_for(event_wait)
event = client.reload(event)
assert host.id == event.hostId
assert user_account_id == event.accountId
assert event.state == 'created'
return event
def new_inspect(rand):
return {'Name': 'name-%s' % rand, 'Config': {'Image': 'sim:fake/image'}}
def _client_for_agent(credentials):
return cattle.from_env(url=cattle_url(),
cache=False,
access_key=credentials.publicValue,
secret_key=credentials.secretValue)
|
vincent99/cattle
|
tests/integration/cattletest/core/test_container_event.py
|
Python
|
apache-2.0
| 16,238
|
import logging
import pika
import json
import time
import Queue
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.ERROR)
class AsyncConsumer(object):
"""This is an example consumer that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
If the channel is closed, it will indicate a problem with one of the
commands that were issued and that should surface in the output as well.
"""
ACK_INTERVAL = 1
def __init__(self, amqp_url, queue, exchange, routing_key):
"""Create a new instance of the consumer class, passing in the AMQP
URL used to connect to RabbitMQ.
:param str amqp_url: The AMQP url to connect with
"""
self._connection = None
self._channel = None
self._closing = False
self._consumer_tag = None
self._url = amqp_url
self._queue = queue
self._exchange = exchange
self._routing_key = routing_key
self._exchange_type = 'topic'
self.msg_queue = Queue.Queue()
self.ack_queue = Queue.Queue()
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika.
:rtype: pika.SelectConnection
"""
LOGGER.info('Connecting to %s', self._url)
return pika.SelectConnection(pika.URLParameters(self._url),
self.on_connection_open,
stop_ioloop_on_close=False)
def close_connection(self):
"""This method closes the connection to RabbitMQ."""
LOGGER.info('Closing connection')
self._connection.close()
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
LOGGER.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
self._channel = None
if self._closing:
self._connection.ioloop.stop()
else:
LOGGER.warning('Connection closed, reopening in 5 seconds: (%s) %s',
reply_code, reply_text)
self._connection.add_timeout(5, self.reconnect)
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
LOGGER.info('Connection opened')
self.add_on_connection_close_callback()
self.open_channel()
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start()
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
        :param pika.channel.Channel channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
LOGGER.warning('Channel %i was closed: (%s) %s',
channel, reply_code, reply_text)
self._connection.close()
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
channel.basic_qos(prefetch_count=2000)
self.setup_exchange(self._exchange)
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
LOGGER.info('Declaring exchange %s', exchange_name)
self._channel.exchange_declare(self.on_exchange_declareok,
exchange_name,
self._exchange_type, durable=True, auto_delete=False)
def on_exchange_declareok(self, unused_frame):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
"""
LOGGER.info('Exchange declared')
self.setup_queue(self._queue)
def setup_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
LOGGER.info('Declaring queue %s', queue_name)
self._channel.queue_declare(self.on_queue_declareok, queue_name, durable=True, auto_delete=False)
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
LOGGER.info('Binding %s to %s with %s',
self._exchange, self._queue, self._routing_key)
self._channel.queue_bind(self.on_bindok, self._queue,
self._exchange, self._routing_key)
def add_on_cancel_callback(self):
"""Add a callback that will be invoked if RabbitMQ cancels the consumer
for some reason. If RabbitMQ does cancel the consumer,
on_consumer_cancelled will be invoked by pika.
"""
LOGGER.info('Adding consumer cancellation callback')
self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
def on_consumer_cancelled(self, method_frame):
"""Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
receiving messages.
:param pika.frame.Method method_frame: The Basic.Cancel frame
"""
LOGGER.info('Consumer was cancelled remotely, shutting down: %r',
method_frame)
if self._channel:
self._channel.close()
def acknowledge_message(self, delivery_tag):
"""Acknowledge the message delivery from RabbitMQ by sending a
Basic.Ack RPC method for the delivery tag.
:param int delivery_tag: The delivery tag from the Basic.Deliver frame
"""
LOGGER.info('Acknowledging message %s', delivery_tag)
self._channel.basic_ack(delivery_tag)
def on_message(self, unused_channel, basic_deliver, properties, body):
"""Invoked by pika when a message is delivered from RabbitMQ. The
channel is passed for your convenience. The basic_deliver object that
is passed in carries the exchange, routing key, delivery tag and
a redelivered flag for the message. The properties passed in is an
instance of BasicProperties with the message properties and the body
is the message that was sent.
:param pika.channel.Channel unused_channel: The channel object
        :param pika.Spec.Basic.Deliver basic_deliver: the Basic.Deliver method
        :param pika.Spec.BasicProperties properties: the message properties
:param str|unicode body: The message body
"""
LOGGER.info('Received message # %s from %s: %s',
basic_deliver.delivery_tag, properties.app_id, body)
data = json.loads(body)
headers = properties.headers or {}
headers['routing_key'] = basic_deliver.routing_key
self.msg_queue.put({'delivery_tag' : basic_deliver.delivery_tag, 'body' : body, 'headers': headers})
#self.acknowledge_message(basic_deliver.delivery_tag)
def on_cancelok(self, unused_frame):
"""This method is invoked by pika when RabbitMQ acknowledges the
cancellation of a consumer. At this point we will close the channel.
This will invoke the on_channel_closed method once the channel has been
closed, which will in-turn close the connection.
:param pika.frame.Method unused_frame: The Basic.CancelOk frame
"""
LOGGER.info('RabbitMQ acknowledged the cancellation of the consumer')
self.close_channel()
def stop_consuming(self):
"""Tell RabbitMQ that you would like to stop consuming by sending the
Basic.Cancel RPC command.
"""
if self._channel:
LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
self._channel.basic_cancel(self.on_cancelok, self._consumer_tag)
def start_consuming(self):
"""This method sets up the consumer by first calling
add_on_cancel_callback so that the object is notified if RabbitMQ
cancels the consumer. It then issues the Basic.Consume RPC command
which returns the consumer tag that is used to uniquely identify the
consumer with RabbitMQ. We keep the value to use it when we want to
cancel consuming. The on_message method is passed in as a callback pika
will invoke when a message is fully received.
"""
LOGGER.info('Issuing consumer related RPC commands')
self.add_on_cancel_callback()
self._consumer_tag = self._channel.basic_consume(self.on_message,
self._queue, )
self.schedule_ack()
def schedule_ack(self):
if self._closing:
return
LOGGER.info('Scheduling next message for %0.1f seconds',
self.ACK_INTERVAL)
self._connection.add_timeout(self.ACK_INTERVAL,
self.do_ack)
def do_ack(self):
try:
ack = self.ack_queue.get(False)
while ack is not None:
self.acknowledge_message(ack)
ack = self.ack_queue.get(False)
except Queue.Empty:
pass
self.schedule_ack()
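    # Note: on_message() only enqueues deliveries; a consumer thread drains
    # msg_queue and pushes delivery tags onto ack_queue, and do_ack(), scheduled
    # on the pika ioloop via add_timeout, performs the actual basic_ack calls.
    # This keeps all channel operations on the connection's own thread, since
    # pika connections and channels are not thread-safe.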
def on_bindok(self, unused_frame):
"""Invoked by pika when the Queue.Bind method has completed. At this
point we will start consuming messages by calling start_consuming
which will invoke the needed RPC commands to start the process.
:param pika.frame.Method unused_frame: The Queue.BindOk response frame
"""
LOGGER.info('Queue bound')
self.start_consuming()
def close_channel(self):
"""Call to close the channel with RabbitMQ cleanly by issuing the
Channel.Close RPC command.
"""
LOGGER.info('Closing the channel')
self._channel.close()
def open_channel(self):
"""Open a new channel with RabbitMQ by issuing the Channel.Open RPC
command. When RabbitMQ responds that the channel is open, the
on_channel_open callback will be invoked by pika.
"""
LOGGER.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def run(self):
"""Run the example consumer by connecting to RabbitMQ and then
starting the IOLoop to block and allow the SelectConnection to operate.
"""
self._connection = self.connect()
self._connection.ioloop.start()
def stop(self):
"""Cleanly shutdown the connection to RabbitMQ by stopping the consumer
with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
        will be invoked by pika, which will then close the channel and
connection. The IOLoop is started again because this method is invoked
when CTRL-C is pressed raising a KeyboardInterrupt exception. This
exception stops the IOLoop which needs to be running for pika to
communicate with RabbitMQ. All of the commands issued prior to starting
the IOLoop will be buffered but not processed.
"""
LOGGER.info('Stopping')
self._closing = True
self.stop_consuming()
self._connection.ioloop.start()
LOGGER.info('Stopped')
def start_consuming(consumer):
consumer.run()
def main():
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
example = AsyncConsumer('amqp://guest:guest@localhost:5672/%2F','quotation_daily', 'auction', '#')
import threading
thread = threading.Thread(target = start_consuming, args = [example])
try:
thread.start()
count = 0
while True:
#print "%s/%s" % (count, example.msg_queue.qsize())
item = example.msg_queue.get()
example.ack_queue.put(item['delivery_tag'])
count += 1
except KeyboardInterrupt:
example.stop()
if __name__ == '__main__':
main()
|
kevenli/Flowy
|
runtime/AsyncConsumer.py
|
Python
|
apache-2.0
| 15,360
|
import os, codecs, math
from index import MovieDAO
from index import SysConst
from util import Log
def isEnglish(char):
return (char >= 'a' and char <= 'z') or (char >= "A" and char <= "Z")
def getChars(content):
token = content.strip('\'".,?:-')
stopwords = "、 -=1234567890"
appendChar = ""
results = []
for char in token:
if not char in stopwords:
if isEnglish(char):
appendChar = appendChar + char
else:
if appendChar != "":
results.append(appendChar)
appendChar = ""
results.append(char)
    # flush a trailing run of English characters that never reached a
    # non-English boundary inside the loop
    if appendChar != "":
        results.append(appendChar)
    return results
class BayesTrainingFromDB:
def __init__(self, tag):
self.vocabulary = {}
self.prob = {}
self.totals = {}
#self.stopwords = "、 -=1234567890"
self.categories = {"pos", "neg"}
self.tag = tag
self.totalProb = {}
self.rows = {}
Log.info("Counting ...")
totalRows = 0
for category in self.categories:
#print(' category: ' + category)
(self.prob[category],
self.totals[category],
self.rows[category]) = self.train(category)
totalRows += self.rows[category]
for category in self.categories:
            # float() guards against integer truncation of the prior under Python 2
            self.totalProb[category] = float(self.rows[category]) / totalRows
#print(self.totalProb)
# I am going to eliminate any word in the vocabulary
# that doesn't occur at least 3 times
toDelete = []
for word in self.vocabulary:
if self.vocabulary[word] < 3:
# mark word for deletion
# can't delete now because you can't delete
# from a list you are currently iterating over
toDelete.append(word)
# now delete
for word in toDelete:
del self.vocabulary[word]
# now compute probabilities
vocabLength = len(self.vocabulary)
# print("Computing probabilities:")
for category in self.categories:
# print(' ' + category)
denominator = self.totals[category] + vocabLength
for word in self.vocabulary:
if word in self.prob[category]:
count = self.prob[category][word]
else:
count = 1
self.prob[category][word] = (float(count + 1)
/ denominator)
# print ("DONE TRAINING\n\n")
# print(self.prob)
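        # Smoothed estimate computed above: P(word | category) is roughly
        # (count + 1) / (totals[category] + len(vocabulary)). For example, a
        # word counted 4 times in a category with 1000 counted characters and
        # a 500-entry vocabulary gets (4 + 1) / (1000 + 500) = 1/300.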
def train(self, category):
"""counts word occurrences for a particular category"""
if category == "neg":
condition = self.tag + " = 0"
else:
condition = self.tag + " = 1"
movies = MovieDAO.getMoviesByCondition(condition)
counts = {}
total = 0
rows = 0
# print(" " + currentBucket)
for movie in movies:
token = movie["av_number"] + movie["actor"] + movie["title"]
#token = token.strip('\'".,?:-')
rows += 1
chars = getChars(token)
for char in chars:
self.vocabulary.setdefault(char, 0)
self.vocabulary[char] += 1
counts.setdefault(char, 0)
counts[char] += 1
total += 1
"""
old:
for char in token:
if char != ' ' and not char in self.stopwords:
self.vocabulary.setdefault(char, 0)
self.vocabulary[char] += 1
counts.setdefault(char, 0)
counts[char] += 1
total += 1
"""
return (counts, total, rows)
def classify(self, content):
results = {}
for category in self.categories:
results[category] = 0
chars = getChars(content)
# token = content.strip('\'".,?:-').lower()
# for char in token:
for char in chars:
if char in self.vocabulary:
for category in self.categories:
if self.prob[category][char] == 0:
print("%s %s" % (category, char))
results[category] += math.log(
self.prob[category][char])
max = -1000000
maxCategory = ""
for category in self.categories:
results[category] += math.log(self.totalProb[category])
if results[category] > max:
maxCategory = category
max = results[category]
#print(results)
#print(minCategory)
return maxCategory
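    # classify() compares summed log-probabilities: log P(category) plus the
    # sum of log P(char | category) over every input character found in the
    # vocabulary, and returns the category with the larger total.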
def probable(self, content):
results = {}
for category in self.categories:
results[category] = 0
#token = content.strip('\'".,?:-').lower()
chars = getChars(content)
for char in chars:
if char in self.vocabulary:
for category in self.categories:
if self.prob[category][char] == 0:
print("%s %s" % (category, char))
results[category] += math.log(
self.prob[category][char])
"""
old
token = content.strip('\'".,?:-').lower()
for char in token:
if char in self.vocabulary:
for category in self.categories:
if self.prob[category][char] == 0:
print("%s %s" % (category, char))
results[category] += math.log(
self.prob[category][char])
"""
return results["pos"] - results["neg"]
'''
def probable(self, content):
result = 0
token = content.strip('\'".,?:-').lower()
for char in token:
if char in self.vocabulary:
result += math.log(self.prob["pos"][char])
return result
'''
def testClassify(self, dir):
allResult = {}
for category in self.categories:
allResult[category] = {"correct": 0, "wrong": 0}
currentdir = dir + "//" + category
files = os.listdir(currentdir)
for file in files:
filePath = currentdir + '//' + file
f = codecs.open(filePath, 'r', 'utf-8')
content = f.readline()
print(content)
result = self.classify(content)
print(category + "/" + file + ": " + result)
if result == category:
allResult[category]["correct"] += 1
else:
allResult[category]["wrong"] += 1
return allResult
#bt = BayesTrainingFromDB("local")
#bt.forcast()
#print(bt.testClassify("D://Workspace//pythonWorkspace//python_gentleman_crawler//data//vr//test//"))
|
pythonlittleboy/python_gentleman_crawler
|
ml/BayesTrainingFromDB.py
|
Python
|
apache-2.0
| 6,914
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo.core import Node, Relationship, Rel, Rev, Path
from py2neo.cypher.lang import Representation
def test_can_write_simple_identifier():
r = Representation()
r.write_identifier("foo")
written = repr(r)
assert written == "foo"
def test_can_write_identifier_with_odd_chars():
r = Representation()
r.write_identifier("foo bar")
written = repr(r)
assert written == "`foo bar`"
def test_can_write_identifier_containing_back_ticks():
r = Representation()
r.write_identifier("foo `bar`")
written = repr(r)
assert written == "`foo ``bar```"
def test_cannot_write_empty_identifier():
r = Representation()
try:
r.write_identifier("")
except ValueError:
assert True
else:
assert False
def test_cannot_write_none_identifier():
r = Representation()
try:
r.write_identifier(None)
except ValueError:
assert True
else:
assert False
def test_can_write_simple_node():
r = Representation()
r.write(Node())
written = repr(r)
assert written == "()"
def test_can_write_node_with_labels():
r = Representation()
r.write(Node("Dark Brown", "Chicken"))
written = repr(r)
assert written == '(:Chicken:`Dark Brown`)'
def test_can_write_node_with_properties():
r = Representation()
r.write(Node(name="Gertrude", age=3))
written = repr(r)
assert written == '({age:3,name:"Gertrude"})'
def test_can_write_node_with_labels_and_properties():
r = Representation()
r.write(Node("Dark Brown", "Chicken", name="Gertrude", age=3))
written = repr(r)
assert written == '(:Chicken:`Dark Brown` {age:3,name:"Gertrude"})'
def test_can_write_simple_relationship():
r = Representation()
r.write(Relationship({}, "KNOWS", {}))
written = repr(r)
assert written == "()-[:KNOWS]->()"
def test_can_write_relationship_with_properties():
r = Representation()
r.write(Relationship(
{"name": "Fred"}, ("LIVES WITH", {"place": "Bedrock"}), {"name": "Wilma"}))
written = repr(r)
assert written == '({name:"Fred"})-[:`LIVES WITH` {place:"Bedrock"}]->({name:"Wilma"})'
def test_can_write_simple_rel():
r = Representation()
r.write(Rel("KNOWS"))
written = repr(r)
assert written == "-[:KNOWS]->"
def test_can_write_simple_rev():
r = Representation()
r.write(Rev("KNOWS"))
written = repr(r)
assert written == "<-[:KNOWS]-"
def test_can_write_simple_path():
r = Representation()
r.write(Path({}, "LOVES", {}, Rev("HATES"), {}, "KNOWS", {}))
written = repr(r)
assert written == "()-[:LOVES]->()<-[:HATES]-()-[:KNOWS]->()"
def test_can_write_array():
r = Representation()
r.write([1, 1, 2, 3, 5, 8, 13])
written = repr(r)
assert written == "[1,1,2,3,5,8,13]"
def test_can_write_mapping():
r = Representation()
r.write({"one": "eins", "two": "zwei", "three": "drei"})
written = repr(r)
assert written == '{one:"eins",three:"drei",two:"zwei"}'
def test_writing_none_writes_nothing():
r = Representation()
r.write(None)
written = repr(r)
assert written == ""
|
pombreda/py2neo
|
test/cypher/cypher_lang_test.py
|
Python
|
apache-2.0
| 3,792
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import getopt
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from datetime import datetime
from TwitterEngine import instances, BackendChooser
def parseargs(name, argv):
date = datetime.now()
execute = False
try:
        opts, _args = getopt.getopt(argv, 'hed:', ['execute', 'date='])
except getopt.GetoptError:
print('%s [-h]' % name)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print '%s [-d "YYYY-MM-DD [HH:mm:SS]"]' % name
sys.exit()
elif opt in ('-e', '--execute'):
execute = True
elif opt in ('-d', '--date'):
try:
if len(arg) > 10:
date = datetime.strptime(arg, '%Y-%m-%d %H:%M:%S')
else:
date = datetime.strptime(arg, '%Y-%m-%d')
except ValueError as e:
print "Date format accepted: YYYY-MM-DD [HH:mm:SS]"
raise e
return (date, execute)
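# Illustrative invocations (dates are placeholders; with -e omitted, execute
# stays False, which presumably makes RemoveOldTweets a dry run):
#   python removeold.py -d "2014-01-31"
#   python removeold.py -e -d "2014-01-31 12:00:00"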
if __name__ == '__main__':
try:
engine_config = instances.INSTANCES[0]
(max_date, execute) = parseargs(sys.argv[0], sys.argv[1:])
except ValueError:
sys.exit(1)
backend = BackendChooser.GetBackend(engine_config)
print "Calling delete with parameters max_date = %s, execute = %s." % (max_date, execute)
backend.RemoveOldTweets(max_date, execute)
|
biancini/TwitterAnalyzer
|
TwitterDownloader/utilities/removeold.py
|
Python
|
apache-2.0
| 1,350
|
#!/usr/bin/env python
import os
import sys
"""
Input: lines formatted as "checksum filesize filename", and a directory containing ffprobe data.
This tool is useful for two things:
1) Filter away files with missing ffprobe data, and produce output with the same format as the input:
cat data | fileobjectcreator/src/main/python/missingFFProbeFiles.py ~/tmp/ffprobe.result complete
2) Create a list of files that lacks ffprobe data:
cat data | fileobjectcreator/src/main/python/missingFFProbeFiles.py ~/tmp/ffprobe.result incomplete
"""
showModes = ["all", "complete", "incomplete"]
showMode = "all"
def printUsage():
print "Usage: %s path-to-ffprobe-data [%s]" % (sys.argv[0], "|".join(showModes))
if len(sys.argv) < 2:
printUsage()
sys.exit(1)
if len(sys.argv) >= 3:
mode = sys.argv[2]
if mode in showModes:
showMode = mode
else:
printUsage()
sys.exit(2)
ffProbeDir = sys.argv[1]
for line in sys.stdin.readlines():
line = line.strip()
if not line.endswith(".log") and not "_digivid_" in line:
(checksum, size, filename) = line.split(" ")
stdoutPath = os.path.join(ffProbeDir, filename) + ".stdout"
stderrPath = os.path.join(ffProbeDir, filename) + ".stderr"
hasStdout = os.path.exists(stdoutPath) and os.path.isfile(stdoutPath)
hasStderr = os.path.exists(stderrPath) and os.path.isfile(stderrPath)
completeFFProbe = hasStdout and hasStderr
if showMode == "all":
if completeFFProbe:
print "complete:" + filename
else:
print "incomplete:" + filename
elif showMode == "complete":
if completeFFProbe:
print line
elif showMode == "incomplete":
if not completeFFProbe:
print filename
|
statsbiblioteket/doms-transformers
|
fileobjectcreator/src/main/python/missingFFProbeFiles.py
|
Python
|
apache-2.0
| 1,832
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DJANGO_APPS = ['impala']
NICE_NAME = 'Cloudera Impala Query UI'
MENU_INDEX = 11
ICON = '/impala/static/art/icon_impala_24.png'
REQUIRES_HADOOP = False
IS_URL_NAMESPACED = True
|
pwong-mapr/private-hue
|
apps/impala/src/impala/settings.py
|
Python
|
apache-2.0
| 947
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import data_helpers as data_helpers
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.naive_bayes import *
from sklearn.feature_extraction.text import *
import logging
import sys
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
import string
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
import os
import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import svm
from sklearn.metrics import classification_report
stop = stopwords.words('english') + list(string.punctuation)
tknzr = TweetTokenizer()
def tokenize(text):
tokens = [token for token in tknzr.tokenize(text.lower()) if token not in stop and len(token) > 2]
#_grams = ngrams(tokens, 3)
#tokens = []
#for g in _grams:
#tokens.append(' '.join(g))
return tokens
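# Example (illustrative): tokenize("I really love this movie") keeps only
# lowercased tokens longer than two characters that are neither English
# stopwords nor punctuation, yielding ['really', 'love', 'movie'].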
def mFile(file):
X = []
with open(file, "r") as infile:
for line in infile:
X.append(' '.join(tokenize(line)))
return X
def loadData(positive,negative):
X, y = [], []
p = mFile(positive)
y.extend([1 for _ in p])
X.extend(p)
n = mFile(negative)
y.extend([0 for _ in n])
X.extend(n)
X, y = np.array(X), np.array(y)
return X, y
log = logging.getLogger()
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
log.debug("Loading data")
train_data, train_labels = loadData("train/dbpedia/generic/positive.txt", "train/dbpedia/generic/negative.txt")
test_data, test_labels = loadData("test/dbpedia/generic/positive.txt", "test/dbpedia/generic/negative.txt")
parameters = {'vect__ngram_range': [(1, 1), (1, 2)],
'tfidf__use_idf': (True, False),
'clf__alpha': (1e-2, 1e-3),
}
text_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', BernoulliNB()),
])
gs_clf = GridSearchCV(text_clf, parameters, n_jobs=-1)
gs_clf = gs_clf.fit(train_data,train_labels)
y_pred = gs_clf.predict(test_data)
print(classification_report(test_labels, y_pred))
for param_name in sorted(parameters.keys()):
print("%s: %r" % (param_name, gs_clf.best_params_[param_name]))
"""
# x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
log.debug("BUilding the classifier {} with word count".format(args.classifier))
# classifier_count.fit(x_train, y_train)
gs_clf = GridSearchCV(classifier_count, parameters, n_jobs=-1)
gs_clf = gs_clf.fit(x_train, y_train)
for param_name in sorted(parameters.keys()):
print("%s: %r" % (param_name, gs_clf.best_params_[param_name]))
# Create feature vectors
vectorizer = TfidfVectorizer(min_df=5,
max_df = 0.8,
sublinear_tf=True,
use_idf=True)
train_vectors = vectorizer.fit_transform(train_data)
test_vectors = vectorizer.transform(test_data)
# Perform classification with SVM, kernel=rbf
classifier_rbf = svm.SVC(gamma=0.7, C=1.0)
t0 = time.time()
classifier_rbf.fit(train_vectors, train_labels)
t1 = time.time()
prediction_rbf = classifier_rbf.predict(test_vectors)
t2 = time.time()
time_rbf_train = t1-t0
time_rbf_predict = t2-t1
# Perform classification with SVM, kernel=linear
classifier_linear = svm.SVC(kernel='poly', gamma=0.5)
t0 = time.time()
classifier_linear.fit(train_vectors, train_labels)
t1 = time.time()
prediction_linear = classifier_linear.predict(test_vectors)
t2 = time.time()
time_linear_train = t1-t0
time_linear_predict = t2-t1
# Perform classification with SVM, kernel=linear
classifier_liblinear = svm.LinearSVC()
t0 = time.time()
classifier_liblinear.fit(train_vectors, train_labels)
t1 = time.time()
prediction_liblinear = classifier_liblinear.predict(test_vectors)
t2 = time.time()
time_liblinear_train = t1-t0
time_liblinear_predict = t2-t1
# Print results in a nice table
print("Results for SVC(kernel=rbf)")
print("Training time: %fs; Prediction time: %fs" % (time_rbf_train, time_rbf_predict))
print(classification_report(test_labels, prediction_rbf))
print("Results for SVC(kernel=linear)")
print("Training time: %fs; Prediction time: %fs" % (time_linear_train, time_linear_predict))
print(classification_report(test_labels, prediction_linear))
print("Results for LinearSVC()")
print("Training time: %fs; Prediction time: %fs" % (time_liblinear_train, time_liblinear_predict))
print(classification_report(test_labels, prediction_liblinear))
"""
|
eamosse/word_embedding
|
train_bow.py
|
Python
|
apache-2.0
| 4,875
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels and Gonzalo Espinoza
UNESCO-IHE 2016
Contact: t.hessels@unesco-ihe.org
g.espinoza@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect
Description:
This module contains scripts used to download Level 1 data (data directly from web).
Examples:
from wa import Collect
help(Collect)
dir(Collect)
"""
from wa.Collect import TRMM, GLDAS, ALEXI, CHIRPS, DEM, CFSR, MOD9, MOD10, MOD11, MOD12, MOD13, MOD15, MOD16, MOD17, MCD43, MYD13, GLEAM, HiHydroSoil, ECMWF, RFE, JRC, TWC, ETmonitor, SEBS, SSEBop, CMRSET, MSWEP, ASCAT
__all__ = ['TRMM', 'GLDAS', 'ALEXI', 'CHIRPS', 'DEM', 'CFSR', 'MOD9', 'MOD10', 'MOD11', 'MOD12', 'MOD13', 'MOD15', 'MOD16', 'MOD17', 'MCD43', 'MYD13', 'GLEAM', 'HiHydroSoil', 'ECMWF', 'RFE', 'JRC', 'TWC', 'ETmonitor', 'SEBS', 'SSEBop', 'CMRSET', 'MSWEP', 'ASCAT']
__version__ = '0.1'
|
wateraccounting/wa
|
Collect/__init__.py
|
Python
|
apache-2.0
| 901
|
import re
import sys
try:
import urllib3
except ImportError:
    print 'urllib3 is not installed, run "pip install urllib3"'
sys.exit(1)
import string
import json
from uuid import uuid4
import time
import threading
import functools
import traceback
import base64
import hmac
from hashlib import sha1
import datetime
CONFIG_HOSTNAME = 'hostname'
CONFIG_PORT = 'port'
CONFIG_POLLING_TIMEOUT = 'default_polling_timeout'
CONFIG_POLLING_INTERVAL = 'default_polling_interval'
CONFIG_WEBHOOK = 'webhook'
CONFIG_READ_TIMEOUT = 'read_timeout'
CONFIG_WRITE_TIMEOUT = 'write_timeout'
CONFIG_CONTEXT_PATH = 'context_path'
HEADER_JOB_UUID = "X-Job-UUID"
HEADER_WEBHOOK = "X-Web-Hook"
HEADER_JOB_SUCCESS = "X-Job-Success"
HEADER_AUTHORIZATION = "Authorization"
HEADER_REQUEST_IP = "X-Request-Ip"
OAUTH = "OAuth"
LOCATION = "location"
HTTP_ERROR = "sdk.1000"
POLLING_TIMEOUT_ERROR = "sdk.1001"
INTERNAL_ERROR = "sdk.1002"
__config__ = {}
class SdkError(Exception):
pass
def _exception_safe(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
func(*args, **kwargs)
except:
print traceback.format_exc()
return wrap
def _error_if_not_configured():
if not __config__:
raise SdkError('call configure() before using any APIs')
def _http_error(status, body=None):
err = ErrorCode()
err.code = HTTP_ERROR
err.description = 'the http status code[%s] indicates a failure happened' % status
err.details = body
return {'error': err}
def _error(code, desc, details):
err = ErrorCode()
err.code = code
    err.description = desc
err.details = details
return {'error': err}
def configure(
hostname='127.0.0.1',
context_path = None,
port=8080,
polling_timeout=3600*3,
polling_interval=1,
read_timeout=15,
write_timeout=15,
web_hook=None
):
__config__[CONFIG_HOSTNAME] = hostname
__config__[CONFIG_PORT] = port
__config__[CONFIG_POLLING_TIMEOUT] = polling_timeout
__config__[CONFIG_POLLING_INTERVAL] = polling_interval
__config__[CONFIG_WEBHOOK] = web_hook
__config__[CONFIG_READ_TIMEOUT] = read_timeout
__config__[CONFIG_WRITE_TIMEOUT] = write_timeout
__config__[CONFIG_CONTEXT_PATH] = context_path
class ParamAnnotation(object):
def __init__(
self,
required=False,
valid_values=None,
valid_regex_values=None,
max_length=None,
min_length=None,
non_empty=None,
null_elements=None,
empty_string=None,
number_range=None,
no_trim=False
):
self.required = required
self.valid_values = valid_values
self.valid_regex_values = valid_regex_values
self.max_length = max_length
self.min_length = min_length
self.non_empty = non_empty
self.null_elements = null_elements
self.empty_string = empty_string
self.number_range = number_range
self.no_trim = no_trim
class ErrorCode(object):
def __init__(self):
self.code = None
self.description = None
self.details = None
self.cause = None
class Obj(object):
def __init__(self, d):
for a, b in d.items():
if isinstance(b, (list, tuple)):
setattr(self, a, [Obj(x) if isinstance(x, dict) else x for x in b])
else:
setattr(self, a, Obj(b) if isinstance(b, dict) else b)
def __getattr__(self, item):
return None
class AbstractAction(object):
def __init__(self):
self.apiId = None
self.sessionId = None
self.requestIp = None
self.systemTags = None
self.userTags = None
self.timeout = None
self.pollingInterval = None
self._param_descriptors = {
'systemTags': ParamAnnotation(),
'userTags': ParamAnnotation()
}
self._param_descriptors.update(self.PARAMS)
def _check_params(self):
for param_name, annotation in self._param_descriptors.items():
value = getattr(self, param_name, None)
if value is None and annotation.required:
raise SdkError('missing a mandatory parameter[%s]' % param_name)
if value is not None and annotation.valid_values and value not in annotation.valid_values:
raise SdkError('invalid parameter[%s], the value[%s] is not in the valid options%s' % (param_name, value, annotation.valid_values))
if value is not None and isinstance(value, str) and annotation.max_length and len(value) > annotation.max_length:
raise SdkError('invalid length[%s] of the parameter[%s], the max allowed length is %s' % (len(value), param_name, annotation.max_length))
            if value is not None and isinstance(value, str) and annotation.min_length and len(value) < annotation.min_length:
raise SdkError('invalid length[%s] of the parameter[%s], the minimal allowed length is %s' % (len(value), param_name, annotation.min_length))
if value is not None and isinstance(value, list) and annotation.non_empty is True and len(value) == 0:
raise SdkError('invalid parameter[%s], it cannot be an empty list' % param_name)
if value is not None and isinstance(value, list) and annotation.null_elements is True and None in value:
raise SdkError('invalid parameter[%s], the list cannot contain a null element' % param_name)
if value is not None and isinstance(value, str) and annotation.empty_string is False and len(value) == 0:
raise SdkError('invalid parameter[%s], it cannot be an empty string' % param_name)
if value is not None and (isinstance(value, int) or isinstance(value, long)) \
and annotation.number_range is not None and len(annotation.number_range) == 2:
low = annotation.number_range[0]
high = annotation.number_range[1]
if value < low or value > high:
                    raise SdkError('invalid parameter[%s], its value[%s] is not in the valid range%s' % (param_name, value, annotation.number_range))
if value is not None and isinstance(value, str) and annotation.no_trim is False:
value = str(value).strip()
setattr(self, param_name, value)
if self.NEED_SESSION:
if self.sessionId is None and (self.accessKeyId is None or self.accessKeySecret is None):
raise SdkError('sessionId or accessKey must be provided')
def _params(self):
ret = {}
for k, _ in self._param_descriptors.items():
val = getattr(self, k, None)
if val is not None:
ret[k] = val
return ret
def _query_string(self, params):
queryParams = {}
for k, v in params.items():
if k == "accessKeySecret":
continue
if k == "accessKeyId":
continue
queryParams[k] = v
return '&'.join(['%s=%s' % (k, v) for k, v in queryParams.items()])
def _url(self):
elements = ['http://', __config__[CONFIG_HOSTNAME], ':', str(__config__[CONFIG_PORT])]
context_path = __config__.get(CONFIG_CONTEXT_PATH, None)
if context_path is not None:
elements.append(context_path)
elements.append('/v1')
path = self.PATH.replace('{', '${')
        unresolved = re.findall(r'\$\{(.+?)\}', path)
params = self._params()
if unresolved:
for u in unresolved:
                if u not in params:
                    raise SdkError('missing a mandatory parameter[%s]' % u)
path = string.Template(path).substitute(params)
elements.append(path)
if self.HTTP_METHOD == 'GET' or self.HTTP_METHOD == 'DELETE':
elements.append('?')
elements.append(self._query_string(params))
return ''.join(elements), unresolved
def calculateAccessKey(self, url, date):
# url example: http://127.0.0.1:8080/zstack/v1/vminstances/uuid?xx
elements = url.split(":")
path = elements[2].split("/", 2)
path = path[2].split("?")
h = hmac.new(self.accessKeySecret, self.HTTP_METHOD + "\n"
+ date + "\n"
+ "/" + path[0], sha1)
Signature = base64.b64encode(h.digest())
return "ZStack %s:%s" % (self.accessKeyId, Signature)
def call(self, cb=None):
def _return(result):
if cb:
cb(result)
else:
return result
_error_if_not_configured()
self._check_params()
url, params_in_url = self._url()
headers = {}
if self.apiId is not None:
headers[HEADER_JOB_UUID] = self.apiId
else:
headers[HEADER_JOB_UUID] = _uuid()
date = time.time()
datestr = datetime.datetime.fromtimestamp(date).strftime('%a, %d %b %Y %H:%M:%S CST')
if self.requestIp is not None:
headers[HEADER_REQUEST_IP] = self.requestIp
if self.NEED_SESSION:
if self.sessionId is not None:
headers[HEADER_AUTHORIZATION] = "%s %s" % (OAUTH, self.sessionId)
else :
headers["Date"] = datestr
headers[HEADER_AUTHORIZATION] = self.calculateAccessKey(url, datestr)
web_hook = __config__.get(CONFIG_WEBHOOK, None)
if web_hook is not None:
headers[CONFIG_WEBHOOK] = web_hook
params = self._params()
body = None
if self.HTTP_METHOD == 'POST' or self.HTTP_METHOD == 'PUT':
m = {}
for k, v in params.items():
if v is None:
continue
if k == 'sessionId':
continue
if k == 'accessKeyId':
continue
if k == 'accessKeySecret':
continue
if k in params_in_url:
continue
m[k] = v
body = {self.PARAM_NAME: m}
if not self.timeout:
self.timeout = __config__[CONFIG_READ_TIMEOUT]
rsp = _json_http(uri=url, body=body, headers=headers, method=self.HTTP_METHOD, timeout=self.timeout)
if rsp.status < 200 or rsp.status >= 300:
return _return(Obj(_http_error(rsp.status, rsp.data)))
elif rsp.status == 200 or rsp.status == 204:
# the API completes
return _return(Obj(self._write_result(rsp)))
elif rsp.status == 202:
# the API needs polling
return self._poll_result(rsp, cb)
else:
raise SdkError('[Internal Error] the server returns an unknown status code[%s], body[%s]' % (rsp.status, rsp.data))
def _write_result(self, rsp):
data = rsp.data
if not data:
data = '{}'
if rsp.status == 200:
return {"value": json.loads(data)}
elif rsp.status == 503:
return json.loads(data)
else:
raise SdkError('unknown status code[%s]' % rsp.status)
def _poll_result(self, rsp, cb):
if not self.NEED_POLL:
raise SdkError('[Internal Error] the api is not an async API but the server returns 202 status code')
m = json.loads(rsp.data)
location = m[LOCATION]
if not location:
raise SdkError("Internal Error] the api[%s] is an async API but the server doesn't return the polling location url")
if cb:
# async polling
self._async_poll(location, cb)
else:
# sync polling
return self._sync_polling(location)
def _fill_timeout_parameters(self):
if self.timeout is None:
self.timeout = __config__.get(CONFIG_POLLING_TIMEOUT)
if self.pollingInterval is None:
self.pollingInterval = __config__.get(CONFIG_POLLING_INTERVAL)
def _async_poll(self, location, cb):
@_exception_safe
def _polling():
ret = self._sync_polling(location)
cb(ret)
threading.Thread(target=_polling).start()
def _sync_polling(self, location):
count = 0
self._fill_timeout_parameters()
while count < self.timeout:
rsp = _json_http(
uri=location,
headers={HEADER_AUTHORIZATION: "%s %s" % (OAUTH, self.sessionId)},
method='GET'
)
if rsp.status not in [200, 503, 202]:
return Obj(_http_error(rsp.status, rsp.data))
elif rsp.status in [200, 503]:
return Obj(self._write_result(rsp))
time.sleep(self.pollingInterval)
count += self.pollingInterval
return Obj(_error(POLLING_TIMEOUT_ERROR, 'polling an API result time out',
'failed to poll the result after %s seconds' % self.timeout))
class QueryAction(AbstractAction):
PARAMS = {
'conditions': ParamAnnotation(required=True),
'limit': ParamAnnotation(),
'start': ParamAnnotation(),
'count': ParamAnnotation(),
'groupBy': ParamAnnotation(),
'replyWithCount': ParamAnnotation(),
'sortBy': ParamAnnotation(),
'sortDirection': ParamAnnotation(valid_values=['asc', 'desc']),
'fields': ParamAnnotation(),
}
def __init__(self):
super(QueryAction, self).__init__()
self.conditions = []
self.limit = None
self.start = None
self.count = None
self.groupBy = None
self.replyWithCount = None
self.sortBy = None
self.sortDirection = None
self.fields = None
self.sessionId = None
def _query_string(self, params):
m = []
ps = {}
for k, v in params.items():
if k in self.PARAMS:
ps[k] = v
for k, v in ps.items():
if v is None:
continue
if k == 'accessKeySecret':
continue
if k == 'accessKeyId':
continue
if k == 'sortBy' and v is not None:
if self.sortDirection is None:
m.append('sort=%s' % v)
else:
op = '+' if self.sortDirection == 'asc' else '-'
m.append('sort=%s%s' % (op, v))
elif k == 'sortDirection':
continue
elif k == 'fields':
m.append('fields=%s' % ','.join(v))
elif k == 'conditions':
m.extend(['q=%s' % q for q in v])
else:
m.append('%s=%s' % (k, v))
return '&'.join(m)
def _uuid():
return str(uuid4()).replace('-', '')
def _json_http(
uri,
body=None,
headers={},
method='POST',
timeout=120.0
):
pool = urllib3.PoolManager(timeout=timeout, retries=urllib3.util.retry.Retry(15))
headers.update({'Content-Type': 'application/json', 'Connection': 'close'})
if body is not None and not isinstance(body, str):
body = json.dumps(body).encode('utf-8')
print '[Request]: %s url=%s, headers=%s, body=%s' % (method, uri, headers, body)
if body:
headers['Content-Length'] = len(body)
rsp = pool.request(method, uri, body=body, headers=headers)
else:
rsp = pool.request(method, uri, headers=headers)
print '[Response to %s %s]: status: %s, body: %s' % (method, uri, rsp.status, rsp.data)
return rsp
|
AlanJager/zstack
|
testlib/src/main/resources/zssdk.py
|
Python
|
apache-2.0
| 15,737
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Resources management functions
"""
import sys
import bigml.api
from bigmler.utils import (dated, get_url, log_message, check_resource,
check_resource_error, log_created_resources)
from bigmler.reports import report
from bigmler.resourcesapi.common import set_basic_batch_args, map_fields, \
update_json_args
from bigmler.resourcesapi.common import FULL_FORMAT, NORMAL_FORMAT, \
THRESHOLD_CODE
def set_batch_prediction_args(args, fields=None,
dataset_fields=None):
"""Return batch prediction args dict
"""
batch_prediction_args = set_basic_batch_args(args, args.name)
if hasattr(args, 'method') and args.method:
batch_prediction_args.update({"combiner": args.method})
if args.method == THRESHOLD_CODE:
threshold = {}
if hasattr(args, 'threshold') and args.threshold is not None:
threshold.update(k=args.threshold)
if hasattr(args, 'threshold_class') \
and args.threshold_class is not None:
threshold.update({"class": args.threshold_class})
batch_prediction_args.update(threshold=threshold)
if args.fields_map_ and fields is not None:
if dataset_fields is None:
dataset_fields = fields
batch_prediction_args.update({
"fields_map": map_fields(args.fields_map_,
fields, dataset_fields)})
if args.prediction_info in [NORMAL_FORMAT, FULL_FORMAT]:
if (hasattr(args, 'boosting') and args.boosting) or \
(hasattr(args, 'probability') and args.probability):
batch_prediction_args.update(probability=True)
else:
batch_prediction_args.update(confidence=True)
if args.prediction_info == FULL_FORMAT:
batch_prediction_args.update(all_fields=True)
if hasattr(args, 'prediction_name') and args.prediction_name:
batch_prediction_args.update(prediction_name=args.prediction_name)
if args.prediction_fields:
batch_prediction_args.update(all_fields=False)
prediction_fields = []
for field in args.prediction_fields.split(args.args_separator):
field = field.strip()
            if field not in dataset_fields.fields:
try:
field = dataset_fields.field_id(field)
except ValueError as exc:
sys.exit(exc)
prediction_fields.append(field)
batch_prediction_args.update(output_fields=prediction_fields)
if hasattr(args, 'missing_strategy') and args.missing_strategy:
batch_prediction_args.update(missing_strategy=args.missing_strategy)
if hasattr(args, "operating_point_") and args.operating_point_:
batch_prediction_args.update(operating_point=args.operating_point_)
if args.operating_point_.get("kind") == "probability":
batch_prediction_args.update({"probability": True,
"confidence": False})
if 'batch_prediction' in args.json_args:
update_json_args(
batch_prediction_args,
args.json_args.get('batch_prediction'),
fields)
return batch_prediction_args
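# Illustrative result (values are placeholders): when args.method equals
# THRESHOLD_CODE with threshold=3 and threshold_class="Iris-setosa", the dict
# built above contains {"combiner": <THRESHOLD_CODE>, "threshold": {"k": 3,
# "class": "Iris-setosa"}}, plus the confidence/probability, fields_map and
# output_fields entries driven by the remaining options.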
def create_batch_prediction(model_or_ensemble, test_dataset,
batch_prediction_args, args,
api=None, session_file=None,
path=None, log=None):
"""Creates remote batch_prediction
"""
if api is None:
api = bigml.api.BigML()
message = dated("Creating batch prediction.\n")
log_message(message, log_file=session_file, console=args.verbosity)
batch_prediction = api.create_batch_prediction(model_or_ensemble,
test_dataset,
batch_prediction_args,
retries=None)
log_created_resources("batch_prediction", path,
bigml.api.get_batch_prediction_id(batch_prediction),
mode='a')
batch_prediction_id = check_resource_error(
batch_prediction, "Failed to create batch prediction: ")
try:
batch_prediction = check_resource(batch_prediction,
api.get_batch_prediction,
raise_on_error=True)
except Exception as exception:
sys.exit("Failed to get a finished batch prediction: %s"
% str(exception))
message = dated("Batch prediction created: %s\n"
% get_url(batch_prediction))
log_message(message, log_file=session_file, console=args.verbosity)
log_message("%s\n" % batch_prediction_id, log_file=log)
if args.reports:
report(args.reports, path, batch_prediction)
return batch_prediction
|
jaor/bigmler
|
bigmler/resourcesapi/batch_predictions.py
|
Python
|
apache-2.0
| 5,556
|
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.iaas_client.actions.base import BaseAction
from qingcloud.cli.misc.utils import explode_array
class DissociateAlarmPolicyAction(BaseAction):
action = 'DissociateAlarmPolicy'
command = 'dissociate-alarm-policy'
usage = '%(prog)s [-a <alarm_policy>] [options] [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument("-a", "--alarm-policy", dest="alarm_policy",
action="store", type=str, default='',
help="the ID of alarm policy.")
parser.add_argument("-r", "--resources", dest="resources",
action="store", type=str, default=None,
help="the ID of resources you want to dissociate with alarm policy.")
parser.add_argument("-R", "--related-resource", dest="related_resource",
action="store", type=str, default=None,
help="when the network load balancer is bound,related_resource needs to specify a public network IP ID associated with this load balancer.")
@classmethod
def build_directive(cls, options):
if options.alarm_policy == '':
print('error: alarm_policy should be specified.')
return None
directive = {
"alarm_policy": options.alarm_policy,
"resources": explode_array(options.resources),
"related_resource": options.related_resource
}
return directive
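    # Example directive (illustrative IDs): options with alarm_policy='apl-1234'
    # and resources='i-aaaa,i-bbbb' would produce
    #   {"alarm_policy": "apl-1234", "resources": ["i-aaaa", "i-bbbb"],
    #    "related_resource": None}
    # assuming explode_array splits the comma-separated resource list.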
|
yunify/qingcloud-cli
|
qingcloud/cli/iaas_client/actions/alarm_policy/dissociate_alarm_policy.py
|
Python
|
apache-2.0
| 2,362
|
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo.utils import excutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.common import constants as n_constants
from neutron.db import common_db_mixin as base_db
from neutron.db import l3_agentschedulers_db as l3_agent_db
from neutron.db import l3_db
from neutron.db import model_base
from neutron.db import models_v2
from neutron_vpnaas.db.loadbalancer import vpn_validator
from neutron.extensions import vpnaas
from neutron.i18n import _LW
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.plugins.common import utils
LOG = logging.getLogger(__name__)
class IPsecPeerCidr(model_base.BASEV2):
"""Internal representation of a IPsec Peer Cidrs."""
cidr = sa.Column(sa.String(32), nullable=False, primary_key=True)
ipsec_site_connection_id = sa.Column(
sa.String(36),
sa.ForeignKey('ipsec_site_connections.id',
ondelete="CASCADE"),
primary_key=True)
class IPsecPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IPsecPolicy Object."""
__tablename__ = 'ipsecpolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
transform_protocol = sa.Column(sa.Enum("esp", "ah", "ah-esp",
name="ipsec_transform_protocols"),
nullable=False)
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
encapsulation_mode = sa.Column(sa.Enum("tunnel", "transport",
name="ipsec_encapsulations"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IKEPolicy(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 IKEPolicy Object."""
__tablename__ = 'ikepolicies'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
auth_algorithm = sa.Column(sa.Enum("sha1",
name="vpn_auth_algorithms"),
nullable=False)
encryption_algorithm = sa.Column(sa.Enum("3des", "aes-128",
"aes-256", "aes-192",
name="vpn_encrypt_algorithms"),
nullable=False)
phase1_negotiation_mode = sa.Column(sa.Enum("main",
name="ike_phase1_mode"),
nullable=False)
lifetime_units = sa.Column(sa.Enum("seconds", "kilobytes",
name="vpn_lifetime_units"),
nullable=False)
lifetime_value = sa.Column(sa.Integer, nullable=False)
ike_version = sa.Column(sa.Enum("v1", "v2", name="ike_versions"),
nullable=False)
pfs = sa.Column(sa.Enum("group2", "group5", "group14",
name="vpn_pfs"), nullable=False)
class IPsecSiteConnection(model_base.BASEV2,
models_v2.HasId, models_v2.HasTenant):
"""Represents a IPsecSiteConnection Object."""
__tablename__ = 'ipsec_site_connections'
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
peer_address = sa.Column(sa.String(255), nullable=False)
peer_id = sa.Column(sa.String(255), nullable=False)
route_mode = sa.Column(sa.String(8), nullable=False)
mtu = sa.Column(sa.Integer, nullable=False)
initiator = sa.Column(sa.Enum("bi-directional", "response-only",
name="vpn_initiators"), nullable=False)
auth_mode = sa.Column(sa.String(16), nullable=False)
psk = sa.Column(sa.String(255), nullable=False)
dpd_action = sa.Column(sa.Enum("hold", "clear",
"restart", "disabled",
"restart-by-peer", name="vpn_dpd_actions"),
nullable=False)
dpd_interval = sa.Column(sa.Integer, nullable=False)
dpd_timeout = sa.Column(sa.Integer, nullable=False)
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
vpnservice_id = sa.Column(sa.String(36),
sa.ForeignKey('vpnservices.id'),
nullable=False)
ipsecpolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ipsecpolicies.id'),
nullable=False)
ikepolicy_id = sa.Column(sa.String(36),
sa.ForeignKey('ikepolicies.id'),
nullable=False)
ipsecpolicy = orm.relationship(
IPsecPolicy, backref='ipsec_site_connection')
ikepolicy = orm.relationship(IKEPolicy, backref='ipsec_site_connection')
peer_cidrs = orm.relationship(IPsecPeerCidr,
backref='ipsec_site_connection',
lazy='joined',
cascade='all, delete, delete-orphan')
class VPNService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 VPNService Object."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
status = sa.Column(sa.String(16), nullable=False)
admin_state_up = sa.Column(sa.Boolean(), nullable=False)
subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id'),
nullable=False)
router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id'),
nullable=False)
subnet = orm.relationship(models_v2.Subnet)
router = orm.relationship(l3_db.Router)
ipsec_site_connections = orm.relationship(
IPsecSiteConnection,
backref='vpnservice',
cascade="all, delete-orphan")
class VPNPluginDb(vpnaas.VPNPluginBase, base_db.CommonDbMixin):
"""VPN plugin database class using SQLAlchemy models."""
def _get_validator(self):
"""Obtain validator to use for attribute validation.
        Subclasses may override this with a different validator, as needed.
Note: some UTs will directly create a VPNPluginDb object and then
call its methods, instead of creating a VPNDriverPlugin, which
will have a service driver associated that will provide a
validator object. As a result, we use the reference validator here.
"""
return vpn_validator.VpnReferenceValidator()
def update_status(self, context, model, v_id, status):
with context.session.begin(subtransactions=True):
v_db = self._get_resource(context, model, v_id)
v_db.update({'status': status})
def _get_resource(self, context, model, v_id):
try:
r = self._get_by_id(context, model, v_id)
except exc.NoResultFound:
with excutils.save_and_reraise_exception(reraise=False) as ctx:
if issubclass(model, IPsecSiteConnection):
raise vpnaas.IPsecSiteConnectionNotFound(
ipsec_site_conn_id=v_id
)
elif issubclass(model, IKEPolicy):
raise vpnaas.IKEPolicyNotFound(ikepolicy_id=v_id)
elif issubclass(model, IPsecPolicy):
raise vpnaas.IPsecPolicyNotFound(ipsecpolicy_id=v_id)
elif issubclass(model, VPNService):
raise vpnaas.VPNServiceNotFound(vpnservice_id=v_id)
ctx.reraise = True
return r
def assert_update_allowed(self, obj):
status = getattr(obj, 'status', None)
_id = getattr(obj, 'id', None)
if utils.in_pending_status(status):
raise vpnaas.VPNStateInvalidToUpdate(id=_id, state=status)
def _make_ipsec_site_connection_dict(self, ipsec_site_conn, fields=None):
res = {'id': ipsec_site_conn['id'],
'tenant_id': ipsec_site_conn['tenant_id'],
'name': ipsec_site_conn['name'],
'description': ipsec_site_conn['description'],
'peer_address': ipsec_site_conn['peer_address'],
'peer_id': ipsec_site_conn['peer_id'],
'route_mode': ipsec_site_conn['route_mode'],
'mtu': ipsec_site_conn['mtu'],
'auth_mode': ipsec_site_conn['auth_mode'],
'psk': ipsec_site_conn['psk'],
'initiator': ipsec_site_conn['initiator'],
'dpd': {
'action': ipsec_site_conn['dpd_action'],
'interval': ipsec_site_conn['dpd_interval'],
'timeout': ipsec_site_conn['dpd_timeout']
},
'admin_state_up': ipsec_site_conn['admin_state_up'],
'status': ipsec_site_conn['status'],
'vpnservice_id': ipsec_site_conn['vpnservice_id'],
'ikepolicy_id': ipsec_site_conn['ikepolicy_id'],
'ipsecpolicy_id': ipsec_site_conn['ipsecpolicy_id'],
'peer_cidrs': [pcidr['cidr']
for pcidr in ipsec_site_conn['peer_cidrs']]
}
return self._fields(res, fields)
def _get_subnet_ip_version(self, context, vpnservice_id):
vpn_service_db = self._get_vpnservice(context, vpnservice_id)
subnet = vpn_service_db.subnet['cidr']
ip_version = netaddr.IPNetwork(subnet).version
return ip_version
def create_ipsec_site_connection(self, context, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
validator = self._get_validator()
validator.assign_sensible_ipsec_sitecon_defaults(ipsec_sitecon)
tenant_id = self._get_tenant_id_for_create(context, ipsec_sitecon)
with context.session.begin(subtransactions=True):
#Check permissions
self._get_resource(context,
VPNService,
ipsec_sitecon['vpnservice_id'])
self._get_resource(context,
IKEPolicy,
ipsec_sitecon['ikepolicy_id'])
self._get_resource(context,
IPsecPolicy,
ipsec_sitecon['ipsecpolicy_id'])
vpnservice_id = ipsec_sitecon['vpnservice_id']
ip_version = self._get_subnet_ip_version(context, vpnservice_id)
validator.validate_ipsec_site_connection(context,
ipsec_sitecon,
ip_version)
ipsec_site_conn_db = IPsecSiteConnection(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsec_sitecon['name'],
description=ipsec_sitecon['description'],
peer_address=ipsec_sitecon['peer_address'],
peer_id=ipsec_sitecon['peer_id'],
route_mode='static',
mtu=ipsec_sitecon['mtu'],
auth_mode='psk',
psk=ipsec_sitecon['psk'],
initiator=ipsec_sitecon['initiator'],
dpd_action=ipsec_sitecon['dpd_action'],
dpd_interval=ipsec_sitecon['dpd_interval'],
dpd_timeout=ipsec_sitecon['dpd_timeout'],
admin_state_up=ipsec_sitecon['admin_state_up'],
status=constants.PENDING_CREATE,
vpnservice_id=vpnservice_id,
ikepolicy_id=ipsec_sitecon['ikepolicy_id'],
ipsecpolicy_id=ipsec_sitecon['ipsecpolicy_id']
)
context.session.add(ipsec_site_conn_db)
for cidr in ipsec_sitecon['peer_cidrs']:
peer_cidr_db = IPsecPeerCidr(
cidr=cidr,
ipsec_site_connection_id=ipsec_site_conn_db['id']
)
context.session.add(peer_cidr_db)
return self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
def update_ipsec_site_connection(
self, context,
ipsec_site_conn_id, ipsec_site_connection):
ipsec_sitecon = ipsec_site_connection['ipsec_site_connection']
changed_peer_cidrs = False
validator = self._get_validator()
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context,
IPsecSiteConnection,
ipsec_site_conn_id)
vpnservice_id = ipsec_site_conn_db['vpnservice_id']
ip_version = self._get_subnet_ip_version(context, vpnservice_id)
validator.assign_sensible_ipsec_sitecon_defaults(
ipsec_sitecon, ipsec_site_conn_db)
validator.validate_ipsec_site_connection(
context,
ipsec_sitecon,
ip_version)
self.assert_update_allowed(ipsec_site_conn_db)
if "peer_cidrs" in ipsec_sitecon:
changed_peer_cidrs = True
old_peer_cidr_list = ipsec_site_conn_db['peer_cidrs']
old_peer_cidr_dict = dict(
(peer_cidr['cidr'], peer_cidr)
for peer_cidr in old_peer_cidr_list)
new_peer_cidr_set = set(ipsec_sitecon["peer_cidrs"])
old_peer_cidr_set = set(old_peer_cidr_dict)
new_peer_cidrs = list(new_peer_cidr_set)
for peer_cidr in old_peer_cidr_set - new_peer_cidr_set:
context.session.delete(old_peer_cidr_dict[peer_cidr])
for peer_cidr in new_peer_cidr_set - old_peer_cidr_set:
pcidr = IPsecPeerCidr(
cidr=peer_cidr,
ipsec_site_connection_id=ipsec_site_conn_id)
context.session.add(pcidr)
del ipsec_sitecon["peer_cidrs"]
if ipsec_sitecon:
ipsec_site_conn_db.update(ipsec_sitecon)
result = self._make_ipsec_site_connection_dict(ipsec_site_conn_db)
if changed_peer_cidrs:
result['peer_cidrs'] = new_peer_cidrs
return result
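    # The peer CIDR reconciliation above is a set difference (illustrative
    # values): old peer_cidrs {'10.0.0.0/24', '10.0.1.0/24'} updated to
    # {'10.0.1.0/24', '10.0.2.0/24'} deletes the 10.0.0.0/24 row, adds a new
    # IPsecPeerCidr for 10.0.2.0/24 and leaves 10.0.1.0/24 untouched.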
def delete_ipsec_site_connection(self, context, ipsec_site_conn_id):
with context.session.begin(subtransactions=True):
ipsec_site_conn_db = self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id
)
context.session.delete(ipsec_site_conn_db)
def _get_ipsec_site_connection(
self, context, ipsec_site_conn_id):
return self._get_resource(
context, IPsecSiteConnection, ipsec_site_conn_id)
def get_ipsec_site_connection(self, context,
ipsec_site_conn_id, fields=None):
ipsec_site_conn_db = self._get_ipsec_site_connection(
context, ipsec_site_conn_id)
return self._make_ipsec_site_connection_dict(
ipsec_site_conn_db, fields)
def get_ipsec_site_connections(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecSiteConnection,
self._make_ipsec_site_connection_dict,
filters=filters, fields=fields)
def update_ipsec_site_conn_status(self, context, conn_id, new_status):
with context.session.begin():
self._update_connection_status(context, conn_id, new_status, True)
def _update_connection_status(self, context, conn_id, new_status,
updated_pending):
"""Update the connection status, if changed.
If the connection is not in a pending state, unconditionally update
        the status. Likewise, if it is in a pending state and there is an
        indication that the status has changed, update the database.
"""
try:
conn_db = self._get_ipsec_site_connection(context, conn_id)
except vpnaas.IPsecSiteConnectionNotFound:
return
if not utils.in_pending_status(conn_db.status) or updated_pending:
conn_db.status = new_status
def _make_ikepolicy_dict(self, ikepolicy, fields=None):
res = {'id': ikepolicy['id'],
'tenant_id': ikepolicy['tenant_id'],
'name': ikepolicy['name'],
'description': ikepolicy['description'],
'auth_algorithm': ikepolicy['auth_algorithm'],
'encryption_algorithm': ikepolicy['encryption_algorithm'],
'phase1_negotiation_mode': ikepolicy['phase1_negotiation_mode'],
'lifetime': {
'units': ikepolicy['lifetime_units'],
'value': ikepolicy['lifetime_value'],
},
'ike_version': ikepolicy['ike_version'],
'pfs': ikepolicy['pfs']
}
return self._fields(res, fields)
def create_ikepolicy(self, context, ikepolicy):
ike = ikepolicy['ikepolicy']
tenant_id = self._get_tenant_id_for_create(context, ike)
        lifetime_info = ike.get('lifetime', {})
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ike_db = IKEPolicy(
id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ike['name'],
description=ike['description'],
auth_algorithm=ike['auth_algorithm'],
encryption_algorithm=ike['encryption_algorithm'],
phase1_negotiation_mode=ike['phase1_negotiation_mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
ike_version=ike['ike_version'],
pfs=ike['pfs']
)
context.session.add(ike_db)
return self._make_ikepolicy_dict(ike_db)
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
ike = ikepolicy['ikepolicy']
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
if ike:
lifetime_info = ike.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ike['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ike['lifetime_value'] = lifetime_info['value']
ike_db.update(ike)
return self._make_ikepolicy_dict(ike_db)
def delete_ikepolicy(self, context, ikepolicy_id):
with context.session.begin(subtransactions=True):
ikepolicy = context.session.query(IPsecSiteConnection).filter_by(
ikepolicy_id=ikepolicy_id).first()
if ikepolicy:
raise vpnaas.IKEPolicyInUse(ikepolicy_id=ikepolicy_id)
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
context.session.delete(ike_db)
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
ike_db = self._get_resource(context, IKEPolicy, ikepolicy_id)
return self._make_ikepolicy_dict(ike_db, fields)
def get_ikepolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IKEPolicy,
self._make_ikepolicy_dict,
filters=filters, fields=fields)
def _make_ipsecpolicy_dict(self, ipsecpolicy, fields=None):
res = {'id': ipsecpolicy['id'],
'tenant_id': ipsecpolicy['tenant_id'],
'name': ipsecpolicy['name'],
'description': ipsecpolicy['description'],
'transform_protocol': ipsecpolicy['transform_protocol'],
'auth_algorithm': ipsecpolicy['auth_algorithm'],
'encryption_algorithm': ipsecpolicy['encryption_algorithm'],
'encapsulation_mode': ipsecpolicy['encapsulation_mode'],
'lifetime': {
'units': ipsecpolicy['lifetime_units'],
'value': ipsecpolicy['lifetime_value'],
},
'pfs': ipsecpolicy['pfs']
}
return self._fields(res, fields)
def create_ipsecpolicy(self, context, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
tenant_id = self._get_tenant_id_for_create(context, ipsecp)
lifetime_info = ipsecp['lifetime']
lifetime_units = lifetime_info.get('units', 'seconds')
lifetime_value = lifetime_info.get('value', 3600)
with context.session.begin(subtransactions=True):
ipsecp_db = IPsecPolicy(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=ipsecp['name'],
description=ipsecp['description'],
transform_protocol=ipsecp['transform_'
'protocol'],
auth_algorithm=ipsecp['auth_algorithm'],
encryption_algorithm=ipsecp['encryption_'
'algorithm'],
encapsulation_mode=ipsecp['encapsulation_'
'mode'],
lifetime_units=lifetime_units,
lifetime_value=lifetime_value,
pfs=ipsecp['pfs'])
context.session.add(ipsecp_db)
return self._make_ipsecpolicy_dict(ipsecp_db)
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
ipsecp = ipsecpolicy['ipsecpolicy']
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsecp_db = self._get_resource(context,
IPsecPolicy,
ipsecpolicy_id)
if ipsecp:
lifetime_info = ipsecp.get('lifetime')
if lifetime_info:
if lifetime_info.get('units'):
ipsecp['lifetime_units'] = lifetime_info['units']
if lifetime_info.get('value'):
ipsecp['lifetime_value'] = lifetime_info['value']
ipsecp_db.update(ipsecp)
return self._make_ipsecpolicy_dict(ipsecp_db)
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
with context.session.begin(subtransactions=True):
ipsecpolicy = context.session.query(IPsecSiteConnection).filter_by(
ipsecpolicy_id=ipsecpolicy_id).first()
if ipsecpolicy:
raise vpnaas.IPsecPolicyInUse(ipsecpolicy_id=ipsecpolicy_id)
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
context.session.delete(ipsec_db)
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
ipsec_db = self._get_resource(context, IPsecPolicy, ipsecpolicy_id)
return self._make_ipsecpolicy_dict(ipsec_db, fields)
def get_ipsecpolicies(self, context, filters=None, fields=None):
return self._get_collection(context, IPsecPolicy,
self._make_ipsecpolicy_dict,
filters=filters, fields=fields)
def _make_vpnservice_dict(self, vpnservice, fields=None):
res = {'id': vpnservice['id'],
'name': vpnservice['name'],
'description': vpnservice['description'],
'tenant_id': vpnservice['tenant_id'],
'subnet_id': vpnservice['subnet_id'],
'router_id': vpnservice['router_id'],
'admin_state_up': vpnservice['admin_state_up'],
'status': vpnservice['status']}
return self._fields(res, fields)
def create_vpnservice(self, context, vpnservice):
vpns = vpnservice['vpnservice']
tenant_id = self._get_tenant_id_for_create(context, vpns)
validator = self._get_validator()
with context.session.begin(subtransactions=True):
validator.validate_vpnservice(context, vpns)
vpnservice_db = VPNService(id=uuidutils.generate_uuid(),
tenant_id=tenant_id,
name=vpns['name'],
description=vpns['description'],
subnet_id=vpns['subnet_id'],
router_id=vpns['router_id'],
admin_state_up=vpns['admin_state_up'],
status=constants.PENDING_CREATE)
context.session.add(vpnservice_db)
return self._make_vpnservice_dict(vpnservice_db)
def update_vpnservice(self, context, vpnservice_id, vpnservice):
vpns = vpnservice['vpnservice']
with context.session.begin(subtransactions=True):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
self.assert_update_allowed(vpns_db)
if vpns:
vpns_db.update(vpns)
return self._make_vpnservice_dict(vpns_db)
def delete_vpnservice(self, context, vpnservice_id):
with context.session.begin(subtransactions=True):
if context.session.query(IPsecSiteConnection).filter_by(
vpnservice_id=vpnservice_id
).first():
raise vpnaas.VPNServiceInUse(vpnservice_id=vpnservice_id)
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
context.session.delete(vpns_db)
def _get_vpnservice(self, context, vpnservice_id):
return self._get_resource(context, VPNService, vpnservice_id)
def get_vpnservice(self, context, vpnservice_id, fields=None):
vpns_db = self._get_resource(context, VPNService, vpnservice_id)
return self._make_vpnservice_dict(vpns_db, fields)
def get_vpnservices(self, context, filters=None, fields=None):
return self._get_collection(context, VPNService,
self._make_vpnservice_dict,
filters=filters, fields=fields)
def check_router_in_use(self, context, router_id):
vpnservices = self.get_vpnservices(
context, filters={'router_id': [router_id]})
if vpnservices:
raise vpnaas.RouterInUseByVPNService(
router_id=router_id,
vpnservice_id=vpnservices[0]['id'])
def check_subnet_in_use(self, context, subnet_id):
with context.session.begin(subtransactions=True):
vpnservices = context.session.query(VPNService).filter_by(
subnet_id=subnet_id
).first()
if vpnservices:
raise vpnaas.SubnetInUseByVPNService(
subnet_id=subnet_id,
vpnservice_id=vpnservices['id'])
class VPNPluginRpcDbMixin():
def _get_agent_hosting_vpn_services(self, context, host):
plugin = manager.NeutronManager.get_plugin()
agent = plugin._get_agent_by_type_and_host(
context, n_constants.AGENT_TYPE_L3, host)
if not agent.admin_state_up:
return []
query = context.session.query(VPNService)
query = query.join(IPsecSiteConnection)
query = query.join(IKEPolicy)
query = query.join(IPsecPolicy)
query = query.join(IPsecPeerCidr)
query = query.join(l3_agent_db.RouterL3AgentBinding,
l3_agent_db.RouterL3AgentBinding.router_id ==
VPNService.router_id)
query = query.filter(
l3_agent_db.RouterL3AgentBinding.l3_agent_id == agent.id)
return query
def update_status_by_agent(self, context, service_status_info_list):
"""Updating vpnservice and vpnconnection status.
:param context: context variable
:param service_status_info_list: list of status
The structure is
[{id: vpnservice_id,
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
ipsec_site_connections: {
ipsec_site_connection_id: {
status: ACTIVE|DOWN|ERROR,
updated_pending_status: True|False
}
}]
The agent will set updated_pending_status as True,
when agent update any pending status.
"""
with context.session.begin(subtransactions=True):
for vpnservice in service_status_info_list:
try:
vpnservice_db = self._get_vpnservice(
context, vpnservice['id'])
except vpnaas.VPNServiceNotFound:
LOG.warn(_LW('vpnservice %s in db is already deleted'),
vpnservice['id'])
continue
if (not utils.in_pending_status(vpnservice_db.status)
or vpnservice['updated_pending_status']):
vpnservice_db.status = vpnservice['status']
for conn_id, conn in vpnservice[
'ipsec_site_connections'].items():
self._update_connection_status(
context, conn_id, conn['status'],
conn['updated_pending_status'])
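# Illustrative sketch (added for clarity, not part of the original module): the
# payload shape update_status_by_agent() expects from the agent, following the
# docstring above. The ids below are placeholders, not real UUIDs.
# service_status_info_list = [{
#     'id': '<vpnservice-uuid>',
#     'status': 'ACTIVE',
#     'updated_pending_status': True,
#     'ipsec_site_connections': {
#         '<ipsec-site-connection-uuid>': {
#             'status': 'DOWN',
#             'updated_pending_status': True,
#         },
#     },
# }]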
|
dougwig/x-neutron-vpnaas
|
neutron_vpnaas/db/vpn/vpn_db.py
|
Python
|
apache-2.0
| 31,706
|
# Based on the work of https://github.com/AlessandroZ/LaZagne/blob/master/Windows/lazagne/
import win32con, win32api
class WinSCP():
def __init__(self):
self.hash = ''
self.username = ''
self.hostname = ''
# ------------------------------ Getters and Setters ------------------------------
def get_hash(self):
return self.hash
def set_hash(self, _hash):
self.hash = _hash
def get_username(self):
return self.username
def set_username(self, _username):
self.username = _username
def get_hostname(self):
return self.hostname
def set_hostname(self, _hostname):
self.hostname = _hostname
def decrypt_char(self):
hash = self.get_hash()
hex_flag = 0xA3
charset = '0123456789ABCDEF'
if len(hash) > 0:
unpack1 = charset.find(hash[0])
unpack1 = unpack1 << 4
unpack2 = charset.find(hash[1])
result = ~((unpack1 + unpack2) ^ hex_flag) & 0xff
# store the new hash
self.set_hash(hash[2:])
return result
def check_winscp_installed(self):
accessRead = win32con.KEY_READ | win32con.KEY_ENUMERATE_SUB_KEYS | win32con.KEY_QUERY_VALUE
try:
key = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER,
'Software\Martin Prikryl\WinSCP 2\Configuration\Security', 0, accessRead)
return True
except Exception, e:
return False
def check_masterPassword(self):
accessRead = win32con.KEY_READ | win32con.KEY_ENUMERATE_SUB_KEYS | win32con.KEY_QUERY_VALUE
key = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, 'Software\Martin Prikryl\WinSCP 2\Configuration\Security',
0, accessRead)
thisName = str(win32api.RegQueryValueEx(key, 'UseMasterPassword')[0])
if thisName == '0':
return False
else:
return True
def get_logins_info(self):
accessRead = win32con.KEY_READ | win32con.KEY_ENUMERATE_SUB_KEYS | win32con.KEY_QUERY_VALUE
try:
key = win32api.RegOpenKey(win32con.HKEY_CURRENT_USER, 'Software\Martin Prikryl\WinSCP 2\Sessions', 0,
accessRead)
except Exception, e:
return False
num_profiles = win32api.RegQueryInfoKey(key)[0]
pwdFound = []
for n in range(num_profiles):
name_skey = win32api.RegEnumKey(key, n)
skey = win32api.RegOpenKey(key, name_skey, 0, accessRead)
num = win32api.RegQueryInfoKey(skey)[1]
port = ''
values = {}
for nn in range(num):
k = win32api.RegEnumValue(skey, nn)
if k[0] == 'HostName':
self.set_hostname(k[1])
if k[0] == 'UserName':
self.set_username(k[1])
if k[0] == 'Password':
self.set_hash(k[1])
if k[0] == 'PortNumber':
port = str(k[1])
if num != 0:
if port == '':
port = '22'
try:
password = self.decrypt_password()
values['Password'] = password
except Exception, e:
pass
values['Hostname'] = self.get_hostname()
values['Port'] = port
values['Username'] = self.get_username()
pwdFound.append(values)
# print the results
return pwdFound
def decrypt_password(self):
hex_flag = 0xFF
flag = self.decrypt_char()
if flag == hex_flag:
self.decrypt_char()
length = self.decrypt_char()
else:
length = flag
ldel = (self.decrypt_char()) * 2
hash = self.get_hash()
self.set_hash(hash[ldel: len(hash)])
result = ''
for ss in range(length):
try:
result += chr(int(self.decrypt_char()))
except Exception, e:
pass
if flag == hex_flag:
key = self.get_username() + self.get_hostname()
result = result[len(key): len(result)]
return result
# --------- Main function ---------
def run(self):
if self.check_winscp_installed():
if not self.check_masterPassword():
r = self.get_logins_info()
if r == False:
pass
else:
return r
# tem = WinSCP()
# a = tem.run()
# print a
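# Worked example (added for clarity, not in the original): each stored password
# byte is two hex characters; decrypt_char() recovers it as
#     ~(((charset.find(high) << 4) + charset.find(low)) ^ 0xA3) & 0xFF
# so the pair 'A3' decodes to ~(0xA3 ^ 0xA3) & 0xFF = 0xFF, which is exactly the
# flag value decrypt_password() checks before stripping the username+hostname
# prefix from the decoded string.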
|
mehulj94/BrainDamage
|
Echoes/winscp.py
|
Python
|
apache-2.0
| 4,699
|
__author__ = 'ian.collins'
import time
class CrvProfile:
    '''
    Simple profiler
    '''
    disable = False
def __init__(self, inlogger, s):
if not self.disable:
self.Logger = inlogger
self.mess = s
self.pstart = int(round(time.time() * 1000))
def eprof(self):
if not self.disable:
pend = int(round(time.time() * 1000))
pend -= self.pstart
self.Logger.debug('CRV: PROF: ' + self.mess + ' ' + str(pend) + ' ms')
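# Usage sketch (added for illustration; assumes a standard logging.Logger, which
# is what the debug() call above implies -- callers may pass any compatible
# logger object):
#   import logging
#   logging.basicConfig(level=logging.DEBUG)
#   prof = CrvProfile(logging.getLogger('crv'), 'load fix data')
#   ...  # timed section goes here
#   prof.eprof()  # logs e.g. "CRV: PROF: load fix data 12 ms"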
|
bagpussnz/cgboatlog
|
crvprofile.py
|
Python
|
apache-2.0
| 539
|
from unittest.mock import patch, MagicMock, PropertyMock
from qcat.tests import TestCase
from .test_models import create_new_user
from ..client import WocatWebsiteUserClient
class TestClient(TestCase):
def setUp(self):
self.remote_user_client = WocatWebsiteUserClient()
self.user = create_new_user()
def api_login_response(self, status_code, login_status='foo'):
api_response = MagicMock()
api_response.configure_mock(status_code=status_code)
api_response.json = lambda: {'status': login_status}
return api_response
@patch('accounts.client.remote_user_client.get_user_information')
def test_get_and_update_django_user(self, mock_user_information):
user_mock = dict(
pk=self.user.id,
last_name='last_test',
first_name='first_test',
username=self.user.email,
email=self.user.email,
usergroup=[],
)
with patch.object(WocatWebsiteUserClient, 'get_user_information') as user_info:
user_info.return_value = user_mock
user = self.remote_user_client.get_and_update_django_user(**user_mock)
self.assertEqual(user.lastname, 'last_test')
self.assertEqual(user.firstname, 'first_test')
self.assertEqual(user.email, self.user.email)
self.assertEqual(user, self.user)
@patch('requests.get')
def test_get_user_info(self, mock_request_get):
api_request = MagicMock()
api_request.status_code = 200
api_request.ok = PropertyMock(return_value=True)
api_request.json = lambda: dict(success=True)
mock_request_get.return_value = api_request
self.assertIsInstance(
self.remote_user_client.get_user_information('123'), dict
)
@patch('requests.post')
@patch.object(WocatWebsiteUserClient, '_get')
def test_search_users(self, mock_get, mock_request_post):
request_post = MagicMock()
request_post.status_code = 200
request_post.ok = PropertyMock(return_value=True)
request_post.json = lambda: dict(success=True)
mock_request_post.return_value = request_post
self.assertIsInstance(
self.remote_user_client.search_users('foo'), dict
)
def test_update_user(self):
# This is tested within test_models.
pass
|
CDE-UNIBE/qcat
|
apps/accounts/tests/test_client.py
|
Python
|
apache-2.0
| 2,393
|
import sys
inputfile = sys.argv[1]
sequencefile = sys.argv[2]
outputfile = sys.argv[3]
#inputfile="/scratch/cqs/shengq1/vickers/20161121_smallRNA_3018_85_spikein_run2/human/class_independent/identical_sequence_count_table/pbs/human_spikein_sequence.filelist"
#sequencefile="/scratch/cqs/shengq1/vickers/20161121_smallRNA_3018_85_spikein_run2/spikein.txt"
#outputfile="/scratch/cqs/shengq1/vickers/20161121_smallRNA_3018_85_spikein_run2/human.txt"
dupcount_files=[]
with open(inputfile, 'r') as f:
for line in f:
parts = line.strip().split('\t')
if len(parts) >= 2:
dupcount_files.append([parts[1], parts[0]])
sequences=[]
map={}
with open(sequencefile, 'r') as f:
for line in f:
parts = line.strip().split('\t')
if len(parts) >= 2:
sequences.append([parts[0], parts[1]])
map[parts[0]]={}
for dupfile in dupcount_files:
name=dupfile[0]
file=dupfile[1]
with open(file, 'r') as f:
f.readline()
for line in f:
parts = line.strip().split('\t')
seq=parts[2]
for sequence in sequences:
if sequence[1] == seq:
map[sequence[0]][name]=parts[1]
#print(map)
with open(outputfile, 'w') as f:
f.write("Name\tSequence\t%s\n" % "\t".join([sample[0] for sample in dupcount_files]))
for sequence in sequences:
f.write("%s\t%s" % (sequence[0], sequence[1]))
curmap = map[sequence[0]]
for sample in dupcount_files:
if sample[0] in curmap:
f.write("\t" + curmap[sample[0]])
else:
f.write("\t0")
f.write("\n")
|
shengqh/ngsperl
|
lib/SmallRNA/findSequence.py
|
Python
|
apache-2.0
| 1,663
|
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from typing import Union
from collections import OrderedDict
import numpy as np
from rl_coach.agents.agent import Agent
from rl_coach.agents.ddpg_agent import DDPGAgent
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.architectures.head_parameters import DDPGActorHeadParameters, TD3VHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import NetworkParameters, AlgorithmParameters, \
AgentParameters, EmbedderScheme
from rl_coach.core_types import ActionInfo, TrainingSteps, Transition
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.memories.episodic.episodic_experience_replay import EpisodicExperienceReplayParameters
from rl_coach.spaces import BoxActionSpace, GoalsSpace
class TD3CriticNetworkParameters(NetworkParameters):
def __init__(self, num_q_networks):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters(),
'action': InputEmbedderParameters(scheme=EmbedderScheme.Shallow)}
self.middleware_parameters = FCMiddlewareParameters(num_streams=num_q_networks)
self.heads_parameters = [TD3VHeadParameters()]
self.optimizer_type = 'Adam'
self.adam_optimizer_beta2 = 0.999
self.optimizer_epsilon = 1e-8
self.batch_size = 100
self.async_training = False
self.learning_rate = 0.001
self.create_target_network = True
self.shared_optimizer = True
self.scale_down_gradients_by_number_of_workers_for_sync_training = False
class TD3ActorNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters()}
self.middleware_parameters = FCMiddlewareParameters()
self.heads_parameters = [DDPGActorHeadParameters(batchnorm=False)]
self.optimizer_type = 'Adam'
self.adam_optimizer_beta2 = 0.999
self.optimizer_epsilon = 1e-8
self.batch_size = 100
self.async_training = False
self.learning_rate = 0.001
self.create_target_network = True
self.shared_optimizer = True
self.scale_down_gradients_by_number_of_workers_for_sync_training = False
class TD3AlgorithmParameters(AlgorithmParameters):
"""
:param num_steps_between_copying_online_weights_to_target: (StepMethod)
The number of steps between copying the online network weights to the target network weights.
:param rate_for_copying_weights_to_target: (float)
When copying the online network weights to the target network weights, a soft update will be used, which
        weights the new online network weights by rate_for_copying_weights_to_target
:param num_consecutive_playing_steps: (StepMethod)
The number of consecutive steps to act between every two training iterations
:param use_target_network_for_evaluation: (bool)
If set to True, the target network will be used for predicting the actions when choosing actions to act.
Since the target network weights change more slowly, the predicted actions will be more consistent.
:param action_penalty: (float)
The amount by which to penalize the network on high action feature (pre-activation) values.
This can prevent the actions features from saturating the TanH activation function, and therefore prevent the
gradients from becoming very low.
:param clip_critic_targets: (Tuple[float, float] or None)
The range to clip the critic target to in order to prevent overestimation of the action values.
:param use_non_zero_discount_for_terminal_states: (bool)
If set to True, the discount factor will be used for terminal states to bootstrap the next predicted state
values. If set to False, the terminal states reward will be taken as the target return for the network.
"""
def __init__(self):
super().__init__()
self.rate_for_copying_weights_to_target = 0.005
self.use_target_network_for_evaluation = False
self.action_penalty = 0
self.clip_critic_targets = None # expected to be a tuple of the form (min_clip_value, max_clip_value) or None
self.use_non_zero_discount_for_terminal_states = False
self.act_for_full_episodes = True
self.update_policy_every_x_episode_steps = 2
self.num_steps_between_copying_online_weights_to_target = TrainingSteps(self.update_policy_every_x_episode_steps)
self.policy_noise = 0.2
self.noise_clipping = 0.5
self.num_q_networks = 2
class TD3AgentExplorationParameters(AdditiveNoiseParameters):
def __init__(self):
super().__init__()
self.noise_as_percentage_from_action_space = False
class TD3AgentParameters(AgentParameters):
def __init__(self):
td3_algorithm_params = TD3AlgorithmParameters()
super().__init__(algorithm=td3_algorithm_params,
exploration=TD3AgentExplorationParameters(),
memory=EpisodicExperienceReplayParameters(),
networks=OrderedDict([("actor", TD3ActorNetworkParameters()),
("critic",
TD3CriticNetworkParameters(td3_algorithm_params.num_q_networks))]))
@property
def path(self):
return 'rl_coach.agents.td3_agent:TD3Agent'
# Twin Delayed DDPG - https://arxiv.org/pdf/1802.09477.pdf
class TD3Agent(DDPGAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.q_values = self.register_signal("Q")
self.TD_targets_signal = self.register_signal("TD targets")
self.action_signal = self.register_signal("actions")
@property
def is_on_policy(self) -> bool:
return False
def learn_from_batch(self, batch):
actor = self.networks['actor']
critic = self.networks['critic']
actor_keys = self.ap.network_wrappers['actor'].input_embedders_parameters.keys()
critic_keys = self.ap.network_wrappers['critic'].input_embedders_parameters.keys()
# TD error = r + discount*max(q_st_plus_1) - q_st
next_actions, actions_mean = actor.parallel_prediction([
(actor.target_network, batch.next_states(actor_keys)),
(actor.online_network, batch.states(actor_keys))
])
# add noise to the next_actions
noise = np.random.normal(0, self.ap.algorithm.policy_noise, next_actions.shape).clip(
-self.ap.algorithm.noise_clipping, self.ap.algorithm.noise_clipping)
next_actions = self.spaces.action.clip_action_to_space(next_actions + noise)
critic_inputs = copy.copy(batch.next_states(critic_keys))
critic_inputs['action'] = next_actions
q_st_plus_1 = critic.target_network.predict(critic_inputs)[2] # output #2 is the min (Q1, Q2)
# calculate the bootstrapped TD targets while discounting terminal states according to
# use_non_zero_discount_for_terminal_states
if self.ap.algorithm.use_non_zero_discount_for_terminal_states:
TD_targets = batch.rewards(expand_dims=True) + self.ap.algorithm.discount * q_st_plus_1
else:
TD_targets = batch.rewards(expand_dims=True) + \
(1.0 - batch.game_overs(expand_dims=True)) * self.ap.algorithm.discount * q_st_plus_1
# clip the TD targets to prevent overestimation errors
if self.ap.algorithm.clip_critic_targets:
TD_targets = np.clip(TD_targets, *self.ap.algorithm.clip_critic_targets)
self.TD_targets_signal.add_sample(TD_targets)
# train the critic
critic_inputs = copy.copy(batch.states(critic_keys))
critic_inputs['action'] = batch.actions(len(batch.actions().shape) == 1)
result = critic.train_and_sync_networks(critic_inputs, TD_targets)
total_loss, losses, unclipped_grads = result[:3]
if self.training_iteration % self.ap.algorithm.update_policy_every_x_episode_steps == 0:
# get the gradients of output #3 (=mean of Q1 network) w.r.t the action
critic_inputs = copy.copy(batch.states(critic_keys))
critic_inputs['action'] = actions_mean
action_gradients = critic.online_network.predict(critic_inputs,
outputs=critic.online_network.gradients_wrt_inputs[3]['action'])
# apply the gradients from the critic to the actor
initial_feed_dict = {actor.online_network.gradients_weights_ph[0]: -action_gradients}
gradients = actor.online_network.predict(batch.states(actor_keys),
outputs=actor.online_network.weighted_gradients[0],
initial_feed_dict=initial_feed_dict)
if actor.has_global:
actor.apply_gradients_to_global_network(gradients)
actor.update_online_network()
else:
actor.apply_gradients_to_online_network(gradients)
return total_loss, losses, unclipped_grads
def train(self):
self.ap.algorithm.num_consecutive_training_steps = self.current_episode_steps_counter
return Agent.train(self)
def update_transition_before_adding_to_replay_buffer(self, transition: Transition) -> Transition:
"""
Allows agents to update the transition just before adding it to the replay buffer.
Can be useful for agents that want to tweak the reward, termination signal, etc.
:param transition: the transition to update
:return: the updated transition
"""
transition.game_over = False if self.current_episode_steps_counter ==\
self.parent_level_manager.environment.env._max_episode_steps\
else transition.game_over
return transition
|
NervanaSystems/coach
|
rl_coach/agents/td3_agent.py
|
Python
|
apache-2.0
| 10,841
|
import sys
sys.path.append('../functions')
import cv2
import pickle
import itertools
from cluster import Cluster
from connectLib import clusterThresh
import numpy as np
import matplotlib.pyplot as plt
from plosLib import pipeline as PLOS
from random import randrange as rand
def binaryThreshold(img, percentile=90):
img = (img/256).astype('uint8')
threshImg = np.zeros_like(img)
percentile = np.percentile(img, percentile)
for i in range(len(img)):
threshImg[i] = cv2.threshold(img[i], percentile, 255, cv2.THRESH_BINARY)[1]
return threshImg
def adaptiveThreshold(inImg, sx, sy, sz, p):
outImg = np.zeros_like(inImg)
shape = outImg.shape
subXLen = shape[0]/sx
subYLen = shape[1]/sy
subZLen = shape[2]/sz
for xInc in range(1, sx + 1):
for yInc in range(1, sy + 1):
for zInc in range(1, sz + 1):
sub = inImg[(xInc-1)*subXLen: xInc*subXLen, (yInc-1)*subYLen: yInc*subYLen, (zInc-1)*subZLen: zInc*subZLen]
subThresh = binaryThreshold(sub, p)
outImg[(xInc-1)*subXLen: xInc*subXLen, (yInc-1)*subYLen: yInc*subYLen, (zInc-1)*subZLen: zInc*subZLen] = subThresh
return outImg
def neighborhoodDensity(data, interPlane = 1, intraPlane = 1, percentile = 50):
output = np.zeros_like(data)
for z in range(data.shape[0]):
for y in range(data.shape[1]):
for x in range(data.shape[2]):
zLow = z-intraPlane
zHigh = z+intraPlane
yLow = y-interPlane
yHigh = y+interPlane
xLow = x-interPlane
xHigh = x+interPlane
if zLow>=0 and zHigh<data.shape[0] and yLow>=0 and yHigh<data.shape[1] and xLow>=0 and xHigh<data.shape[2]:
subVol = data[zLow:zHigh, yLow:yHigh, xLow:xHigh]
                    if all(subVol.shape):
thresh = np.percentile(subVol, percentile)
binSubVol = subVol >= thresh
output[z][y][x] = (np.count_nonzero(binSubVol)/float(interPlane*interPlane*intraPlane)) * data[z][y][x] * np.average(subVol)
return output
def generatePointSet():
center = (rand(0, 9), rand(0, 999), rand(0, 999))
toPopulate = []
for z in range(-3, 2):
for y in range(-3, 2):
for x in range(-3, 2):
curPoint = (center[0]+z, center[1]+y, center[2]+x)
#only populate valid points
valid = True
for dim in range(3):
if curPoint[dim] < 0 or curPoint[dim] >= 1000:
valid = False
if valid:
toPopulate.append(curPoint)
return set(toPopulate)
def generateTestVolume(n):
#create a test volume
volume = np.zeros((10, 1000, 1000))
myPointSet = set()
for _ in range(n):
potentialPointSet = generatePointSet()
#be sure there is no overlap
while len(myPointSet.intersection(potentialPointSet)) > 0:
potentialPointSet = generatePointSet()
for elem in potentialPointSet:
myPointSet.add(elem)
#populate the true volume
for elem in myPointSet:
volume[elem[0], elem[1], elem[2]] = rand(40000, 60000)
#introduce noise
noiseVolume = np.copy(volume)
for z in range(noiseVolume.shape[0]):
for y in range(noiseVolume.shape[1]):
for x in range(noiseVolume.shape[2]):
if not (z, y, x) in myPointSet:
toPop = rand(0, 10)
if toPop == 5:
noiseVolume[z][y][x] = rand(0, 60000)
return volume, noiseVolume
def applyGradient(volume, originX, originY):
outStack = []
maxDistance = np.sqrt((volume[0].shape[0])**2+(volume[0].shape[1])**2)
for sample in volume:
outSample = np.zeros_like(sample)
for y in range(sample.shape[0]):
for x in range(sample.shape[1]):
distance = np.sqrt((x - originX)**2+(y - originY)**2)
sigma = np.sqrt(distance)/np.sqrt(maxDistance)
modifier = 1.-(sigma * distance/maxDistance)
outSample[y][x] = modifier * sample[y][x]
outStack.append(outSample)
return np.stack(outStack)
def precision_recall_f1(labels, predictions, overlapRatio):
if len(predictions) == 0:
print 'ERROR: prediction list is empty'
return 0., 0., 0.
labelFound = np.zeros(len(labels))
truePositives = 0
falsePositives = 0
for prediction in predictions:
        #casting to set is ok here since members are unique
predictedMembers = set([tuple(elem) for elem in prediction.getMembers()])
detectionCutoff = overlapRatio * len(predictedMembers)
found = False
for idx, label in enumerate(labels):
labelMembers = set([tuple(elem) for elem in label.getMembers()])
#if the predictedOverlap is over the detectionCutoff ratio
if len(predictedMembers & labelMembers) >= detectionCutoff:
truePositives +=1
found=True
labelFound[idx] = 1
if not found:
falsePositives +=1
precision = truePositives/float(truePositives + falsePositives)
recall = np.count_nonzero(labelFound)/float(len(labels))
f1 = 0
try:
f1 = 2 * (precision*recall)/(precision + recall)
except ZeroDivisionError:
f1 = 0
return precision, recall, f1
def executeTest(algList, paramList, volumeList):
    #for every volume
for volume in volumeList:
#for every param combination
for params in itertools.product(*[paramList[alg[1]] for alg in algList]):
#run the current pipeline with the current params
testString = volume[2]
#pipelineVol = volume[1]
for idx, alg in enumerate(algList):
                #pipelineVol = alg[0](pipelineVol, *(params[idx]))
testString = testString + alg[1]+str(params[idx])
print testString
#TODO precision recall here
if __name__ == '__main__':
#create the test volume set
volumeList = []
for i in range(2):
#label, test = generateTestVolume(2000)
label, test = generateTestVolume(1)
volumeList.append([label, test, 'sparse_uniform_'+str(i)])
#volumeList.append([label, applyGradient(test, 0, 0), 'sparse_gradient_'+str(i)])
#label, test = generateTestVolume(15000)
volumeList.append([label, test, 'dense_uniform_'+str(i)])
#volumeList.append([label, applyGradient(test, 0, 0), 'dense_gradient_'+str(i)])
#create the list of functions to operate over
funcList = [[PLOS, '_plos'], [adaptiveThreshold, '_adaptive'], [neighborhoodDensity, '_neighborhood']]
#create the list of params to operate over
paramDict = {
'_plos': [elem for elem in itertools.product([1, 2, 3], repeat=3)],
'_adaptive': [elem for elem in [[j for j in x]+[y] for x in itertools.product([6, 8, 10], repeat=3) for y in [30, 50, 70, 90]]],
'_neighborhood': [elem for elem in [[j for j in x]+[y] for x in itertools.product([10, 15, 20], repeat=2) for y in [30, 50, 70, 90]]]
}
#iterate through all possibilities of inclusion
for i in itertools.product([0, 1], repeat=3):
print i
#populate the current pipeline
algList = []
for algNum, use in enumerate(i):
if use:
algList.append(funcList[algNum])
#iterate through all permutations of algs
for pipeline in itertools.permutations(algList):
executeTest(pipeline, paramDict, volumeList)
|
NeuroDataDesign/pan-synapse
|
pipeline_1/code/tests/hyperSearch.py
|
Python
|
apache-2.0
| 7,748
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom configurations and functions for Google App Engine."""
__author__ = 'psimakov@google.com (Pavel Simakov)'
import os
import sys
# Whether we are running in the production environment.
PRODUCTION_MODE = not os.environ.get(
'SERVER_SOFTWARE', 'Development').startswith('Development')
# this is the official location of this app for computing of all relative paths
BUNDLE_ROOT = os.path.dirname(__file__)
# Default namespace name is '' and not None.
DEFAULT_NAMESPACE_NAME = ''
# Third-party library zip files.
THIRD_PARTY_LIBS = ['babel-0.9.6.zip', 'gaepytz-2011h.zip']
for lib in THIRD_PARTY_LIBS:
thirdparty_lib = os.path.join(BUNDLE_ROOT, 'lib/%s' % lib)
if not os.path.exists(thirdparty_lib):
raise Exception('Library does not exist: %s' % thirdparty_lib)
sys.path.insert(0, thirdparty_lib)
if not PRODUCTION_MODE:
from google.appengine.api import apiproxy_stub_map # pylint: disable-msg=g-import-not-at-top
from google.appengine.datastore import datastore_stub_util # pylint: disable-msg=g-import-not-at-top
# Make dev_appserver run with PseudoRandomHRConsistencyPolicy, which we
# believe is the best for localhost manual testing; normally dev_appserver
# runs either under MasterSlave policy, which does not allow XG
# transactions, or under TimeBasedHR policy, which serves counter-intuitive
# dirty query results; this also matches policy for the functional tests
stub = apiproxy_stub_map.apiproxy.GetStub(
'datastore_v3')
if stub:
policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(
probability=1)
# stub.SetConsistencyPolicy(policy)
|
ksh/gpitraining
|
appengine_config - Copia.py
|
Python
|
apache-2.0
| 2,266
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from imdb import Imdb
import random
class ConcatDB(Imdb):
"""
ConcatDB is used to concatenate multiple imdbs to form a larger db.
    It is very useful for combining multiple datasets with the same classes.
Parameters
----------
imdbs : Imdb or list of Imdb
Imdbs to be concatenated
shuffle : bool
whether to shuffle the initial list
"""
def __init__(self, imdbs, shuffle):
super(ConcatDB, self).__init__('concatdb')
if not isinstance(imdbs, list):
imdbs = [imdbs]
self.imdbs = imdbs
self._check_classes()
self.image_set_index = self._load_image_set_index(shuffle)
def _check_classes(self):
"""
        check input imdbs and make sure they have the same classes
"""
try:
self.classes = self.imdbs[0].classes
self.num_classes = len(self.classes)
except AttributeError:
# fine, if no classes is provided
pass
if self.num_classes > 0:
for db in self.imdbs:
assert self.classes == db.classes, "Multiple imdb must have same classes"
def _load_image_set_index(self, shuffle):
"""
get total number of images, init indices
Parameters
----------
shuffle : bool
whether to shuffle the initial indices
"""
self.num_images = 0
for db in self.imdbs:
self.num_images += db.num_images
indices = list(range(self.num_images))
if shuffle:
random.shuffle(indices)
return indices
def _locate_index(self, index):
"""
given index, find out sub-db and sub-index
Parameters
----------
index : int
index of a specific image
Returns
----------
a tuple (sub-db, sub-index)
"""
assert index >= 0 and index < self.num_images, "index out of range"
pos = self.image_set_index[index]
for k, v in enumerate(self.imdbs):
if pos >= v.num_images:
pos -= v.num_images
else:
return (k, pos)
def image_path_from_index(self, index):
"""
given image index, find out full path
Parameters
----------
index: int
index of a specific image
Returns
----------
full path of this image
"""
assert self.image_set_index is not None, "Dataset not initialized"
pos = self.image_set_index[index]
n_db, n_index = self._locate_index(index)
return self.imdbs[n_db].image_path_from_index(n_index)
def label_from_index(self, index):
"""
given image index, return preprocessed ground-truth
Parameters
----------
index: int
index of a specific image
Returns
----------
ground-truths of this image
"""
assert self.image_set_index is not None, "Dataset not initialized"
pos = self.image_set_index[index]
n_db, n_index = self._locate_index(index)
return self.imdbs[n_db].label_from_index(n_index)
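# Usage sketch (added for illustration; the PascalVOC arguments are hypothetical
# and depend on the surrounding example/ssd dataset code):
#   train_db = ConcatDB([PascalVOC('trainval', '2007', devkit_path, shuffle=False),
#                        PascalVOC('trainval', '2012', devkit_path, shuffle=False)],
#                       shuffle=True)
#   print(train_db.num_images)               # summed over the wrapped imdbs
#   print(train_db.image_path_from_index(0)) # resolved through the right sub-db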
|
ShownX/incubator-mxnet
|
example/ssd/dataset/concat_db.py
|
Python
|
apache-2.0
| 3,974
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import sys
import time
from resource_management.core.shell import call
def hive_service(
name,
action='start'):
import params
if name == 'metastore':
pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
cmd = format(
"env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
elif name == 'hiveserver2':
pid_file = format("{hive_pid_dir}/{hive_pid}")
cmd = format(
"env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir} {hive_log_dir}")
process_id_exists = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")
if action == 'start':
if name == 'hiveserver2':
check_fs_root()
demon_cmd = format("{cmd}")
Execute(demon_cmd,
user=params.hive_user,
environment={'HADOOP_HOME': params.hadoop_home},
path=params.execute_path,
not_if=process_id_exists
)
if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or \
params.hive_jdbc_driver == "org.postgresql.Driver" or \
params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
db_connection_check_command = format(
"{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification '{hive_jdbc_connection_url}' {hive_metastore_user_name} {hive_metastore_user_passwd!p} {hive_jdbc_driver}")
Execute(db_connection_check_command,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin', tries=5, try_sleep=10)
    # AMBARI-5800 - wait for the server to come up instead of just the PID existence
if name == 'hiveserver2':
SOCKET_WAIT_SECONDS = 120
address=params.hive_server_host
port=int(params.hive_server_port)
start_time = time.time()
end_time = start_time + SOCKET_WAIT_SECONDS
is_service_socket_valid = False
print "Waiting for the Hive server to start..."
while time.time() < end_time:
if check_thrift_port_sasl(address, port, 2, security_enabled=params.security_enabled):
is_service_socket_valid = True
break
else:
time.sleep(2)
elapsed_time = time.time() - start_time
if is_service_socket_valid == False:
raise Fail("Connection to Hive server %s on port %s failed after %d seconds" % (address, port, elapsed_time))
print "Successfully connected to Hive at %s on port %s after %d seconds" % (address, port, elapsed_time)
elif action == 'stop':
demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
Execute(demon_cmd,
not_if = format("! ({process_id_exists})")
)
def check_fs_root():
import params
fs_root_url = format("{fs_root}{hive_apps_whs_dir}")
cmd = format("metatool -listFSRoot 2>/dev/null | grep hdfs://")
code, out = call(cmd, user=params.hive_user)
if code == 0 and fs_root_url.strip() != out.strip():
cmd = format("metatool -updateLocation {fs_root}{hive_apps_whs_dir} {out}")
Execute(cmd,
environment= {'PATH' : params.execute_path },
user=params.hive_user)
|
zouzhberk/ambaridemo
|
demo-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/hive_service.py
|
Python
|
apache-2.0
| 4,224
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkpointing utilities for save/restore."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import six
import REDACTED.transformer_lingvo.lingvo.compat as tf
from REDACTED.transformer_lingvo.lingvo.core import cluster_factory
from REDACTED.transformer_lingvo.lingvo.core import py_utils
class Checkpointer(object):
"""Checkpointing utility class.
Needs to be created within a graph context.
"""
def __init__(self, train_dir, model, train_params=None, save_only=False):
"""Initialize Checkpointer.
Args:
train_dir: Training directory for saving checkpoints.
model: A BaseModel instance or None.
train_params: If specified, use these training params instead of those in
the `model`.
save_only: This checkpointer is only intended for saving checkpoints.
"""
self._train_dir = train_dir
self._save_only = save_only
self._save_path = os.path.join(self._train_dir, 'ckpt')
if train_params:
self._train_params = train_params
self._model = None
else:
assert model
self._train_params = model.params.train
self._model = model
if not self._save_only:
self._params = model.params
self._model_tasks = model.tasks
self._model = model
self._next_checkpoint_seconds = 0
self._save_interval_seconds = self._train_params.save_interval_seconds
self._saver = self._GetSaver()
self._uninitialized_vars = tf.report_uninitialized_variables(
tf.global_variables())
def _GetSaver(self):
"""Returns a saver."""
do_eval = cluster_factory.Current().do_eval
if not self._save_only and self._model.ema and do_eval:
tf.logging.info('Using EMA for evaluation.')
return tf.train.Saver(
self._model.ema.variables_to_restore(self._model.variables_for_ema))
return tf.train.Saver(
sharded=True,
max_to_keep=self._train_params.save_max_to_keep,
keep_checkpoint_every_n_hours=(
self._train_params.save_keep_checkpoint_every_n_hours),
pad_step_number=True, # %08d
write_version=tf.train.SaverDef.V2)
def RestoreFromPath(self, sess, checkpoint_path):
"""Load the checkpoint from specified path."""
assert not self._save_only
tf.logging.info('Load from checkpoint %s.', checkpoint_path)
self._saver.restore(sess, checkpoint_path)
tf.logging.info('Load checkpoint done.')
# Successfully restored from checkpoint.
uninitialized_var_names = self._GetUninitializedVarNames(sess)
assert not uninitialized_var_names, uninitialized_var_names
def MaybeSave(self, sess, gsteps):
"""If it's time to save, save the checkpoint.
Args:
sess: tf.Session.
gsteps: Current global step.
"""
now = time.time()
if now >= self._next_checkpoint_seconds:
self.Save(sess, gsteps)
self._next_checkpoint_seconds = now + self._save_interval_seconds
def Save(self, sess, gsteps):
"""Save the checkpoint.
Args:
sess: tf.Session.
gsteps: Current global step.
"""
tf.logging.info('Save checkpoint')
path = self._saver.save(sess, self._save_path, gsteps)
tf.logging.info('Save checkpoint done: %s', path)
def _RestoreFromLatestCheckpoint(self, sess):
"""Restore the latest checkpoint and return True, else return False."""
assert not self._save_only
path = tf.train.latest_checkpoint(self._train_dir)
if path is None:
return False
# First recover the checkpoint state in the directory.
#
# NOTE: latest_checkpoint() already calls this but to avoid duplicating
# v1 vs. v2 behavior here, we just query the state again.
ckpt_state = tf.train.get_checkpoint_state(self._train_dir)
self._saver.recover_last_checkpoints(ckpt_state.all_model_checkpoint_paths)
# Now restore the checkpoint.
self.RestoreFromPath(sess, path)
return True
def _GetUninitializedVarNames(self, sess):
uninitialized_var_names = sorted(list(sess.run(self._uninitialized_vars)))
# uninitialized_var_names is a list of strings without ":0" suffix.
# tf.report_uninitialized_variables returns binary strings.
assert all(isinstance(s, six.binary_type) for s in uninitialized_var_names)
return uninitialized_var_names
def Restore(self, sess, force_reinitialize=False):
"""Restore from latest checkpoint if available, or initialize."""
# Try and restore from the latest checkpoint.
if self._RestoreFromLatestCheckpoint(sess):
# Successfully restored from checkpoint.
uninitialized_var_names = self._GetUninitializedVarNames(sess)
assert not uninitialized_var_names, uninitialized_var_names
return
# Otherwise we need to initialize.
uninitialized_var_names = self._GetUninitializedVarNames(sess)
tf.logging.info('Uninitialized var list: %s', uninitialized_var_names)
if not force_reinitialize:
# There should only be uninitialized variables if all variables are
# uninitialized - with the exception of global_step due to
# RestoreGlobalStepIfNeeded in the _LoopEnqueue of TrainerTpu.
all_var_names = [
six.ensure_binary(v.name[:-2]) for v in tf.global_variables()
]
already_initialized_vars = (
set(all_var_names) - set(uninitialized_var_names))
already_initialized_vars.discard(b'global_step')
assert not already_initialized_vars, ('Already initialized vars: %s' %
sorted(already_initialized_vars))
# At this point all variables are uninitialized, so it is safe to run a
# global initializer.
sess.run(tf.global_variables_initializer())
tf.logging.info('Initialized all vars.')
# Restore specific variables based on init_from_checkpoint_rules.
for task in self._model.tasks:
tp = task.params.train
if tp.init_from_checkpoint_rules:
tf.logging.info('OverrideVarsFromCheckpoints %s',
tp.init_from_checkpoint_rules)
py_utils.OverrideVarsFromCheckpoints(sess, tf.global_variables(),
tp.init_from_checkpoint_rules)
if self._params.train.init_from_checkpoint_rules:
tp = self._params.train
tf.logging.info('OverrideVarsFromCheckpoints %s',
tp.init_from_checkpoint_rules)
py_utils.OverrideVarsFromCheckpoints(sess, tf.global_variables(),
tp.init_from_checkpoint_rules)
def RestoreIfNeeded(self, sess):
"""If vars are not initialized, restore from checkpoint."""
assert not self._save_only
uninitialized_var_names = self._GetUninitializedVarNames(sess)
if not uninitialized_var_names:
# All variables are already initialized.
return
self.Restore(sess)
def RestoreGlobalStepIfNeeded(self, sess):
"""If global step is not initialized, load it from the checkpoint.
Args:
sess: tf.Session.
"""
assert not self._save_only
uninitialized_vars = self._GetUninitializedVarNames(sess)
if six.ensure_binary('global_step') not in uninitialized_vars:
return
with sess.graph.as_default():
gstep = py_utils.GetGlobalStep()
path = tf.train.latest_checkpoint(self._train_dir)
if path:
reader = tf.train.NewCheckpointReader(path)
value = reader.get_tensor('global_step')
tf.logging.info('Restoring global step: %s', value)
sess.run(gstep.assign(value))
else:
tf.logging.info('Initializing global step')
sess.run(gstep.initializer)
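# Usage sketch (added for illustration; `my_model`, `train_dir` and the session
# setup are hypothetical -- lingvo normally drives this from its trainer loop):
#   ckpt = Checkpointer(train_dir, my_model)   # must be built in a graph context
#   with tf.Session() as sess:
#     ckpt.RestoreIfNeeded(sess)               # restore latest ckpt or initialize
#     for _ in range(num_steps):
#       ...                                    # one training step
#       ckpt.MaybeSave(sess, sess.run(py_utils.GetGlobalStep()))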
|
mlperf/training_results_v0.7
|
Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-512/lingvo/core/checkpointer.py
|
Python
|
apache-2.0
| 8,373
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Program page smoke tests."""
# pylint: disable=no-self-use
# pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
# pylint: disable=unused-argument
import pytest # pylint: disable=import-error
from lib import base
from lib.constants import element, locator, url
from lib.page import dashboard, widget_bar
from lib.page.widget import info_widget
from lib.utils import test_utils, selenium_utils
class TestProgramPage(base.Test):
"""A part of smoke tests, section 4."""
@pytest.mark.smoke_tests
def test_object_count_updates(self, selenium, new_program_ui):
"""Checks if count updates in LHN after creating new program
object."""
_, program_info_page = new_program_ui
lhn_menu = dashboard.Header(selenium).open_lhn_menu().select_my_objects()
assert (lhn_menu.toggle_programs.members_count >=
int(program_info_page.source_obj_id_from_url))
@pytest.mark.smoke_tests
def test_modal_redirects(self, new_program_ui):
"""Tests if after saving and closing lhn_modal app redirects to
the object page.
Generally we start at random url. Here we verify that after saving
and closing lhn_modal we're redirected to an url that contains an
object id.
"""
_, program_info_page = new_program_ui
assert (url.PROGRAMS + "/" + program_info_page.source_obj_id_from_url in
program_info_page.url)
@pytest.mark.smoke_tests
def test_info_tab_is_active_by_default(self, selenium, new_program_ui):
"""Tests if after lhn_modal is saved we're redirected and info
tab is activated.
Because app uses url arguments to remember state of page
(which widget is active), we can simply use url of created
object.
"""
_, program_info_page = new_program_ui
program_info_page.navigate_to()
horizontal_bar = widget_bar.Dashboard(selenium)
assert (horizontal_bar.get_active_widget_name() ==
element.ProgramInfoWidget().WIDGET_HEADER)
@pytest.mark.smoke_tests
def test_info_tab_contains_entered_data(self, new_program_ui):
"""Verify that created object contains data we've entered
into modal."""
modal, program_info_page = new_program_ui
assert (test_utils.HtmlParser.parse_text(modal.ui_title.text) ==
program_info_page.title_entered.text)
assert (modal.ui_description.text ==
program_info_page.description_entered.text)
assert modal.ui_notes.text == program_info_page.notes_entered.text
assert modal.ui_code.text == program_info_page.code_entered.text
assert (modal.ui_program_url.text ==
program_info_page.program_url_entered.text)
assert (modal.ui_reference_url.text ==
program_info_page.reference_url_entered.text)
assert (modal.ui_effective_date.text ==
program_info_page.effective_date_entered.text)
assert modal.ui_stop_date.text == program_info_page.stop_date_entered.text
@pytest.mark.smoke_tests
def test_permalink(self, selenium, new_program_ui):
"""Verify url is copied to clipboard."""
_, program_info_page = new_program_ui
selenium_utils.open_url(selenium, program_info_page.url)
program_info_page = info_widget.Programs(selenium)
program_info_page.open_info_3bbs().select_get_permalink()
# test notification alert
base.AnimatedComponent(
selenium, [locator.WidgetInfoProgram.ALERT_LINK_COPIED],
wait_until_visible=True)
# test generated link
modal = program_info_page.open_info_3bbs().select_edit()
modal.ui_title.paste_from_clipboard(modal.ui_description)
assert modal.ui_title.text == program_info_page.url
@pytest.mark.smoke_tests
def test_edit_modal(self, selenium, new_program_ui):
"""Tests if data is saved after editing program info page edit modal.
"""
_, program_info_page = new_program_ui
selenium_utils.open_url(selenium, program_info_page.url)
program_info_page = info_widget.Programs(selenium)
modal = program_info_page.open_info_3bbs().select_edit()
test_utils.ModalNewPrograms.enter_test_data(modal)
test_utils.ModalNewPrograms.set_start_end_dates(modal, 1, -2)
modal.save_and_close()
selenium_utils.open_url(selenium, program_info_page.url)
updated_program_info_page = info_widget.Programs(selenium)
assert (test_utils.HtmlParser.parse_text(modal.ui_title.text) ==
updated_program_info_page.title_entered.text)
assert (modal.ui_description.text ==
updated_program_info_page.description_entered.text)
assert modal.ui_notes.text == updated_program_info_page.notes_entered.text
assert (modal.ui_program_url.text ==
updated_program_info_page.program_url_entered.text)
assert (modal.ui_reference_url.text ==
updated_program_info_page.reference_url_entered.text)
|
AleksNeStu/ggrc-core
|
test/selenium/src/tests/test_program_page.py
|
Python
|
apache-2.0
| 4,920
|
# This file is part of the GBI project.
# Copyright (C) 2013 Omniscale GmbH & Co. KG <http://omniscale.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .user import *
from .log import *
from .sources import *
from .wfs import *
|
omniscale/gbi-server
|
app/gbi_server/model/__init__.py
|
Python
|
apache-2.0
| 735
|
"""
------------------
theia.cli.simulate
------------------
Event simulation tool.
"""
import asyncio
import time
from random import randint, shuffle
from uuid import uuid4
from datetime import datetime
from threading import Thread
import argparse
import lorem
from theia.comm import Client
from theia.model import Event, EventSerializer
def get_parser():
"""Configure and instantiate the argument parser for the simulation tool.
:returns: configured :class:`argparse.ArgumentParser`.
"""
parser = argparse.ArgumentParser(prog='theia.cli.simulate',
description='Simulate events (debugging)')
parser.add_argument('-H', '--host', default='localhost', dest='host', help='Collector host')
parser.add_argument('-p', '--port', default=6433, dest='port', help='Collector port', type=int)
parser.add_argument('-t', nargs='*', dest='tags', help='Set of tags to choose from')
parser.add_argument('-s', nargs='*', dest='sources', help='Set of event sources to choose from')
parser.add_argument('-c', dest='content', default=None,
help='Use this event content instead of random content.')
parser.add_argument('--content-size', dest='content_size', type=int,
default=None, help='Size of content (approximately)')
parser.add_argument('--delay', dest='delay', default=1.0, type=float,
help='Delay between event in seconds')
return parser
def generate_content(content=None, size=None):
"""Generate random (lorem-ipsum style) content.
If ``content`` is provided, just passes through. Otherwise generates 'lorem-ipsum'
style random content that is of about the provided ``size``. If no size is given,
then it returns just one sentence.
:param str content: optional, if given, the content to be returned.
:param int size: optional, the size of the generated content. If not provided,
then only one sentence is returned. If provided, then generates sentences
with total size >= ``size``. Always generates full sentences.
:returns: ``str``, the generated content.
"""
if content is not None:
return content
if size is None:
return lorem.sentence()
content = lorem.sentence()
while len(content) < size:
content = content + ' ' + lorem.sentence()
return content
def generate_rand_items(items, default):
"""Generates a random subset (``list``) of the provided list of items.
The size of the subset is at least one, and at most is the whole ``items``
list.
If no items provided, then returns a list of just one item - the ``default``
one.
The order of the items in the subset is randomized as well.
:param list items: the list of items to choose from.
:param default: the default item to choose if no list of items is provided.
:returns: :class:`list` randomized subset of the items list.
"""
if not items:
return [default]
if len(items) == 1:
return items
rnd_size = randint(1, len(items))
rndi = [n for n in items]
shuffle(rndi)
return rndi[0:rnd_size]
def generate_rand_event(sources, tags, content, cnt_size):
"""Generate random event from the given choices for sources, tags and content.
:param list sources: list of choices for sources. One random source will be
chosen from the list.
:param list tags: list of choices for tags. Random subset with random order
and size greater than one will be chosen from the given list of tags.
:param str content: if not ``None``, that content will be used. If ``None``,
random content will be generated.
:param int cnt_size: generate content of at least this size. The size of the
        generated content may be greater (the content always contains full sentences)
but it will never be less than ``cnt_size``.
:returns: a random :class:`theia.model.Event` generated from the given choices
values.
"""
source = sources[randint(0, len(sources) - 1)]
return Event(id=str(uuid4()), source=source, timestamp=datetime.now().timestamp(),
tags=generate_rand_items(tags, 'tag-4'),
content=generate_content(content, cnt_size))
def _send_event(args, tags, client, serializer):
"""Generate and send a single event.
"""
event = generate_rand_event(args.sources, tags, args.content, args.content_size)
client.send_event(event)
print(serializer.serialize(event).decode('UTF-8'))
print()
def simulate_events(args):
"""Connects and generates random events.
The events are generated according to the given arguments.
:param argparse.Namespace args: the parsed arguments passed to the program.
"""
loop = asyncio.get_event_loop()
client = Client(host=args.host, port=args.port, path='/event', loop=loop)
client.connect()
ser = EventSerializer()
tags = [args.tags] if isinstance(args.tags, str) else args.tags
class _SenderThread(Thread):
"""Sender Thread. Runs continuously.
"""
def __init__(self):
super(_SenderThread, self).__init__()
self.is_running = False
def run(self):
"""Generate and send events continuously.
"""
self.is_running = True
while self.is_running:
_send_event(args, tags, client, ser)
time.sleep(args.delay)
sender_thread = _SenderThread()
sender_thread.is_running = True
sender_thread.start()
loop.run_forever()
def run_simulate_events():
"""Run the simulation of events.
"""
parser = get_parser()
args = parser.parse_args()
simulate_events(args)
if __name__ == '__main__':
run_simulate_events()
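# CLI sketch (added for illustration; flag names come from get_parser() above,
# the values are placeholders):
#   python -m theia.cli.simulate -H localhost -p 6433 \
#       -s service-a service-b -t error warning info \
#       --content-size 200 --delay 0.5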
|
theia-log/theia
|
theia/cli/simulate.py
|
Python
|
apache-2.0
| 5,856
|
import os
class DatabaseConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = ''mysql://dev:123456@172.21.0.2/MyDBMovies')
SQLALCHEMY_TRACK_MODIFICATIONS = False
config_by_name = dict(
db=DatabaseConfig
)
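# Usage sketch (added for illustration; the Flask wiring below is hypothetical --
# the real app factory lives elsewhere in this project):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(config_by_name['db'])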
|
CALlanoR/virtual_environments
|
web/python/flask_api_sqlalchemy_mysql/movies_api_service/app/main/__init__.py
|
Python
|
apache-2.0
| 223
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "commonrepo.tw",
"name": "Common Repository"
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "example.com"
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
|
yrchen/CommonRepo
|
commonrepo/contrib/sites/migrations/0002_set_site_domain_and_name.py
|
Python
|
apache-2.0
| 951
|