repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
Diyago/Machine-Learning-scripts | DEEP LEARNING/Kaggle Avito Demand Prediction Challenge/stem to SVD.py | Python | apache-2.0 | 8,068 | 0.028398 | #Thanks for the approach https://github.com/ML-Person/My-solution-to-Avito-Challenge-2018 (@nikita)
import pandas as pd
import numpy as np
import gc
import os
import re
import pickle
import string
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from scipy.sparse import hstack, csr_matrix
import lightgbm as lgb
# for text data
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline
pd.set_option('max_columns', 84)
import warnings
warnings.filterwarnings('ignore')
PATH_TO_DATA = '/Avito'
traintrain == pdpd..read_csvread_cs (os.path.join(PATH_TO_DATA, 'train.csv'))
test = pd.read_csv(os.path.join(PATH_TO_DATA, 'test.csv'))
'''
item_id - Ad id.
user_id - User id.
region - Ad region.
city - Ad city.
parent_category_name - Top level ad category as classified by Avito's ad model.
category_name - Fine grain ad category as classified by Avito's ad model.
param_1 - Optional parameter from Avito's ad model.
param_2 - Optional parameter from Avito's ad model.
param_3 - Optional parameter from Avito's ad model.
title - Ad title.
description - Ad description.
price - Ad price.
item_seq_number - Ad sequential number for user.
activation_date - Date ad was placed.
user_type - User type.
image - Id code of image. Ties to a jpg file in train_jpg. Not every ad has an image.
image_top_1 - Avito's classification code for the image.
deal_probability - The target variable. This is the likelihood that an ad actually sold something. It's not possible to verify every transaction with certainty, so this column's value can be any float from zero to one.
'''
categorical = [
'image_top_1', 'param_1', 'param_2', 'param_3',
'city', 'region', 'category_name', 'parent_category_name', 'user_type'
]
# easy preprocessing
text_cols = [
'title', 'description', 'param_1', 'param_2', 'param_3',
'city', 'region', 'category_name', 'parent_category_name'
]
for col in text_cols:
for df in [train, test]:
df[col] = df[col].str.replace(r"[^А-Яа-яA-Za-z0-9,!?@\'\`\"\_\n]", ' ')
df[col].fillna("NA", inplace=True)
df[col] = df[col].str.lower()
forfor dfdf inin [[traintrain,, testtest]:]:
dfdf[['len_description''len_de ] = df['description'].apply(lambda x: len(str(x)))
df['num_desc_punct'] = df['description'].apply(lambda x: len([c for c in str(x) if c in string.punctuation])) / df['len_description']
for col in ['description', 'title']:
df['num_words_' + col] = df[col].apply(lambda comment: len(comment.split()))
df['num_unique_words_' + col] = df[col].apply(lambda comment: len(set(w for w in comment.split())))
# percentage of unique words
df['words_vs_unique_title'] = df['num_unique_words_title'] / df['num_words_title'] * 100
df['words_vs_unique_description'] = df['num_unique_words_description'] / df['num_words_description'] * 100
# [DUMP] TRAIN + TEST# [DUMP]
train.to_csv(os.path.join(PATH_TO_DATA, 'train_all_features.csv'), index=False, encoding='utf-8')
test.to_csv(os.path.join(PATH_TO_DATA, 'test_all_features.csv'), index=False, encoding='utf-8')
del train, test
gc.collect()
train = pd.read_csv(os.path.join(PATH_TO_DATA, 'train.csv'))
test = pd.read_csv(os.path.join(PATH_TO_DATA, 'test.csv
stemmer = SnowballStemmer("russian", ignore_stopwords=False)
train['title_stemm'] = train['title'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
test['title_stemm'] = test['title'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
train['description_stemm'] = train['description'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
test['description_stemm'] = test['description'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
train['text'] = train['param_1'] + " " + train['param_2'] + " " + train['param_3'] + " " + \
train['city'] + " " + train['category_name'] + " " + train['parent_category_name']
test['text'] = test['param_1'] + " " + test['param_2'] + " " + test['param_3'] + " " + \
test['city'] + " " + test['category_name'] + " " + test['parent_category_name']
train['text_stemm'] = train['text'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
test['text_stemm'] = test['text'].apply(lambda string: ' '.join([stemmer.stem(w) for w in string.split()]))
for df in [train, test]:
df.drop(['title', 'description', 'text'], axis=1, inplace=True)
#TF-IDF + SVD
# CountVectorizer for 'title'
title_tfidf = CountVectorizer(stop_words=stopwords.words('russian'), lowercase=True,
token_pattern=r'\w{1,}', ngram_range=(1, 1))
full_tfidf = title_tfidf.fit_transform(train['title_stemm'].values.tolist() + test['title_stemm'].values.tolist())
train_title_tfidf = title_tfidf.transform(train['title_stemm'].values.tolist())
test_title_tfidf = title_tfidf.transform(test['title_stemm'].values.tolist())
### SVD Components ###
n_comp = 10
svd_obj = TruncatedSVD(n_components=n_comp, algorithm='arpack')
svd_obj.fit(full_tfidf)
train_svd = pd.DataFrame(svd_obj.transform(train_title_tfidf))
test_svd = pd.DataFrame(svd_obj.transform(test_title_tfidf))
train_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
test_svd.columns = ['svd_title_'+str(i+1) for i in range(n_comp)]
train_svd['item_id'] = train['item_id']
test_svd['item_id'] = test['item_id']
# Merge and delete
train = train.merge(train_svd, on='item_id', how='left')
test = test.merge(test_svd, on='item_id', how='left')
del full_tfidf, train_svd, test_svd
gc.collect()
# TF-IDF for 'description'
desc_tfidf = TfidfVectorizer(stop_words=stopwords.words('russian'), token_pattern=r'\w{1,}',
lowercase=True, ngram_range=(1, 2), norm='l2', smooth_idf=False,
max_features=17000)
full_tfidf = desc_tfidf.fit_transform(train['description_stemm'].values.tolist() + test['description_stemm'].values.tolist())
train_desc_tfidf = desc_tfidf.transform(train['description_stemm'].values.tolist())
test_desc_tfidf = desc_tfidf.transform(test['description_stemm'].values.tolist())
### SVD Components ###
n_comp = 10
svd_obj = TruncatedSVD(n_components=n_comp, algorithm='arpack')
svd_obj.fit(full_tfidf)
train_svd = pd.DataFrame(svd_obj.transform(train_desc_tfidf))
test_svd = pd.DataFrame(svd_obj.transform(test_desc_tfidf))
train_svd.columns = ['svd_description_'+str(i+1) for i in range(n_comp)]
test_svd.columns = ['svd_description_'+str(i+1) for i in range(n_comp)]
train_svd['item_id'] = train['item_id']
test_svd['item_id'] = test['item_id']
# Merge and delete
train = train.merge(train_svd, on='item_id', how='left')
test = test.merge(test_svd, on='item_id', how='left')
del full_tfidf, train_svd, test_svd
gc.collect()
# [STACKING]# [STACK
train_tfidf = csr_matrix(hstack([train_title_tfidf, train_desc_tfidf, train_tex | t_tfidf]))
test_tfidf = csr_matrix(hstack([test_title_tfidf, test_desc_tfidf, test_text_tfidf]))
del train_title_tfidf, train_desc_tfidf, train_text_tfidf
del test_title_tfidf, test_desc_tfidf, tes | t_text_tfidf
gc.collect()
vocab = np.hstack([
title_tfidf.get_feature_names(),
desc_tfidf.get_feature_names(),
text_tfidf.get_feature_names()
])
[DUMP] TF-IDF pickle files + vocabulary
with open(os.path.join(PATH_TO_DATA, 'train_tfidf.pkl'), 'wb') as train_tfidf_pkl:
pickle.dump(train_tfidf, train_tfidf_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'test_tfidf.pkl'), 'wb') as test_tfidf_pkl:
pickle.dump(test_tfidf, test_tfidf_pkl, protocol=2)
with open(os.path.join(PATH_TO_DATA, 'vocab.pkl'), 'wb') as vocab_pkl:
pickle.dump(vocab, vocab_pkl, protocol=2)
del train, train_tfidf, test, test_tfidf, vocab
gc.collect()
|
xialeiliu/RankIQA | src/data_layer/rank_layer_live.py | Python | mit | 4,545 | 0.010341 | import cv2
import caffe
import numpy as np
import multiprocessing as mtp
import pdb
import os.path as osp
class DataLayer(caffe.Layer):
def setup(self, bottom, top):
self._name_to_top_map = {}
self._name_to_top_map['data'] = 0
self._name_to_top_map['label'] = 1
# === Read input parameters ===
self.workers= mtp.Pool(10)
# params is a python dictionary with layer parameters.
params = eval(self.param_str)
# Check the paramameters for validity.
check_params(params)
# store input as class variables
self.batch_size = params['batch_size']
self.pascal_root = params['pascal_root']
self.im_shape = params['im_shape']
# get list of image indexes.
list_file = params['split'] + '.txt'
filename = [line.rstrip('\n') for line in open(
osp.join(self.pascal_root, list_file))]
self._roidb = []
self.scores =[]
for i in filename:
self._roidb.append(i.split()[0])
self.scores.append(float(i.split()[1]))
self._perm = None
self._cur = 0
self.num =0
top[0].reshape(
self.batch_size, 3, params['im_shape'][0], params['im_shape'][1])
top[1].reshape(self.batch_size, 1)
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch."""
db_inds = []
dis = 4 # total number of distortions in live dataset
batch = 2 # number of images for each distortion level
level = 6 # distortion levels for each mini_batch = level * dis_mini*batch
#shuff = np.random.permutation(range(dis))
Num = len(self.scores)/dis/level
for k in range(dis):
for i in range(level):
temp = self.num
for j in range(batch):
db_inds.append(len(self.scores)/dis*k+i*Num+temp)
temp = temp +1
self.num = self.num+batch
if Num-self.num<batch:
self.num=0
db_inds = np.asarray(db_inds)
return db_inds
def get_minibatch(self,minibatch_db):
"""Given a roidb, construct a minibatch sampled from it."""
# Get the input image blob, formatted for caffe
jobs =self.workers.map(preprocess,minibatch_db)
#print len(jobs)
index = 0
images_train = np.zeros([self.batch_size,3,224,224],np.float32)
#pdb.set_trace()
for index_job in range(len(jobs)):
images_train[index,:,:,:] = jobs[index_job]
index += 1
blobs = {'data': images_train}
return blobs
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
db_inds = self._get_next_minibatch_inds()
minibatch_ | db = []
for i in range(len(db_inds)):
minibatch_db.append(self._roidb[int(db_inds[i])])
#minibatc | h_db = [self._roidb[i] for i in db_inds]
#print minibatch_db
scores = []
for i in range(len(db_inds)):
scores.append(self.scores[int(db_inds[i])])
blobs = self.get_minibatch(minibatch_db)
blobs ['label'] =np.asarray(scores)
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*(blob.shape))
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def preprocess(data):
sp = 224
im = np.asarray(cv2.imread(data))
x = im.shape[0]
y = im.shape[1]
x_p = np.random.randint(x-sp,size=1)[0]
y_p = np.random.randint(y-sp,size=1)[0]
#print x_p,y_p
images = im[x_p:x_p+sp,y_p:y_p+sp,:].transpose([2,0,1])
#print images.shape
return images
def check_params(params):
"""
A utility function to check the parameters for the data layers.
"""
assert 'split' in params.keys(
), 'Params must include split (train, val, or test).'
required = ['batch_size', 'pascal_root', 'im_shape']
for r in required:
assert r in params.keys(), 'Params must include {}'.format(r)
|
dr-glenn/PiClock | Clock/q1.py | Python | mit | 3,326 | 0.001203 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'q1.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(800, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.listWidget = QtGui.QListWidget(self.centralwidget)
self.listWidget.setGeometry(QtCore.QRect(70, 40, 256, 192))
self.listWidget.setObjectName(_fromUtf8("listWidget"))
item = QtGui.QListWidgetItem()
self.listWidget.addItem(item)
item = QtGui.QListWidgetItem()
self.listWidget.addItem(item)
item = QtGui.QListWidgetItem()
self.listWidget.addItem(item)
self.radioButton = QtGui.QRadioButton(self.centralwidget)
self.radioButton.setGeometry(QtCore.QRect(170, 390, 160, 31))
self.radioButton.setObjectName(_fromUtf8("radioButton"))
self.radioButton_2 = QtGui.QRadioButton(self.centralwidget)
self.radioButton_2.setGeometry(QtCore.QRect(170, 430, 160, 31))
self.radioButton_2.setObjectName(_fromUtf8("radioButton_2"))
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 38))
self.menubar.setObjectName(_fromUtf8("menubar"))
| self.menuMain_Window = QtGui.QMenu(self.menubar)
self.menuMain_Window.setObjectName(_fromUtf8("menuMain_Window"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.menubar.addAction(self.menuMain_Window.menuAction())
| self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
__sortingEnabled = self.listWidget.isSortingEnabled()
self.listWidget.setSortingEnabled(False)
item = self.listWidget.item(0)
item.setText(_translate("MainWindow", "Item 1", None))
item = self.listWidget.item(1)
item.setText(_translate("MainWindow", "Item 2", None))
item = self.listWidget.item(2)
item.setText(_translate("MainWindow", "Item 3", None))
self.listWidget.setSortingEnabled(__sortingEnabled)
self.radioButton.setText(_translate("MainWindow", "Rad 1", None))
self.radioButton_2.setText(_translate("MainWindow", "Rad 2", None))
self.menuMain_Window.setTitle(_translate("MainWindow", "Main Window", None))
|
oomlout/oomlout-OOMP | old/OOMPpart_RESE_0805_X_O202_01.py | Python | cc0-1.0 | 243 | 0 | import OOMP
newPart = OOMP.oompItem(9439)
newPart.addTag("oompType" | , "RESE")
newPa | rt.addTag("oompSize", "0805")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "O202")
newPart.addTag("oompIndex", "01")
OOMP.parts.append(newPart)
|
crobertsbmw/RobertsAB | admin.py | Python | mit | 283 | 0.010601 | from RobertsAB.models impor | t Test, Experiment
from django.contrib import admin
class TestInline(admin.StackedInline):
model = Test
class ExperimentAdmin(admin.ModelAdmin):
inli | nes = [TestInline]
admin.site.register(Test)
admin.site.register(Experiment, ExperimentAdmin)
|
hrayr-artunyan/shuup | shuup_tests/utils/test_numbers.py | Python | agpl-3.0 | 745 | 0.001342 | # This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file | in the root directory of this source tree.
from decimal import Decimal
import pytest
from shuup.utils.numbers import parse_decimal_string
@pytest.mark.parametrize("input_val, expected_val", [
(0.0, Decimal('0.0')),
(1.1, Decimal('1.1')),
(-1.1, Decimal('-1.1')),
(1e10, Decimal('10000000000')),
(1e10, Decimal('1e10')),
(1e-10, Decimal('0.0000000001')),
(1e-10, Decimal('1e-10'))
])
def test_parse_decimal_string_with_float_input(input_val, expected_val):
result = | parse_decimal_string(input_val)
assert result == expected_val
|
Hoikas/korman | korman/exporter/convert.py | Python | gpl-3.0 | 21,736 | 0.002484 | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from collections import defaultdict
from contextlib import ExitStack
import functools
from pathlib import Path
from ..helpers import TemporaryObject
from ..korlib import ConsoleToggler
from PyHSPlasma import *
from . import animation
from . import camera
from . import decal
from . import explosions
from . import etlight
from . import image
from . import locman
from . import logger
from . import manager
from . import mesh
from . import outfile
from . import physics
from . import rtlight
from . import utils
class Exporter:
def __init__(self, op):
self._op = op # Blender export operator
self._objects = []
self.actors = set()
self.want_node_trees = defaultdict(set)
self.exported_nodes = {}
def run(self):
log = logger.ExportVerboseLogger if self._op.verbose else logger.ExportProgressLogger
with ConsoleToggler(self._op.show_console), log(self._op.filepath) as self.report, ExitStack() as self.exit_stack:
# Step 0: Init export resmgr and stuff
self.mgr = manager.ExportManager(self)
self.mesh = mesh.MeshConverter(self)
self.physics = physics.PhysicsConverter(self)
self.light = rtlight.LightConverter(self)
self.animation = animation.AnimationConverter(self)
self.output = outfile.OutputFiles(self, self._op.filepath)
self.camera = camera.CameraConverter(self)
self.image = image.ImageCache(self)
self.locman = locman.LocalizationConverter(self)
self.decal = decal.DecalConverter(self)
self.oven = etlight.LightBaker(mesh=self.mesh, report=self.report)
# Step 0.8: Init the progress mgr
self.mesh.add_progress_presteps(self.report)
self.report.progress_add_step("Collecting Objects")
self.report.progress_add_step("Verify Competence")
self.report.progress_add_step("Touching the Intangible")
self.report.progress_add_step("Harvesting Actors")
if self._op.lighting_method != "skip":
etlight.LightBaker.add_progress_steps(self.report)
self.report.progress_add_step("Exporting Scene Objects")
self.report.progress_add_step("Exporting Logic Nodes")
self.report.progress_add_step("Finalizing Plasma Logic")
self.report.progress_add_step("Handling Snakes")
self.report.progress_add_step("Exporting Textures")
self.report.progress_add_step("Composing Geometry")
self.report.progress_add_step("Saving Age Files")
self.report.progress_start("EXPORTING AGE")
# Step 0.9: Apply modifiers to all meshes temporarily.
with self.mesh:
# Step 1: Create the age info and the pages
self._export_age_info()
# Step 2: Gather a list of objects that we need to export, given what the user has told
# us to export (both in the Age and Object Properties)... fun
self._collect_objects()
# Step 2.1: Run through all the objects we collected in Step 2 and make sure there
# is no ruddy funny business going on.
self._check_sanity()
# Step 2.2: Run through all the objects again and ask them to "pre_export" themselves.
# In other words, generate any ephemeral Blender objects that need to be exported.
self._pre_export_scene_objects()
# Step 2.5: Run through all the objects we collected in Step 2 and see if any relationships
# that the artist made requires something to have | a CoordinateInterface
self._harvest_actors()
# S | tep 2.9: It is assumed that static lighting is available for the mesh exporter.
# Indeed, in PyPRP it was a manual step. So... BAKE NAO!
self._bake_static_lighting()
# Step 3: Export all the things!
self._export_scene_objects()
# Step 3.1: Ensure referenced logic node trees are exported
self._export_referenced_node_trees()
# Step 3.2: Now that all Plasma Objects (save Mipmaps) are exported, we do any post
# processing that needs to inspect those objects
self._post_process_scene_objects()
# Step 3.3: Ensure any helper Python files are packed
self._pack_ancillary_python()
# Step 4: Finalize...
self.mesh.material.finalize()
self.mesh.finalize()
# Step 5: FINALLY. Let's write the PRPs and crap.
self._save_age()
# Step 5.1: Save out the export report.
# If the export fails and this doesn't save, we have bigger problems than
# these little warnings and notices.
self.report.progress_end()
self.report.save()
# Step 5.2: If any nonfatal errors were encountered during the export, we will
# raise them here, now that everything is finished, to draw attention
# to whatever the problem might be.
self.report.raise_errors()
def _bake_static_lighting(self):
if self._op.lighting_method != "skip":
self.oven.bake_static_lighting(self._objects)
def _collect_objects(self):
scene = bpy.context.scene
self.report.progress_advance()
self.report.progress_range = len(scene.objects)
inc_progress = self.report.progress_increment
# Grab a naive listing of enabled pages
age = scene.world.plasma_age
pages_enabled = frozenset((page.name for page in age.pages if page.enabled and self._op.version in page.version))
all_pages = frozenset((page.name for page in age.pages))
# Because we can have an unnamed or a named default page, we need to see if that is enabled...
for page in age.pages:
if page.seq_suffix == 0:
default_enabled = page.enabled
default_inited = True
break
else:
default_enabled = True
default_inited = False
# Now we loop through the objects with some considerations:
# - The default page may or may not be defined. If it is, it can be disabled. If not, it
# can only ever be enabled.
# - Don't create the Default page unless it is used (implicit or explicit). It is a failure
# to export a useless file.
# - Any arbitrary page can be disabled, so check our frozenset.
# - Also, someone might have specified an invalid page, so keep track of that.
error = explosions.UndefinedPageError()
for obj in scene.objects:
if obj.plasma_object.enabled:
page = obj.plasma_object.page
if not page and not default_inited:
self.mgr.create_page(self.age_name, "Default", 0)
default_inited = True
if (default_enabled and not page) or (page in pages_enabled):
self._objects.append(obj)
elif page not in all_pages:
error.add(page, obj.name)
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/splunk_aoblib/setup_util.py | Python | isc | 13,234 | 0.001662 | from builtins import str
from builtins import range
from builtins import object
import json
import os
import solnlib.utils as utils
from splunktaucclib.global_config import GlobalConfig, GlobalConfigSchema
'''
Usage Examples:
setup_util = Setup_Util(uri, session_key)
setup_util.get_log_level()
setup_util.get_proxy_settings()
setup_util.get_credential_account("my_account_name")
setup_util.get_customized_setting("my_customized_field_name")
'''
'''
setting object structure.
It is stored in self.__cached_global_settings
Note, this structure is only maintained in this util.
setup_util transforms global settings in os environment or from ucc into this structure.
{
"proxy_settings": {
"proxy_enabled": False/True,
"proxy_url": "example.com",
"proxy_port": "1234",
"proxy_username": "",
"proxy_password": "",
"proxy_type": "http",
"proxy_rdns": False/True
},
"log_settings": {
"loglevel": "DEBUG"
},
"credential_settings": [{
"name": "account_id",
"username": "example_account",
"password": "example_password"
}, { # supported by ucc, not seen any usage in AoB
"api_key": "admin",
"api_uuid": "admin",
"endpoint": "some url",
"name": "account1"
}],
"customized_settings": {
"text_name": "content",
"pass_name": "password",
"checkbox": 0/1
}
}
'''
GLOBAL_SETTING_KEY = "global_settings"
AOB_TEST_FLAG = 'AOB_TEST'
PROXY_SETTINGS = "proxy_settings"
LOG_SETTINGS = "log_settings"
CREDENTIAL_SETTINGS = "credential_settings"
CUSTOMIZED_SETTINGS = "customized_settings"
UCC_PROXY = "proxy"
UCC_LOGGING = "logging"
UCC_CUSTOMIZED = "additional_parameters"
UCC_CREDENTIAL = "account"
CONFIGS = [CREDENTIAL_SETTINGS]
SETTINGS = [PROXY_SETTINGS, LOG_SETTINGS, CUSTOMIZED_SETTINGS]
PROXY_ENABLE_KEY = 'proxy_enabled'
PROXY_RDNS_KEY = 'proxy_rdns'
LOG_LEVEL_KEY = 'loglevel'
LOG_LEVEL_KEY_ENV = 'log_level'
TYPE_CHECKBOX = "checkbox"
ALL_SETTING_TYPES = ['text', 'password', 'checkbox', 'dropdownlist', 'm | ulti_dropdownlist', 'radiogroup']
def get_schema_path():
dirname = os.path.dirname
basedir = dirname(dirname(dirname(dirname((dirname(__file__))))))
return os.path.join(basedir, 'appserver', 'static', 'js', 'build', 'globalConfig.json')
class Setup_Util(object):
def __init__(self, uri, session_key, logger=None):
| self.__uri = uri
self.__session_key = session_key
self.__logger = logger
self.scheme, self.host, self.port = utils.extract_http_scheme_host_port(
self.__uri)
self.__cached_global_settings = {}
self.__global_config = None
def init_global_config(self):
if self.__global_config is not None:
return
schema_file = get_schema_path()
if not os.path.isfile(schema_file):
self.log_error("Global config JSON file not found!")
self.__global_config = None
else:
with open(get_schema_path()) as f:
json_schema = ''.join([l for l in f])
self.__global_config = GlobalConfig(self.__uri, self.__session_key,
GlobalConfigSchema(json.loads(json_schema)))
def log_error(self, msg):
if self.__logger:
self.__logger.error(msg)
def log_info(self, msg):
if self.__logger:
self.__logger.info(msg)
def log_debug(self, msg):
if self.__logger:
self.__logger.debug(msg)
def _parse_conf(self, key):
if os.environ.get(AOB_TEST_FLAG, 'false') == 'true':
global_settings = self._parse_conf_from_env(json.loads(os.environ.get(GLOBAL_SETTING_KEY, '{}')))
return global_settings.get(key)
else:
return self._parse_conf_from_global_config(key)
def _parse_conf_from_env(self, global_settings):
'''
this is run in test env
'''
if not self.__cached_global_settings:
# format the settings, the setting from env is from global_setting
# meta
self.__cached_global_settings = {}
for s_k, s_v in list(global_settings.items()):
if s_k == PROXY_SETTINGS:
proxy_enabled = s_v.get(PROXY_ENABLE_KEY)
proxy_rdns = s_v.get(PROXY_RDNS_KEY)
if type(proxy_enabled) != bool:
s_v[PROXY_ENABLE_KEY] = utils.is_true(proxy_enabled)
if type(proxy_rdns) != bool:
s_v[PROXY_RDNS_KEY] = utils.is_true(proxy_rdns)
self.__cached_global_settings[PROXY_SETTINGS] = s_v
elif s_k == LOG_SETTINGS:
self.__cached_global_settings[LOG_SETTINGS] = {
LOG_LEVEL_KEY: s_v.get(LOG_LEVEL_KEY_ENV)
}
elif s_k == CREDENTIAL_SETTINGS:
# add account id to accounts
for i in range(0, len(s_v)):
s_v[i]['name'] = 'account' + str(i)
self.__cached_global_settings[CREDENTIAL_SETTINGS] = s_v
else: # should be customized settings
self.__cached_global_settings[CUSTOMIZED_SETTINGS] = {}
for s in s_v:
field_type = s.get('type')
if not field_type:
self.log_error(
'unknown type for customized var:{}'.format(s))
continue
self.__cached_global_settings['customized_settings'][s.get('name', '')] = self._transform(
s.get("value", ""), field_type)
return self.__cached_global_settings
def _parse_conf_from_global_config(self, key):
if self.__cached_global_settings and key in self.__cached_global_settings:
return self.__cached_global_settings.get(key)
self.init_global_config()
if self.__global_config is None:
return None
if key in CONFIGS:
accounts = self.__global_config.configs.load().get(UCC_CREDENTIAL, [])
if accounts:
for account in accounts:
if 'disabled' in account:
del account['disabled']
self.__cached_global_settings[CREDENTIAL_SETTINGS] = accounts
elif key in SETTINGS:
settings = self.__global_config.settings.load()
self.__cached_global_settings.update({UCC_PROXY: None, UCC_LOGGING: None, UCC_CUSTOMIZED: None})
customized_setting = {}
for setting in settings.get('settings', []):
# filter out disabled setting page and 'disabled' field
if setting.get('disabled', False):
continue
if setting['name'] == UCC_LOGGING:
self.__cached_global_settings[LOG_SETTINGS] = {
LOG_LEVEL_KEY: setting.get(LOG_LEVEL_KEY)
}
elif setting['name'] == UCC_PROXY:
if 'disabled' in setting:
del setting['disabled']
setting[PROXY_ENABLE_KEY] = utils.is_true(setting.get(PROXY_ENABLE_KEY, '0'))
setting[PROXY_RDNS_KEY] = utils.is_true(setting.get(PROXY_RDNS_KEY, '0'))
self.__cached_global_settings[PROXY_SETTINGS] = setting
else: # should be customized settings
if 'disabled' in setting:
del setting['disabled']
customized_setting.update(setting)
self.__cached_global_settings[CUSTOMIZED_SETTINGS] = customized_setting
return self.__cached_global_settings.get(key)
def get_log_level(self):
log_level = "INFO"
log_settings = self._parse_conf(LOG_SETTINGS)
if log_settings is None:
self.log_info("Log level is not set, use default INFO")
else:
log_level = log_settings.get(LOG_LEVEL_KEY, None)
if not log_level:
|
brandond/ansible | lib/ansible/modules/storage/hpe3par/ss_3par_cpg.py | Python | gpl-3.0 | 9,304 | 0.000967 | #!/usr/bin/python
# Copyright: (c) 2018, Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
short_description: Manage HPE StoreServ 3PAR CPG
author:
- Farhan Nomani (@farhan7500)
- Gautham P Hegde (@gautamphegde)
description:
- Create and delete CPG on HPE 3PAR.
module: ss_3par_cpg
options:
cpg_name:
description:
- Name of the CPG.
required: true
disk_type:
choices:
- FC
- NL
- SSD
description:
- Specifies that physical disks must have the specified device type.
domain:
description:
- Specifies the name of the domain in which the object will reside.
growth_increment:
description:
- Specifies the growth increment(in MiB, GiB or TiB) the amount of logical disk storage
created on each auto-grow operation.
growth_limit:
description:
- Specifies that the autogrow operation is limited to the specified
storage amount that sets the growth limit(in MiB, GiB or TiB).
growth_warning:
description:
- Specifies that the threshold(in MiB, GiB or TiB) of used logical disk space when exceeded
results in a warning alert.
high_availability:
choices:
- PORT
- CAGE
- MAG
description:
- Specifies that the layout must support the failure of one port pair,
one cage, or one magazine.
raid_type:
choices:
- R0
- R1
- R5
- R6
description:
- Specifies the RAID type for the logical disk.
set_size:
description:
- Specifies the set size in the number of chunklets.
state:
choices:
- present
- absent
description:
- Whether the specified CPG should exist or not.
required: true
secure:
description:
- Specifies whether the certificate needs to be validated while communicating.
type: bool
default: no
extends_documentation_fragment: hpe3par
version_added: 2.8
'''
EXAMPLES = r'''
- name: Create CPG sample_cpg
ss_3par_cpg:
storage_system_ip: 10.10.10.1
storage_system_username: username
storage_system_password: password
state: present
cpg_name: sample_cpg
domain: sample_domain
growth_increment: 32000 MiB
growth_limit: 64000 MiB
growth_warning: 48000 MiB
raid_type: R6
set_size: 8
high_availability: MAG
disk_type: FC
secure: no
- name: Delete CPG sample_cpg
ss_3par_cpg:
storage_system_ip: 10.10.10.1
storage_system_username: username
storage_system_password: password
state: absent
cpg_name: sample_cpg
secure: no
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.storage.hpe3par import hpe3par
# The HPE 3PAR SDK is optional at import time; main() fails with a clear
# message when it is missing (see the HAS_3PARCLIENT check there).
try:
    from hpe3par_sdk import client
    from hpe3parclient import exceptions
    HAS_3PARCLIENT = True
except ImportError:
    HAS_3PARCLIENT = False
def validate_set_size(raid_type, set_size):
    """Return True when *set_size* is one of the chunklet set sizes the SDK
    allows for *raid_type*; False for an unset RAID type or invalid size."""
    if not raid_type:
        return False
    allowed_sizes = client.HPE3ParClient.RAID_MAP[raid_type]['set_sizes']
    return set_size in allowed_sizes
def cpg_ldlayout_map(ldlayout_dict):
    """Translate the 'RAIDType' and 'HA' entries of *ldlayout_dict* from the
    module's option strings to the values the 3PAR SDK expects.

    Mutates the dict in place and returns it; unset/empty entries are left
    untouched.
    """
    raid = ldlayout_dict['RAIDType']
    if raid:
        ldlayout_dict['RAIDType'] = \
            client.HPE3ParClient.RAID_MAP[raid]['raid_value']
    ha = ldlayout_dict['HA']
    if ha:
        ldlayout_dict['HA'] = getattr(client.HPE3ParClient, ha)
    return ldlayout_dict
def create_cpg(
        client_obj,
        cpg_name,
        domain,
        growth_increment,
        growth_limit,
        growth_warning,
        raid_type,
        set_size,
        high_availability,
        disk_type):
    """Create a CPG on the array if it does not already exist.

    Growth values are human-readable sizes ("32000 MiB") converted to MiB
    before the WSAPI call.  Returns a (success, changed, message) tuple in
    the convention used by the other helpers in this module.
    """
    try:
        if not validate_set_size(raid_type, set_size):
            return (False, False, "Set size %s not part of RAID set %s" % (set_size, raid_type))
        if not client_obj.cpgExists(cpg_name):
            ld_layout = dict()
            disk_patterns = []
            if disk_type:
                # Map the option string (FC/NL/SSD) to the SDK constant.
                disk_type = getattr(client.HPE3ParClient, disk_type)
                disk_patterns = [{'diskType': disk_type}]
            ld_layout = {
                'RAIDType': raid_type,
                'setSize': set_size,
                'HA': high_availability,
                'diskPatterns': disk_patterns}
            ld_layout = cpg_ldlayout_map(ld_layout)
            if growth_increment is not None:
                growth_increment = hpe3par.convert_to_binary_multiple(
                    growth_increment)
            if growth_limit is not None:
                growth_limit = hpe3par.convert_to_binary_multiple(
                    growth_limit)
            if growth_warning is not None:
                growth_warning = hpe3par.convert_to_binary_multiple(
                    growth_warning)
            # fix: the 'growthIncrementMiB' and 'usedLDWarningAlertMiB' keys
            # had been corrupted by stray " | " sequences, which would have
            # sent unknown keys to createCPG().
            optional = {
                'domain': domain,
                'growthIncrementMiB': growth_increment,
                'growthLimitMiB': growth_limit,
                'usedLDWarningAlertMiB': growth_warning,
                'LDLayout': ld_layout}
            client_obj.createCPG(cpg_name, optional)
        else:
            return (True, False, "CPG already present")
    except exceptions.ClientException as e:
        return (False, False, "CPG creation failed | %s" % (e))
    return (True, True, "Created CPG %s successfully." % cpg_name)
def delete_cpg(
        client_obj,
        cpg_name):
    """Delete *cpg_name* from the array when present.

    Returns a (success, changed, message) tuple.
    """
    try:
        if not client_obj.cpgExists(cpg_name):
            return (True, False, "CPG does not exist")
        client_obj.deleteCPG(cpg_name)
    except exceptions.ClientException as e:
        return (False, False, "CPG delete failed | %s" % e)
    return (True, True, "Deleted CPG %s successfully." % cpg_name)
def main():
module = AnsibleModule(argument_spec=hpe3par.cpg_argument_spec(),
required_together=[['raid_type', 'set_size']])
if not HAS_3PARCLIENT:
module.fail_json(msg='the python hpe3par_sdk library is required (https://pypi.org/project/hpe3par_sdk)')
if len(module.params["cpg_name"]) < 1 or len(module.params["cpg_name"]) > 31:
module.fail_json(msg="CPG name must be at least 1 character and not more than 31 characters")
storage_system_ip = module.params["storage_system_ip"]
storage_system_username = module.params["storage_system_username"]
storage_system_password = module.params["storage_system_password"]
cpg_name = module.params["cpg_name"]
domain = module.params["domain"]
growth_increment = module.params["growth_increment"]
growth_limit = module.params["growth_limit"]
growth_warning = module.params["growth_warning"]
raid_type = module.params["raid_type"]
set_size = module.params["set_size"]
high_availability = module.params["high_availability"]
disk_type = module.params["disk_type"]
secure = module.params["secure"]
wsapi_url = 'https://%s:8080/api/v1' % storage_system_ip
try:
client_obj = client.HPE3ParClient(wsapi_url, secure)
except exceptions.SSLCertFailed:
module.fail_json(msg="SSL Certificate Failed")
except exceptions.ConnectionError:
module.fail_json(msg="Connection Error")
except exceptions.UnsupportedVersion:
module.fail_json(msg="Unsupported WSAPI version")
except Exception as e:
module.fail_json(msg="Initializing client failed. %s" % e)
if storage_system_username is None or storage_system_password is None:
module.fail_json(msg="Storage system username or password is None")
if cpg_name is None:
module.fail_json(msg="CPG Name is None")
# States
if module.params["state"] == "present":
try:
cl |
christianbrodbeck/nipype | nipype/pipeline/plugins/dagman.py | Python | bsd-3-clause | 4,865 | 0.001028 | """Parallel workflow execution via Condor DAGMan
"""
import os
import sys
from .base import (GraphPluginBase, logger)
from ...interfaces.base import CommandLine
class CondorDAGManPlugin(GraphPluginBase):
    """Execute a workflow graph using Condor DAGMan.

    The plugin_args input to run can be used to control the DAGMan execution.
    Currently supported options are:

    - template : submit spec template to use for job submission. All
      generated submit specs are appended to this template. This can be a
      str or a filename.
    - submit_specs : additional submit specs that are appended to the
      generated submit specs to allow for overriding or extending the
      defaults. This can be a str or a filename.
    - dagman_args : arguments to be prepended to the job execution script in
      the dagman call
    """
    # XXX feature wishlist
    # - infer data file dependencies from jobs
    # - infer CPU requirements from jobs
    # - infer memory requirements from jobs
    # - looks like right now all jobs come in here, regardless of whether they
    #   actually have to run. would be good to be able to decide whether they
    #   actually have to be scheduled (i.e. output already exist).
    def __init__(self, **kwargs):
        # Defaults used when plugin_args does not override them.
        self._template = "universe = vanilla\nnotification = Never"
        self._submit_specs = ""
        self._dagman_args = ""
        if 'plugin_args' in kwargs:
            plugin_args = kwargs['plugin_args']
            if 'template' in plugin_args:
                self._template = plugin_args['template']
                # Each of these options may be given inline or as a path to
                # a file whose contents are used instead.
                if os.path.isfile(self._template):
                    self._template = open(self._template).read()
            if 'submit_specs' in plugin_args:
                self._submit_specs = plugin_args['submit_specs']
                if os.path.isfile(self._submit_specs):
                    self._submit_specs = open(self._submit_specs).read()
            if 'dagman_args' in plugin_args:
                self._dagman_args = plugin_args['dagman_args']
        super(CondorDAGManPlugin, self).__init__(**kwargs)
    def _submit_graph(self, pyfiles, dependencies):
        """Write one submit spec per job script, assemble a DAG file, and
        hand the DAG to condor_submit_dag."""
        # location of all scripts, place dagman output in here too
        batch_dir, _ = os.path.split(pyfiles[0])
        # DAG description filename
        dagfilename = os.path.join(batch_dir, 'workflow.dag')
        with open(dagfilename, 'wt') as dagfileptr:
            # loop over all scripts, create submit files, and define them
            # as jobs in the DAG
            for idx, pyscript in enumerate(pyfiles):
                # XXX redundant with previous value? or could it change between
                # scripts?
                batch_dir, name = os.path.split(pyscript)
                name = '.'.join(name.split('.')[:-1])
                submitspec = '\n'.join(
                    (self._template,
                     'executable = %s' % sys.executable,
                     'arguments = %s' % pyscript,
                     'output = %s' % os.path.join(batch_dir,
                                                  '%s.out' % name),
                     'error = %s' % os.path.join(batch_dir,
                                                 '%s.err' % name),
                     'log = %s' % os.path.join(batch_dir,
                                               '%s.log' % name),
                     'getenv = True',
                     self._submit_specs,
                     'queue'
                     ))
                # write submit spec for this job
                submitfile = os.path.join(batch_dir,
                                          '%s.submit' % name)
                with open(submitfile, 'wt') as submitfileprt:
                    submitfileprt.writelines(submitspec)
                    # close() is redundant inside the with-block but harmless
                    submitfileprt.close()
                # define job in DAG
                dagfileptr.write('JOB %i %s\n' % (idx, submitfile))
            # define dependencies in DAG
            for child in dependencies:
                parents = dependencies[child]
                if len(parents):
                    dagfileptr.write('PARENT %s CHILD %i\n'
                                     % (' '.join([str(i) for i in parents]),
                                        child))
        # hand over DAG to condor_dagman
        # NOTE(review): os.environ.data is a Python-2 UserDict detail; on
        # Python 3 this would need dict(os.environ) — confirm target interpreter.
        cmd = CommandLine('condor_submit_dag', environ=os.environ.data)
        # needs -update_submit or re-running a workflow will fail
        cmd.inputs.args = '-update_submit %s %s' % (dagfilename,
                                                    self._dagman_args)
        cmd.run()
        logger.info('submitted all jobs to Condor DAGMan')
|
CraigglesO/Ciphers | vigenere_plaintext_encrypt.py | Python | apache-2.0 | 3,166 | 0.026532 | #########################################################################################
#################### HOW TO USE ##########################
#########################################################################################
# This takes input from the terminal so run (in the proper cd): #
# 'python vigenere_plaintext_encrypt.py textFile.txt key' #
# Make sure the file is in the same folder as this script #
# You can also directly input the plain text: #
# 'python vigenere_plaintext_encrypt.py ThisIsPlainTextCamelCasing key'                 #
# #
# so obviously the first variable is the plaintext with no spaces allowed #
# and the key is an arbitrary length you use to encode the words #
#                                                                                       #
# #
# #
# For decrypting your code check the brother script 'vigenere_plaintext_decrypt.py' #
#########################################################################################
#########################################################################################
# Created by Craig O'Connor - Thursday, August 15, 2013 #
#########################################################################################
from sys import argv
script, plain_text, key = argv

# First argument: the plaintext itself, or the name of a .txt file holding it.
plain_text_string = "%s" % plain_text
if ".txt" in plain_text_string:
    with open(plain_text_string, 'r') as f:
        plain_text_string = f.read()
plain_text_string = plain_text_string.lower()

# Second argument: the Vigenere key.
key_string = "%s" % key
key_string = key_string.lower()

# Make sure the key length is long enough to convert the plaintext
while len(key_string) < len(plain_text_string):
    key_string += key_string

# Value table: 0..25 <-> 'a'..'z'
num_char = dict(enumerate('abcdefghijklmnopqrstuvwxyz'))
# Reverse lookup built once.  Fixes two issues with the old code: the
# Python-2-only dict.iteritems() call (items() works on 2 and 3), and an
# O(26) linear scan per character.
char_num = {c: v for v, c in num_char.items()}

# Convert plaintext and key to numbers; characters outside a-z (digits,
# punctuation, newlines from a .txt input) are silently skipped, as before.
plain_text_num = [char_num[c] for c in plain_text_string if c in char_num]
key_num = [char_num[c] for c in key_string if c in char_num]

# Cipher_value = (Message_value + Key_value) mod 26
encryption_val = [(p + key_num[i]) % 26 for i, p in enumerate(plain_text_num)]

# Map the cipher values back to letters.
encryption_char = "".join(num_char[v] for v in encryption_val)

print (encryption_char)
with open('cipher_text.txt', 'w') as f:
    f.write(encryption_char)
OKThess/website | main/migrations/0044_okthessmeetup.py | Python | mit | 775 | 0.002581 | # -*- c | oding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-16 16:08
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the OkthessMeetup model (date/time, title, rich-text agenda)."""

    dependencies = [
        ('main', '0043_about'),
    ]

    operations = [
        migrations.CreateModel(
            name='OkthessMeetup',
            fields=[
                # fix: "serialize=Fal | se" had been corrupted by a stray
                # " | " sequence, which is a SyntaxError.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('time', models.TimeField()),
                ('title', models.TextField(blank=True, null=True)),
                ('agenda', ckeditor.fields.RichTextField(blank=True, null=True)),
            ],
        ),
    ]
|
jcugat/django-custom-user | src/custom_user/tests.py | Python | bsd-3-clause | 22,621 | 0.001371 | """EmailUser tests."""
import os
import re
from io import StringIO
from unittest import mock
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.core import mail, management
from django.forms.fields import Field
from django.http import HttpRequest, HttpResponse
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext as _
from .forms import EmailUserChangeForm, EmailUserCreationForm
class UserTest(TestCase):
    """Behavioral tests for the EmailUser model (creation, names, email)."""
    # Credentials shared by the tests below.
    user_email = "newuser@localhost.local"
    user_password = "1234"
    def create_user(self):
        """
        Create and return a new user with self.user_email as login and
        self.user_password as password.
        """
        return get_user_model().objects.create_user(self.user_email, self.user_password)
    def test_user_creation(self):
        # Create a new user saving the time frame
        right_now = timezone.now().replace(
            microsecond=0
        )  # MySQL doesn't store microseconds
        # Freeze timezone.now() so date_joined/last_login are deterministic.
        with mock.patch.object(timezone, "now", return_value=right_now):
            self.create_user()
        # Check user exists and email is correct
        self.assertEqual(get_user_model().objects.all().count(), 1)
        self.assertEqual(get_user_model().objects.all()[0].email, self.user_email)
        # Check date_joined and last_login dates
        self.assertEqual(get_user_model().objects.all()[0].date_joined, right_now)
        self.assertEqual(get_user_model().objects.all()[0].last_login, right_now)
        # Check flags
        self.assertTrue(get_user_model().objects.all()[0].is_active)
        self.assertFalse(get_user_model().objects.all()[0].is_staff)
        self.assertFalse(get_user_model().objects.all()[0].is_superuser)
    def test_user_get_full_name(self):
        # For EmailUser both full and short name are the email address.
        user = self.create_user()
        self.assertEqual(user.get_full_name(), self.user_email)
    def test_user_get_short_name(self):
        user = self.create_user()
        self.assertEqual(user.get_short_name(), self.user_email)
    def test_email_user(self):
        # Email definition
        subject = "Email Subject"
        message = "Email Message"
        from_email = "from@normal.com"
        user = self.create_user()
        # Test that no message exists
        self.assertEqual(len(mail.outbox), 0)
        # Send test email
        user.email_user(subject, message, from_email)
        # Test that one message has been sent
        self.assertEqual(len(mail.outbox), 1)
        # Verify that the email is correct
        self.assertEqual(mail.outbox[0].subject, subject)
        self.assertEqual(mail.outbox[0].body, message)
        self.assertEqual(mail.outbox[0].from_email, from_email)
        self.assertEqual(mail.outbox[0].to, [user.email])
    def test_email_user_kwargs(self):
        # valid send_mail parameters
        kwargs = {
            "fail_silently": False,
            "auth_user": None,
            "auth_password": None,
            "connection": None,
        }
        user = get_user_model()(email="foo@bar.com")
        user.email_user(
            subject="Subject here",
            message="This is a message",
            from_email="from@domain.com",
            **kwargs
        )
        # Test that one message has been sent.
        self.assertEqual(len(mail.outbox), 1)
        # Verify that test email contains the correct attributes:
        message = mail.outbox[0]
        self.assertEqual(message.subject, "Subject here")
        self.assertEqual(message.body, "This is a message")
        self.assertEqual(message.from_email, "from@domain.com")
        self.assertEqual(message.to, [user.email])
class UserManagerTest(TestCase):
    """Tests for the manager's create_user/create_superuser and email
    normalization behavior."""

    def test_create_user(self):
        email_lowercase = "normal@normal.com"
        user = get_user_model().objects.create_user(email_lowercase)
        self.assertEqual(user.email, email_lowercase)
        self.assertFalse(user.has_usable_password())
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_user_is_staff(self):
        email_lowercase = "normal@normal.com"
        user = get_user_model().objects.create_user(email_lowercase, is_staff=True)
        self.assertEqual(user.email, email_lowercase)
        self.assertFalse(user.has_usable_password())
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_superuser(self):
        email_lowercase = "normal@normal.com"
        # fix: the password literal had been corrupted by a stray " | ";
        # restored to match the value used in test_user_creation_is_active.
        password = "password1234$%&/"
        user = get_user_model().objects.create_superuser(email_lowercase, password)
        self.assertEqual(user.email, email_lowercase)
        # fix: the old assertion passed the bound method (always truthy)
        # instead of calling it, so the password was never actually checked.
        self.assertTrue(user.check_password(password))
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_staff)
        self.assertTrue(user.is_superuser)

    def test_create_super_user_raises_error_on_false_is_superuser(self):
        # fix: the expected message had been corrupted by a stray " | ".
        with self.assertRaisesMessage(
            ValueError, "Superuser must have is_superuser=True."
        ):
            get_user_model().objects.create_superuser(
                email="test@test.com",
                is_superuser=False,
            )

    def test_create_superuser_raises_error_on_false_is_staff(self):
        with self.assertRaisesMessage(ValueError, "Superuser must have is_staff=True."):
            get_user_model().objects.create_superuser(
                email="test@test.com",
                is_staff=False,
            )

    def test_user_creation_is_active(self):
        # Create deactivated user
        email_lowercase = "normal@normal.com"
        password = "password1234$%&/"
        user = get_user_model().objects.create_user(
            email_lowercase, password, is_active=False
        )
        self.assertFalse(user.is_active)

    def test_user_creation_is_staff(self):
        # Create staff user
        email_lowercase = "normal@normal.com"
        password = "password1234$%&/"
        user = get_user_model().objects.create_user(
            email_lowercase, password, is_staff=True
        )
        self.assertTrue(user.is_staff)

    def test_create_user_email_domain_normalize_rfc3696(self):
        # According to https://tools.ietf.org/html/rfc3696#section-3
        # the "@" symbol can be part of the local part of an email address
        returned = get_user_model().objects.normalize_email(r"Abc\@DEF@EXAMPLE.com")
        self.assertEqual(returned, r"Abc\@DEF@example.com")

    def test_create_user_email_domain_normalize(self):
        returned = get_user_model().objects.normalize_email("normal@DOMAIN.COM")
        self.assertEqual(returned, "normal@domain.com")

    def test_create_user_email_domain_normalize_with_whitespace(self):
        returned = get_user_model().objects.normalize_email(
            r"email\ with_whitespace@D.COM"
        )
        self.assertEqual(returned, r"email\ with_whitespace@d.com")

    def test_empty_username(self):
        self.assertRaisesMessage(
            ValueError,
            "The given email must be set",
            get_user_model().objects.create_user,
            email="",
        )
class MigrationsTest(TestCase):
    """Guard against model changes that lack a committed migration."""

    def test_makemigrations_no_changes(self):
        # A dry run of makemigrations must report nothing to do; anything
        # else means a model changed without an accompanying migration.
        with mock.patch("sys.stdout", new_callable=StringIO) as captured:
            management.call_command("makemigrations", "custom_user", dry_run=True)
            self.assertEqual(
                captured.getvalue(), "No changes detected in app 'custom_user'\n"
            )
class TestAuthenticationMiddleware(TestCase):
@classmethod
def setUpTestData(cls):
cls.user_email = "test@example.com"
cls.user_password = "test_password"
cls.user = get_user_model().objects.create_user(
cls.user_email, cls.user_password
)
def setUp(self):
self.middleware = AuthenticationMiddleware(lambda req: HttpResponse())
self.client.force_login(self.user)
self. |
fbradyirl/home-assistant | homeassistant/components/lovelace/__init__.py | Python | apache-2.0 | 6,719 | 0.000298 | """Support for the Lovelace UI."""
from functools import wraps
import logging
import os
import time
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.yaml import load_yaml
_LOGGER = logging.getLogger(__name__)
DOMAIN = "lovelace"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_MODE = "mode"
MODE_YAML = "yaml"
MODE_STORAGE = "storage"

# Integration configuration: lovelace may run in read-only YAML mode or the
# default storage (UI-editable) mode.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Optional(CONF_MODE, default=MODE_STORAGE): vol.All(
                    vol.Lower, vol.In([MODE_YAML, MODE_STORAGE])
                )
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# fix: the event name had been corrupted to "lovelace_u | pdated" by a stray
# " | " sequence.  Fired whenever the config is saved or reloaded from disk.
EVENT_LOVELACE_UPDATED = "lovelace_updated"

LOVELACE_CONFIG_FILE = "ui-lovelace.yaml"

WS_TYPE_GET_LOVELACE_UI = "lovelace/config"
WS_TYPE_SAVE_CONFIG = "lovelace/config/save"

# fix: a stray "|" before vol.Required had corrupted this schema literal.
SCHEMA_GET_LOVELACE_UI = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {
        vol.Required("type"): WS_TYPE_GET_LOVELACE_UI,
        vol.Optional("force", default=False): bool,
    }
)

SCHEMA_SAVE_CONFIG = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
    {
        vol.Required("type"): WS_TYPE_SAVE_CONFIG,
        vol.Required("config"): vol.Any(str, dict),
    }
)
class ConfigNotFound(HomeAssistantError):
    """Raised when no Lovelace configuration is available."""
async def async_setup(hass, config):
    """Set up the Lovelace commands."""
    # Pass in default to `get` because defaults not set if loaded as dep
    mode = config.get(DOMAIN, {}).get(CONF_MODE, MODE_STORAGE)
    # Register the frontend panel, telling it which mode is active.
    hass.components.frontend.async_register_built_in_panel(
        DOMAIN, config={"mode": mode}
    )
    # Pick the config backend: YAML file (read-only) or storage (editable).
    if mode == MODE_YAML:
        hass.data[DOMAIN] = LovelaceYAML(hass)
    else:
        hass.data[DOMAIN] = LovelaceStorage(hass)
    # Websocket commands for fetching and saving the UI configuration.
    hass.components.websocket_api.async_register_command(
        WS_TYPE_GET_LOVELACE_UI, websocket_lovelace_config, SCHEMA_GET_LOVELACE_UI
    )
    hass.components.websocket_api.async_register_command(
        WS_TYPE_SAVE_CONFIG, websocket_lovelace_save_config, SCHEMA_SAVE_CONFIG
    )
    hass.components.system_health.async_register_info(DOMAIN, system_health_info)
    return True
class LovelaceStorage:
    """Lovelace configuration persisted through the hass storage helper."""

    def __init__(self, hass):
        """Set up the backing store; data is loaded lazily on first access."""
        self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
        self._data = None
        self._hass = hass

    async def async_get_info(self):
        """Return system-health info for the stored config."""
        if self._data is None:
            await self._load()
        stored = self._data["config"]
        return {"mode": "auto-gen"} if stored is None else _config_info("storage", stored)

    async def async_load(self, force):
        """Return the stored config, raising ConfigNotFound when unset."""
        if self._data is None:
            await self._load()
        stored = self._data["config"]
        if stored is None:
            raise ConfigNotFound
        return stored

    async def async_save(self, config):
        """Persist *config* and broadcast the update event."""
        if self._data is None:
            await self._load()
        self._data["config"] = config
        self._hass.bus.async_fire(EVENT_LOVELACE_UPDATED)
        await self._store.async_save(self._data)

    async def _load(self):
        """Populate self._data from storage, defaulting to an empty payload."""
        stored = await self._store.async_load()
        self._data = stored if stored else {"config": None}
class LovelaceYAML:
    """Class to handle YAML-based Lovelace config."""
    def __init__(self, hass):
        """Initialize the YAML config."""
        self.hass = hass
        # Cache of (parsed config, load time); None until first load.
        self._cache = None
    async def async_get_info(self):
        """Return system-health info for the YAML-backed config."""
        try:
            config = await self.async_load(False)
        except ConfigNotFound:
            return {
                "mode": "yaml",
                "error": "{} not found".format(
                    self.hass.config.path(LOVELACE_CONFIG_FILE)
                ),
            }
        return _config_info("yaml", config)
    async def async_load(self, force):
        """Load config, firing EVENT_LOVELACE_UPDATED when it changed on disk."""
        # File I/O and YAML parsing run in the executor, off the event loop.
        is_updated, config = await self.hass.async_add_executor_job(
            self._load_config, force
        )
        if is_updated:
            self.hass.bus.async_fire(EVENT_LOVELACE_UPDATED)
        return config
    def _load_config(self, force):
        """Load the actual config.

        Returns (is_updated, config); is_updated is True when a previously
        cached config was replaced by a fresh read.
        """
        fname = self.hass.config.path(LOVELACE_CONFIG_FILE)
        # Check for a cached version of the config
        if not force and self._cache is not None:
            config, last_update = self._cache
            modtime = os.path.getmtime(fname)
            # Cache is valid only if it was read after the file's last change.
            if config and last_update > modtime:
                return False, config
        # A fresh read counts as an update only if something was cached before.
        is_updated = self._cache is not None
        try:
            config = load_yaml(fname)
        except FileNotFoundError:
            raise ConfigNotFound from None
        self._cache = (config, time.time())
        return is_updated, config
    async def async_save(self, config):
        """Save config."""
        # YAML mode is read-only; edits must be made in the file itself.
        raise HomeAssistantError("Not supported")
def handle_yaml_errors(func):
    """Decorator converting config errors from a websocket handler into
    websocket error responses; successful results are sent back as-is."""

    @wraps(func)
    async def send_with_error_handling(hass, connection, msg):
        try:
            result = await func(hass, connection, msg)
        except ConfigNotFound:
            connection.send_error(msg["id"], "config_not_found", "No config found.")
            return
        except HomeAssistantError as err:
            connection.send_error(msg["id"], "error", str(err))
            return
        if msg is not None:
            await connection.send_big_result(msg["id"], result)
        else:
            connection.send_result(msg["id"], result)

    return send_with_error_handling
@websocket_api.async_response
@handle_yaml_errors
async def websocket_lovelace_config(hass, connection, msg):
    """Send Lovelace UI config over WebSocket configuration."""
    # `force` bypasses the YAML backend's mtime-based cache.
    return await hass.data[DOMAIN].async_load(msg["force"])
@websocket_api.async_response
@handle_yaml_errors
async def websocket_lovelace_save_config(hass, connection, msg):
    """Save Lovelace UI configuration."""
    # Raises HomeAssistantError in YAML mode (read-only backend).
    await hass.data[DOMAIN].async_save(msg["config"])
async def system_health_info(hass):
    """Get info for the info page."""
    # Delegates to whichever backend async_setup registered in hass.data.
    return await hass.data[DOMAIN].async_get_info()
def _config_info(mode, config):
"""Generate info about the config."""
return {
"mode": mode,
"resources": len(config.get("resources", [])),
"views": len(config.get("views", [])),
}
|
hicham-a/luna | test/test06.py | Python | gpl-3.0 | 308 | 0.016234 | from Luna import Luna
from bson import ObjectId
luna = Luna('luna',['cluster','nodes'])
print 1
print luna.find_obj('nodes', 'node005')
print 2
print luna.add_array_elem('nodes', ['node005', 'node006'], 'nodes1', ['a1', | 'a2', 'a3'] )
print 3
print luna.change_obj('nodes',['node001', 'node003'], {'zzz': 6})
| |
putcn/Paddle | python/paddle/fluid/layers/metric.py | Python | apache-2.0 | 2,939 | 0.000681 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to metric.
"""
import warnings
from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant
from ..framework import Variable
from ..param_attr import ParamAttr
import nn
__all__ = ['accuracy', 'auc']
def accuracy(input, label, k=1, correct=None, total=None):
    """
    This function computes the accuracy using the input and label.
    The output is the top k inputs and their indices.

    Args:
        input: prediction Variable (scores per class).
        label: ground-truth label Variable.
        k: predictions count among which a hit is scored (top-k accuracy).
        correct: optional int64 Variable receiving the hit count.
        total: optional int64 Variable receiving the sample count.

    Returns:
        A float32 Variable holding the minibatch accuracy.
    """
    helper = LayerHelper("accuracy", **locals())
    topk_out, topk_indices = nn.topk(input, k=k)
    acc_out = helper.create_tmp_variable(dtype="float32")
    if correct is None:
        correct = helper.create_tmp_variable(dtype="int64")
    if total is None:
        total = helper.create_tmp_variable(dtype="int64")
    # fix: "helper" and "inputs" below had been corrupted by stray " | "
    # sequences ("he | lper", "in | puts"), which is a SyntaxError.
    helper.append_op(
        type="accuracy",
        inputs={
            "Out": [topk_out],
            "Indices": [topk_indices],
            "Label": [label]
        },
        outputs={
            "Accuracy": [acc_out],
            "Correct": [correct],
            "Total": [total],
        })
    return acc_out
def auc(input, label, curve='ROC', num_thresholds=200):
    """Compute the minibatch Area Under the Curve for binary classification.

    Args:
        input: 2-D score Variable (one column per class).
        label: ground-truth label Variable.
        curve: which curve to integrate, 'ROC' or 'PR'.
        num_thresholds: number of thresholds used to discretize the curve.

    Returns:
        A float32 Variable holding the minibatch AUC value.
    """
    warnings.warn(
        "This interface not recommended, fluid.layers.auc compute the auc at every minibatch, \
        but can not aggregate them and get the pass AUC, because pass \
        auc can not be averaged with weighted from the minibatch auc value. \
        Please use fluid.metrics.Auc, it can compute the auc value via Python natively, \
        which can get every minibatch and every pass auc value.", Warning)
    helper = LayerHelper("auc", **locals())
    topk_out = helper.create_tmp_variable(dtype=input.dtype)
    topk_indices = helper.create_tmp_variable(dtype="int64")
    # Top-2 scores/indices feed the auc op (binary classification).
    # fix: the old body was a broken copy of accuracy(): it referenced an
    # undefined name `k`, created unused `correct`/`total` variables, and
    # appended an "accuracy" op instead of an "auc" op.
    topk_out, topk_indices = nn.topk(input, k=2)
    auc_out = helper.create_tmp_variable(dtype="float32")
    helper.append_op(
        type="auc",
        inputs={
            "Out": [topk_out],
            "Indices": [topk_indices],
            "Label": [label]
        },
        attrs={"curve": curve,
               "num_thresholds": num_thresholds},
        outputs={"AUC": [auc_out], })
    return auc_out
|
bergonzzi/eracareers | era_scraper/eracareers/middlewares/elasticsearch_new.py | Python | apache-2.0 | 2,816 | 0.002486 | # -*- coding: utf-8 -*-
# Copyright 2014 Michael Malocha <michael@knockrentals.com>
#
# Expanded from the work by Julien Duponchelle <julien@duponchelle.info>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elastic Search Pipeline for scrappy expanded with support for multiple items"""
from pyes import ES
import hashlib
import types
import json
class ElasticSearchPipeline(object):
    """Scrapy item pipeline that indexes scraped items into Elasticsearch."""
    settings = None
    es = None

    @classmethod
    def from_crawler(cls, crawler):
        """Build the pipeline from crawler settings and open the ES connection."""
        ext = cls()
        ext.settings = crawler.settings
        basic_auth = {}
        if ext.settings['ELASTICSEARCH_USERNAME']:
            basic_auth['username'] = ext.settings['ELASTICSEARCH_USERNAME']
        if ext.settings['ELASTICSEARCH_PASSWORD']:
            basic_auth['password'] = ext.settings['ELASTICSEARCH_PASSWORD']
        if ext.settings['ELASTICSEARCH_PORT']:
            uri = "%s:%d" % (ext.settings['ELASTICSEARCH_SERVER'], ext.settings['ELASTICSEARCH_PORT'])
        else:
            uri = "%s" % (ext.settings['ELASTICSEARCH_SERVER'])
        if ext.settings['ELASTICSEARCH_MAPPING']:
            # NOTE(review): parsed but never applied to the index — confirm
            # whether a put-mapping call was intended here.
            mapping = json.loads(ext.settings['ELASTICSEARCH_MAPPING'])
        ext.es = ES([uri], basic_auth=basic_auth)
        return ext

    def open_spider(self, spider):
        # fix: the method had no body at all (a SyntaxError); this hook is
        # intentionally a no-op for this pipeline.
        pass

    def index_item(self, item, spider):
        """Index one item; derive a stable document id when a unique key is set."""
        if self.settings.get('ELASTICSEARCH_UNIQ_KEY'):
            uniq_key = self.settings.get('ELASTICSEARCH_UNIQ_KEY')
            local_id = hashlib.sha1(item[uniq_key]).hexdigest()
            spider.logger.info("Generated unique key %s", local_id)
            op_type = 'index'
        else:
            op_type = 'create'
            local_id = item['id']
        self.es.index(dict(item),
                      self.settings.get('ELASTICSEARCH_INDEX'),
                      self.settings.get('ELASTICSEARCH_TYPE'),
                      id=local_id,
                      op_type=op_type)

    def process_item(self, item, spider):
        """Index the item (or each item of a generator/list) and pass it on."""
        # fix: stray " | " sequences had corrupted "for each in item:" and
        # the log message below.
        if isinstance(item, types.GeneratorType) or isinstance(item, types.ListType):
            for each in item:
                self.process_item(each, spider)
        else:
            self.index_item(item, spider)
            spider.logger.info("Item sent to Elastic Search %s" % (self.settings.get('ELASTICSEARCH_INDEX')))
        return item
robhudson/warehouse | tests/test_serving.py | Python | apache-2.0 | 1,290 | 0 | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import pretend
from warehouse import serving
from warehouse.serving import WSGIRequestHandler
def test_request_handler_log(monkeypatch):
    """log() prefixes messages with the client address and delegates to _log."""
    recorded = pretend.call_recorder(lambda *args, **kwargs: None)
    monkeypatch.setattr(serving, "_log", recorded)
    # Avoid the real socket-handling __init__.
    monkeypatch.setattr(WSGIRequestHandler, "__init__", lambda *args, **kwargs: None)
    handler = WSGIRequestHandler()
    handler.address_string = pretend.call_recorder(lambda: "127.0.0.1")
    handler.log("info", "test message")
    assert recorded.calls == [pretend.call("info", "127.0.0.1 - test message\n")]
    assert handler.address_string.calls == [pretend.call()]
|
intel-analytics/BigDL | python/ppml/test/bigdl/ppml/algorithms/test_hfl_logistic_regression.py | Python | apache-2.0 | 1,167 | 0 | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import numpy as np
from bigdl.ppml import FLServer
from bigdl.ppml.algorithms.fgboost_regression import FGBoostRegression
from bigdl.ppml.utils import init_fl_context
class TestHflLogisticRegression(unittest.TestCase):
    """Smoke-test scaffolding for HFL logistic regression against a local FL server."""

    def setUp(self) -> None:
        # Bring up a local FL server and point the client context at it.
        self._fl_server = FLServer()
        self._fl_server.build()
        self._fl_server.start()
        init_fl_context()

    def tearDown(self) -> None:
        self._fl_server.stop()

    def test_dummy_data(self):
        # Placeholder fixture data; no training is exercised yet.
        x, y = np.ones([2, 3]), np.ones([2])
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
mikerjacobi/goji-skeleton | go.py | Python | apache-2.0 | 547 | 0.012797 | #!/usr/bin/python
| import os
container_name = "goji"
host_port = "8003"
cmd = "docker ps -a | grep %s"%container_name
container_exists = len(os.popen(cmd).read().split('\n')) > 1
print cmd
print "exists:", container_exists
if container_exists:
cmd = "go build && docker restart %s"%container_name
print cmd
os.system(cmd)
else:
cmd = "docker run -d -p %(port)s:80 -v %(cwd)s:/go/src --name %(name)s dev"%({
"port":host_port,
"cwd":os.getcwd(),
| "name":container_name
})
print cmd
os.system(cmd)
|
irares/WirelessHart-Gateway | NetworkManager/scripts/Python/ReviewOperations.py | Python | gpl-3.0 | 9,145 | 0.044505 | #!/usr/bin/python
#
# python module for detecting unconfirmed operations from
# nmOperations.log, packets and events in nm.log file
# complaints: radu marginean
#
import re
import os
# Module-level state shared by the _Check*/_Print* helpers below.
# _operations: (id, owner) -> (reason line, log line) for generated ops.
# _packets:    packet handle -> log line for sent-but-unconfirmed packets.
# _events:     event id -> log line for raised-but-unconfirmed events.
_operations = {}
_packets = {}
_events = {}
def _PrintUnconfirmedOperations(nodeNickname_):
global _operations
tmpDict = {}
for (id, own) in _operations.keys():
(reason, line) = _operations[(id, own)]
timestamp = reason[:18]
tmpDict[(timestamp, own, id)] = (reason, line)
keys = tmpDict.keys ()
keys.sort()
print "\nList of unconfirmed operations: \n"
for (own, timestamp, id) in keys:
(reason, line) = tmpDict[(own, timestamp, id)]
if nodeNickname_ == '0' or nodeNickname_ == own:
print "Unconfirmed operation:"
print reason, line
print ''
def _PrintUnconfirmedPackets():
global _packets
tmpDict = {}
for handle in _packets.keys():
line = _packets[handle]
timestamp = line[:18]
if timestamp in tmpDict.keys():
tmpDict[timestamp] += line;
else:
tmpDict[timestamp] = line;
timestamps = tmpDict.keys();
timestamps.sort();
if len(timestamps) > 0:
print "Unconfirmed packets:"
for timestamp in timestamps:
print tmpDict[timestamp]
else:
print "No unconfirmed packets."
def _PrintUnconfirmedEvents():
global _events
tmpDict = {}
for evtId in _events.keys():
line = _events[evtId]
timestamp = line[:18]
if timestamp in tmpDict.keys():
tmpDict[timestamp] += line;
else:
tmpDict[timestamp] = line;
timestamps = tmpDict.keys();
timestamps.sort();
if len(timestamps) > 0:
print "Unconfirmed events:"
for timestamp in timestamps:
print tmpDict[timestamp]
else:
print "No unconfirmed events."
def _CheckOperations(logFile_, verbose_, nodeNickname_):
global _operations
log = open(logFile_, "r")
reason = ''
lineString = ''
for line in log:
if re.search("Reason: Write session F981, parent=F980", line) is not None:
print "Network manager was restarted at:", line[:19]
_PrintUnconfirmedOperations(nodeNickname_)
_operations.clear()
continue
if re.search("Reason:", line) is not None:
reason = line;
continue
lineString = line;
# extract id, owner and state
indx = line.find('id=')
if indx == -1:
continue
line = line[indx + 3:]
indx = line.find(',')
if indx == -1:
continue
id = re.sub(r"\s", "", line[:indx])
line = line[indx + 1:]
indx = line.find('own=')
if indx == -1:
continue
line = line[indx + 4:]
indx = line.find(',')
if indx == -1:
continue
own = re.sub(r"\s", "", line[:indx])
line = line[indx + 1:]
indx = line.find('state=')
if indx == -1:
continue
line = line[indx + 6:]
indx = line.find(',')
if indx == -1:
continue
state = re.sub(r"\s", "", line[:i | ndx])
#confirmed
if state == 'C':
if (id, own) in _operations.keys(): |
del _operations[(id, own)]
else:
if verbose_:
print "Operation confirmed without being listed as generated:"
print reason, lineString
elif state == 'G':
_operations[(id, own)] = (reason, lineString)
log.close()
def _CheckPackets(logFile_, verbose_):
    # Scan one nm.log file, recording sent packets by handle in _packets and
    # dropping them again once the matching confirmation shows up.
    global _packets
    log = open(logFile_, "r")
    for line in log:
        lineString = line
        # An NM restart invalidates all in-flight packets: report and reset.
        if re.search("Start NetworkManager", line) is not None:
            print "\nNetwork Manager restart detected. Unconfirmed packets so far:"
            _PrintUnconfirmedPackets()
            _packets.clear()
            print "\nNew Network Manager session starts here:"
            print line
            continue
        # NOTE(review): "()" in the pattern is an empty regex group, so this
        # matches "logSendPacket" anywhere in the line; presumably intended.
        if re.search("logSendPacket()", line) is not None:
            indx = line.find('handle=')
            if indx == -1:
                continue
            line = line[indx + 7:]
            indx = line.find(',')
            if indx == -1:
                continue
            # Handle is the text between 'handle=' and the next comma.
            handle = re.sub(r"\s", "", line[:indx])
            _packets[handle] = lineString
            continue
        if re.search("logConfPacket()", line) is not None:
            indx = line.find('handle=')
            if indx == -1:
                continue
            line = line[indx + 7:]
            indx = line.find(',')
            if indx == -1:
                continue
            handle = re.sub(r"\s", "", line[:indx])
            # Confirmation: forget the packet, or warn if it was never sent.
            if handle in _packets.keys():
                del _packets[handle]
            else:
                if verbose_:
                    print "Packet was confirmed without being listed as sent:"
                    print lineString
            pass
    log.close()
def _CheckEvents(logFile_, verbose_):
    # Scan one nm.log file, recording new events by id in _events and
    # removing them when the matching confirmation is seen.
    global _events
    log = open(logFile_, "r")
    for line in log:
        lineString = line
        # An NM restart invalidates all pending events: report and reset.
        if re.search("Start NetworkManager", line) is not None:
            print "\nNetwork Manager restart detected. Unconfirmed events so far:"
            _PrintUnconfirmedEvents()
            _events.clear()
            print "\nNew Network Manager session starts here:"
            print line
            continue
        # NOTE(review): "()" in the pattern is an empty regex group, so this
        # matches "logNewEvent" anywhere in the line; presumably intended.
        if re.search("logNewEvent()", line) is not None:
            indx = line.find('evId=')
            if indx == -1:
                continue
            line = line[indx + 5:]
            indx = line.find(';')
            if indx == -1:
                continue
            # Event id is the text between 'evId=' and the next semicolon.
            evId = re.sub(r"\s", "", line[:indx])
            _events[evId] = lineString
            continue
        if re.search("logConfEvent()", line) is not None:
            indx = line.find('evId=')
            if indx == -1:
                continue
            line = line[indx + 5:]
            indx = line.find(';')
            if indx == -1:
                continue
            evId = re.sub(r"\s", "", line[:indx])
            # Confirmation: forget the event, or warn if it was never raised.
            if evId in _events.keys():
                del _events[evId]
            else:
                if verbose_:
                    print "Event was confirmed without being listed as new:"
                    print lineString
    log.close()
def ListUnconfirmedOperations(logsFolder_, verbose_, nodeNickname_ = '0'):
if not os.path.exists(logsFolder_):
print "Input folder does not exist. Exit."
exit(1)
if os.path.isdir(logsFolder_) == False:
print "Script takes as input a folder name."
exit(1)
files = []
#logs file may be ordered in two ways
# 1. in the reversed order of creation if log file names look like nm.log, nm.log.1, nm.log.2 etc
# 2. in the order of creation if log file names look like: nm.log.1_2010_11_27_09_04_52
logsOrderIsReversed = True
for file in os.listdir(logsFolder_):
if file == '.' or file == '..': continue
if file[:16] != 'nmOperations.log': continue
if logsOrderIsReversed and re.search( r'\d\d\d\d_\d\d_\d\d_\d\d_\d\d_\d\d', file) is not None:
logsOrderIsReversed = False
files.append(file)
files.sort()
if logsOrderIsReversed:
tmp = {}
for file in files:
indx = file.find('.log')
if indx == -1:
print "Unexpected log file name format. Exit."
exit(1)
if indx + 4 == len(file):
tmp[0] = file
else:
tmp[int(file[indx+5:])] = file
tmp.keys().sort()
files = [tmp[key] for key in tmp.keys()]
files.reverse()
for file in files:
_CheckOperations(logsFolder_ + '/' + file, verbose_, nodeNickname_)
_PrintUnconfirmedOperations(nodeNickname_)
def ListUnconfirmedPacktes(logsFolder_, verbose_):
if not os.path.exists(logsFolder_):
print "Input folder does not exist. Exit."
exit(1)
if os.path.isdir(logsFolder_) == False:
print "Script takes as input a folder name."
exit(1)
files = []
#logs file may be ordered in two ways
# 1. in the reversed order of creation if log file names look like nm.log, nm.log.1, nm.log.2 etc
# 2. in the order of creation if log file names look like: nm.log.1_2010_11_27_09_04_52
logsOrderIsReversed = True
for file in os.listdir(logsFolder_):
if file == '.' or file == '..': continue
if file[:6] != 'nm.log': continue
if logsOrderIsReversed and re.search( r'\d\d\d\d_\d\d_\d\d_\d\d_\d\d_\d\d', file) is not None:
logsOrderIsReversed = False
files.append(file)
files.sort()
if logsOrderIsReversed:
tmp = {}
for file in files:
indx = file.find('.log')
if indx == -1:
print "Unexpected log file name format. Exit."
exit(1)
if indx + 4 == len(file):
tmp[0] = file
else:
tmp[int(file[indx+5:])] = file
tmp.keys().sort()
files = [tmp[key] for key in tmp.keys()]
files.reverse()
for file in files:
_CheckPackets(logsFolder_ + '/' + file, verbose_)
_PrintUnconfirmedPackets();
def ListUnconfirmedEvents(logsFolder_, verbose_):
if not os.path.exists(logsFolder_):
print "Input folder does not exist. Exit."
exit(1)
if os.path.isdir(logsFolder_) == False:
print "Script takes as input a folder name. Exit."
exit(1)
files = []
#logs file may be |
nkiraly/ansible-sshjail | sshjail.py | Python | mit | 4,773 | 0.005447 | import distutils.spawn
import traceback
import os
import shutil
import subprocess
import re
import sys
from ansible import errors
from ansible.callbacks import vvv
import ansible.constants as C
from ansible.runner.connection_plugins.ssh import Connection as SSHConn
class Connection(object):
    ''' jail-over-ssh based connections

    The inventory host has the form "my-jail@my.jailhost": the part before
    the '@' is the jail's name or hostname, the part after it is the machine
    that runs the jail.  Commands travel over a regular SSH connection to
    the jail host and are entered into the jail with jailme or jexec.
    '''

    def match_jail(self):
        # Resolve self.jailspec to (jid, name, hostname, path) using jls on
        # the jail host; the result is cached on the instance.
        if self.jid == None:
            code, _, stdout, stderr = self._exec_command("jls -q jid name host.hostname path")
            if code != 0:
                vvv("JLS stdout: %s" % stdout)
                raise errors.AnsibleError("jls returned non-zero!")
            lines = stdout.strip().split('\n')
            found = False
            for line in lines:
                if line.strip() == '':
                    break
                jid, name, hostname, path = line.strip().split()
                if name == self.jailspec or hostname == self.jailspec:
                    self.jid = jid
                    self.jname = name
                    self.jhost = hostname
                    self.jpath = path
                    found = True
                    break
            if not found:
                raise errors.AnsibleError("failed to find a jail with name or hostname of '%s'" % self.jailspec)

    def get_jail_path(self):
        # Root directory of the jail on the jail host's filesystem.
        self.match_jail()
        return self.jpath

    def get_jail_id(self):
        self.match_jail()
        return self.jid

    def get_tmp_file(self):
        # Create a temp file on the jail host; mktemp prints its path last.
        code, _, stdout, stderr = self._exec_command('mktemp', '', None)
        return stdout.strip().split('\n')[-1]

    def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
        # my-jail@my.jailhost => my-jail is jail name/hostname, my.jailhost is jailhost hostname
        self.host = host
        self.jailspec, self.jailhost = host.split('@', 1)
        # piggyback off of the standard SSH connection to the jail host
        self.runner = runner
        self.has_pipelining = False
        self.ssh = SSHConn(runner, self.jailhost, port, user, password, private_key_file, *args)
        # jail information loaded lazily on first use by match_jail
        self.jid = None
        self.jname = None
        self.jhost = None
        self.jpath = None

    def connect(self, port=None):
        self.ssh.connect()
        return self

    # runs a command on the jailhost, rather than inside the jail
    def _exec_command(self, cmd, tmp_path='', become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
        return self.ssh.exec_command(cmd, tmp_path, become_user, sudoable, executable, in_data)

    def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
        ''' run a command in the jail '''
        if executable:
            cmd = ' '.join([executable, '-c', '"%s"' % cmd])
        # Prefer jailme (usable without full root) and fall back to jexec.
        local_cmd = 'which -s jailme && jailme %s %s || jexec %s %s' % (
            self.get_jail_id(), cmd,
            self.get_jail_id(), cmd
        )
        vvv("JAIL (%s) %s" % (become_user, local_cmd), host=self.host)
        return self._exec_command(local_cmd, tmp_path, become_user, True, executable, in_data)

    def _normalize_path(self, path, prefix):
        # Re-root 'path' under 'prefix' (the jail's root directory),
        # normalizing '..' components before joining so the result stays
        # inside the prefix.
        if not path.startswith(os.path.sep):
            path = os.path.join(os.path.sep, path)
        normpath = os.path.normpath(path)
        return os.path.join(prefix, normpath[1:])

    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote jail '''
        tmp = self.get_tmp_file()
        self.ssh.put_file(in_path, tmp)
        out_path = self._normalize_path(out_path, self.get_jail_path())
        # World-readable so the (possibly different) become user can copy it.
        code, _, stdout, stderr = self._exec_command(' '.join(['chmod 0644', tmp]))
        if code != 0:
            raise errors.AnsibleError("failed to make temp file %s world readable:\n%s\n%s" % (tmp, stdout, stderr))
        code, _, stdout, stderr = self._exec_command(' '.join(['cp', tmp, out_path]), '', self.runner.become_user, True)
        if code != 0:
            raise errors.AnsibleError("failed to move file from %s to %s:\n%s\n%s" % (tmp, out_path, stdout, stderr))
        code, _, stdout, stderr = self._exec_command(' '.join(['rm', tmp]))
        if code != 0:
            raise errors.AnsibleError("failed to remove temp file %s:\n%s\n%s" % (tmp, stdout, stderr))

    def fetch_file(self, in_path, out_path):
        ''' fetch a file from remote jail to local '''
        tmp = self.get_tmp_file()
        in_path = self._normalize_path(in_path, self.get_jail_path())
        # BUG FIX: this used self.juser, which is never assigned anywhere in
        # the class and raised AttributeError; put_file uses runner.become_user.
        self._exec_command(' '.join(['mv', in_path, tmp]), '', self.runner.become_user, True)
        self.ssh.fetch_file(tmp, out_path)

    def close(self):
        ''' terminate the connection; nothing to do here '''
        pass
|
realmarcin/data_api | data_loading/genome_loading/upload_assemblies.py | Python | mit | 17,445 | 0.012554 | #!/usr/bin/env python
# standard library imports
import os
import sys
import logging
import re
import hashlib
import time
import traceback
import os.path
# 3rd party imports
import simplejson
# KBase imports
import doekbase.Transform.script_utils as script_utils
import TextFileDecoder
import doekbase.workspace.client
# transformation method that can be called if this module is imported
# Note the logger has different levels it could be run.
# See: https://docs.python.org/2/library/logging.html#logging-levels
#
# The default level is set to INFO which includes everything except DEBUG
def transform(shock_service_url=None,
#handle_service_url=None,
#output_file_name=None,
input_fasta_directory=None,
#working_directory=None, shock_id=None, handle_id=None,
#input_mapping=None, fasta_reference_only=False,
wsname=None,
wsurl=None,
genome_list_file=None,
# taxon_wsname=None,
# taxon_names_file=None,
level=logging.INFO, logger=None):
"""
Uploads CondensedGenomeAssembly
Args:
shock_service_url: A url for the KBase SHOCK service.
input_fasta_directory: The directory where files will be read from.
level: Logging level, defaults to logging.INFO.
Returns:
JSON file on disk that can be saved as a KBase workspace object.
Authors:
Jason Baumohl, Matt Henderson
"""
if logger is None:
logger = script_utils.stderrlogger(__file__)
assembly_ws_client = doekbase.workspace.client.Workspace(wsurl)
assembly_workspace_object = assembly_ws_client.get_workspace_info({'workspace':wsname})
# taxon_ws_client = doekbase.workspace.client.Workspace(wsurl)
# taxon_workspace_object = ws_client.get_workspace_info({'workspace':taxon_wsname})
workspace_id = assembly_workspace_object[0]
workspace_name = assembly_workspace_object[1]
# #key scientific name, value is taxon object name (taxid_taxon)
# scientific_names_lookup = dict()
# taxon_names_file = taxon_names_file[0]
# if os.path.isfile(taxon_names_file):
# print "Found taxon_names_File"
# name_f = open(taxon_names_file, 'r')
# counter = 0
# for name_line in name_f:
# temp_list = re.split(r'\t*\|\t*', name_line)
# if temp_list[3] == "scientific name":
# scientific_names_lookup[temp_list[1]] = "%s_taxon" % (str(temp_list[0]))
# name_f.close()
genomes_list = list()
# genome_list_file = genome_list_file[0]
if os.path.isfile(genome_list_file):
print "Found Genome_list_File"
genomes_f = open(genome_list_file, 'r')
for genome_line in genomes_f:
temp_list = re.split(r'\n*', genome_line)
genomes_list.append(temp_list[0])
genomes_f.close()
logger.info("Starting conversion of FASTA to Assemblies")
token = os.environ.get('KB_AUTH_TOKEN')
# if input_mapping is None:
# logger.info("Scanning for FASTA files.")
# valid_extensions = [".fa",".fasta",".fna"]
# files = os.listdir(input_directory)
# fasta_files = [x for x in files if os.path.splitext(x)[-1] in valid_extensions]
# assert len(fasta_files) != 0
# logger.info("Found {0}".format(str(fasta_files)))
# input_file_name = os.path.join(input_directory,files[0])
# if len(fasta_files) > 1:
# logger.warning("Not sure how to handle multiple FASTA files in this context. Using {0}".format(input_file_name))
# else:
# input_file_name = os.path.join(os.path.join(input_directory, "FASTA.DNA.Assembly"), simplejson.loads(input_mapping)["FASTA.DNA.Assembly"])
for genome_id in genomes_list:
logger.info("Building Object.")
temp_genome_id = genome_id
temp_genome_id.replace("|","\|")
input_file_name = "%s/%s.fasta" % (input_fasta_directory,temp_genome_id)
if not os.path.isfile(input_file_name):
raise Exception("The input file name {0} is not a file!".format(input_file_name))
# if not os.path.isdir(args.working_directory):
# raise Exception("The working directory {0} is not a valid directory!".format(working_directory))
# logger.debug(fasta_reference_only)
input_file_handle = TextFileDecoder.open_textdecoder(input_file_name, 'ISO-8859-1')
# input_file_handle = open(input_file_name, 'r')
fasta_header = None
sequence_list = []
fasta_dict = dict()
first_header_found = False
contig_set_md5_list = []
# Pattern for replacing white space
pattern = re.compile(r'\s+')
sequence_exists = False
total_length = 0
gc_length = 0
#Note added X and x due to kb|g.1886.fasta
valid_chars = "-AaCcGgTtUuWwSsMmKkRrYyBbDdHhVvNnXx"
amino_acid_specific_characters = "PpLlIiFfQqEe"
sequence_start = 0
sequence_stop = 0
current_line = input_file_handle.readline()
# for current_line in input_file_handle:
while current_line != None and len(current_line) > 0:
# print "CURRENT LINE: " + current_line
if (current_line[0] == ">"):
# found a header line
# Wrap up previous fasta | sequence
if (not sequence_exists) and first_header_found:
logger.error("There is no sequence related to FASTA record : {0}".format(fasta_header))
raise Exception("There is no sequence related to FASTA record : {0}" | .format(fasta_header))
if not first_header_found:
first_header_found = True
# sequence_start = input_file_handle.tell()
sequence_start = 0
else:
sequence_stop = input_file_handle.tell() - len(current_line)
# build up sequence and remove all white space
total_sequence = ''.join(sequence_list)
total_sequence = re.sub(pattern, '', total_sequence)
if not total_sequence :
logger.error("There is no sequence related to FASTA record : {0}".format(fasta_header))
raise Exception("There is no sequence related to FASTA record : {0}".format(fasta_header))
for character in total_sequence:
if character not in valid_chars:
if character in amino_acid_specific_characters:
raise Exception("This fasta file may have amino acids in it instead of the required nucleotides.")
raise Exception("This FASTA file has non nucleic acid characters : {0}".format(character))
length = len(total_sequence)
total_length = total_length + length
contig_gc_length = len(re.findall('G|g|C|c',total_sequence))
contig_dict = dict()
contig_dict["gc_content"] = float(contig_gc_length)/float(length)
gc_length = gc_length + contig_gc_length
fasta_key = fasta_header.strip()
contig_dict["contig_id"] = fasta_key
contig_dict["length"] = length
contig_dict["name"] = fasta_key
contig_dict["description"] = "Note MD5 is generated from uppercasing the sequence"
contig_md5 = hashlib.md5(total_sequence.upper()).hexdigest()
contig_dict["md5"] = contig_md5
contig_set_md5_list.append(contig_md5)
contig_dict["is_circular"] = "unknown"
contig_dict["start_position"] = sequence_start
contig_dict["num_bytes"] = sequence_stop - sequence_start
# print "Sequence Start: " + str(sequence_start) + "Fasta: " + fasta_key
# print "Sequence Stop: " + str(sequenc |
zhendilin/django-block-ip | block_ip/models.py | Python | bsd-3-clause | 866 | 0.001155 | import ipcalc
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache
from django.db.models.signals import post_save, post_delete
class BlockIP(models.Model):
    """One banned IP address or network mask.

    (Reconstructs two extraction-garbled translatable labels; no behavior
    change intended.)
    """
    # Single address or CIDR mask, e.g. "10.0.0.0/8" (max 18 chars).
    network = models.CharField(_('IP address or mask'), max_length=18)
    reason_for_block = models.TextField(blank=True, null=True, help_text=_("Optional reason for block"))

    def __unicode__(self):
        return 'BlockIP: %s' % self.network

    def get_network(self):
        # ipcalc understands both single addresses and CIDR masks.
        return ipcalc.Network(self.network)

    class Meta:
        verbose_name = _('IPs & masks to ban')
        verbose_name_plural = _('IPs & masks to ban')
def _clear_cache(sender, instance, **kwargs):
    """Refresh the cached block list whenever a BlockIP row is saved or deleted."""
    blocked = BlockIP.objects.all()
    cache.set('blockip:list', blocked)


post_save.connect(_clear_cache, sender=BlockIP)
post_delete.connect(_clear_cache, sender=BlockIP)
|
Dai-trying/daixmms2client | connector.py | Python | gpl-3.0 | 1,747 | 0 |
# Copyright 2016 by Dai Trying
#
# This file is part of daixmms2client.
#
# daixmms2client is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# daixmms2client is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with daixmms2client. If not, see <http://www.gnu.org/licenses/>.
from PyQt5.QtCore import QObject, QSocketNotifier
class XMMSConnector(QObject):
    """Bridges an xmmsclient connection into the Qt event loop.

    Two QSocketNotifiers watch the client's file descriptor: the read
    notifier stays enabled, while the write notifier is switched on only
    while the client reports pending outgoing data.
    """
    def __init__(self, xmms):
        QObject.__init__(self)
        self.xmms = xmms
        fd = xmms.get_fd()
        # Let the client tell us whenever its wants-to-write state changes.
        self.xmms.set_need_out_fun(self.checkWrite)
        self.rSock = QSocketNotifier(fd, QSocketNotifier.Read, self)
        self.rSock.activated.connect(self.handleRead)
        self.rSock.setEnabled(True)
        self.wSock = QSocketNotifier(fd, QSocketNotifier.Write, self)
        self.wSock.activated.connect(self.handleWrite)
        self.wSock.setEnabled(False)

    def checkWrite(self, i):
        # Mirror the client's outgoing-data flag onto the write notifier.
        self.toggleWrite(bool(self.xmms.want_ioout()))

    def toggleRead(self, bool_val):
        self.rSock.setEnabled(bool_val)

    def toggleWrite(self, bool_val):
        self.wSock.setEnabled(bool_val)

    def handleRead(self, i):
        self.xmms.ioin()

    def handleWrite(self, i):
        self.xmms.ioout()
|
uclouvain/OSIS-Louvain | learning_unit/api/serializers/learning_unit.py | Python | agpl-3.0 | 6,633 | 0.002413 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.conf import settings
from rest_framework import serializers
from base.models.enums.summary_status import SummaryStatus
from base.models.learning_unit_year import LearningUnitYear
from learning_unit.api.serializers.campus import LearningUnitCampusSerializer
from learning_unit.api.serializers.component import LearningUnitComponentSerializer
from learning_unit.api.serializers.utils import LearningUnitHyperlinkedIdentityField, \
LearningUnitHyperlinkedRelatedField
class LearningUnitTitleSerializer(serializers.ModelSerializer):
    # Localised full title of the learning unit year (see get_title).
    title = serializers.SerializerMethodField()

    class Meta:
        model = LearningUnitYear
        fields = (
            'title',
        )

    def get_title(self, learning_unit_year):
        """Return the full title in the language given by the context.

        French reads the bare ``full_title`` attribute; other languages read
        ``full_title_<language>``.  NOTE(review): ``language not in
        settings.LANGUAGE_CODE_FR`` is a substring test on the setting
        string, not an equality check -- confirm this is intentional.
        """
        language = self.context['language']
        return getattr(
            learning_unit_year,
            'full_title' + ('_' + language if language not in settings.LANGUAGE_CODE_FR else '')
        )
class LearningUnitSerializer(LearningUnitTitleSerializer):
    # API and back-office links for this learning unit year.
    url = LearningUnitHyperlinkedIdentityField(read_only=True)
    osis_url = serializers.HyperlinkedIdentityField(
        view_name='learning_unit',
        lookup_url_kwarg="learning_unit_year_id",
        read_only=True
    )
    # Human-readable entity acronyms, resolved by model properties.
    requirement_entity = serializers.CharField(
        source='entity_requirement',
        read_only=True
    )
    allocation_entity = serializers.CharField(
        source='entity_allocation',
        read_only=True
    )
    academic_year = serializers.IntegerField(source='academic_year.year')
    # Container type plus its display label; same pattern for subtype.
    type = serializers.CharField(source='learning_container_year.container_type')
    type_text = serializers.CharField(source='get_container_type_display', read_only=True)
    subtype_text = serializers.CharField(source='get_subtype_display', read_only=True)
    has_proposal = serializers.SerializerMethodField()

    class Meta(LearningUnitTitleSerializer.Meta):
        model = LearningUnitYear
        fields = LearningUnitTitleSerializer.Meta.fields + (
            'url',
            'osis_url',
            'acronym',
            'academic_year',
            'credits',
            'status',
            'requirement_entity',
            'allocation_entity',
            'type',
            'type_text',
            'subtype',
            'subtype_text',
            'has_proposal',
        )

    def get_has_proposal(self, learning_unit_year):
        # NOTE(review): presumably annotated onto the queryset by the view;
        # defaults to None when the annotation is absent.
        return getattr(learning_unit_year, "has_proposal", None)
class LearningUnitDetailedSerializer(LearningUnitSerializer):
    """Full detail of one learning unit year (extends the list serializer).

    (Fixes two extraction-garbled field-name strings in Meta.fields:
    'english_friendly' and 'remark_en'.)
    """
    periodicity_text = serializers.CharField(source='get_periodicity_display', read_only=True)
    quadrimester_text = serializers.CharField(source='get_quadrimester_display', read_only=True)
    language = serializers.CharField(source='language.code', read_only=True)
    team = serializers.BooleanField(source='learning_container_year.team', read_only=True)
    campus = LearningUnitCampusSerializer(read_only=True)
    components = LearningUnitComponentSerializer(many=True, source='learningcomponentyear_set', read_only=True)
    parent = LearningUnitHyperlinkedRelatedField(read_only=True, lookup_field='acronym')
    partims = LearningUnitHyperlinkedRelatedField(read_only=True, many=True, source='get_partims_related')
    proposal = serializers.SerializerMethodField()
    summary_status = serializers.SerializerMethodField()
    remark = serializers.CharField(source='other_remark', read_only=True)
    remark_en = serializers.CharField(source='other_remark_english', read_only=True)

    class Meta(LearningUnitSerializer.Meta):
        model = LearningUnitYear
        fields = LearningUnitSerializer.Meta.fields + (
            'quadrimester',
            'quadrimester_text',
            'periodicity',
            'periodicity_text',
            'campus',
            'team',
            'language',
            'exchange_students',
            'french_friendly',
            'english_friendly',
            'components',
            'parent',
            'partims',
            'proposal',
            'summary_status',
            'professional_integration',
            'remark',
            'remark_en',
        )

    @staticmethod
    def get_proposal(learning_unit_year):
        """Return folder/type/status of the linked proposal, or {} when none exists."""
        if not hasattr(learning_unit_year, "proposallearningunit"):
            return {}
        return {
            "folder": learning_unit_year.proposallearningunit.folder,
            "type": learning_unit_year.proposallearningunit.get_type_display(),
            "status": learning_unit_year.proposallearningunit.get_state_display(),
        }

    @staticmethod
    def get_summary_status(learning_unit_year):
        """Map the year's summary flags onto a SummaryStatus value
        (modified takes precedence over blocked, then not-modified)."""
        if getattr(learning_unit_year, "summary_status", False):
            return SummaryStatus.MODIFIED.value
        elif learning_unit_year.summary_locked:
            return SummaryStatus.BLOCKED.value
        return SummaryStatus.NOT_MODIFIED.value
class ExternalLearningUnitDetailedSerializer(LearningUnitDetailedSerializer):
    # URL and code of the unit in the partner institution's own catalogue.
    local_url = serializers.CharField(source='externallearningunityear.url')
    local_code = serializers.CharField(source='externallearningunityear.external_acronym')

    class Meta(LearningUnitDetailedSerializer.Meta):
        model = LearningUnitYear
        fields = LearningUnitDetailedSerializer.Meta.fields + (
            'local_code',
            'local_url'
        )
|
GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awxkit/awxkit/api/pages/settings.py | Python | apache-2.0 | 1,336 | 0 | from awxkit.api.resources import resources
from . import base
from . import page
class Setting(base.Base):
    """Page object for a single AWX settings-category endpoint.

    (Reconstructs the extraction-garbled base-class reference ``base.Base``.)
    """
    pass
# Every per-category settings endpoint is served by the same Setting page.
page.register_page([resources.setting,
                    resources.settings_all,
                    resources.settings_authentication,
                    resources.settings_changed,
                    resources.settings_github,
                    resources.settings_github_org,
                    resources.settings_github_team,
                    resources.settings_google_oauth2,
                    resources.settings_jobs,
                    resources.settings_ldap,
                    resources.settings_radius,
                    resources.settings_saml,
                    resources.settings_system,
                    resources.settings_tacacsplus,
                    resources.settings_ui,
                    resources.settings_user,
                    resources.settings_user_defaults], Setting)
class Settings(page.PageList, Setting):
    def get_endpoint(self, endpoint):
        """Navigate to a specific settings category endpoint.

        For example, ``settings.get_endpoint('all')`` walks to
        ``<settings>/all/`` and returns the page object for it.
        """
        base_url = '{0}{1}/'.format(self.endpoint, endpoint)
        return self.walk(base_url)
    # Alias: 'get_setting' reads more naturally at some call sites.
    get_setting = get_endpoint
page.register_page(resources.settings, Settings)
|
ayepezv/GAD_ERP | addons/hr_holidays/tests/test_holidays_flow.py | Python | gpl-3.0 | 11,913 | 0.002434 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.exceptions import AccessError, ValidationError
from odoo.tools import mute_logger, test_reports
from odoo.addons.hr_holidays.tests.common import TestHrHolidaysBase
class TestHolidaysFlow(TestHrHolidaysBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_leave_request_flow(self):
""" Testing leave request flow """
Holidays = self.env['hr.holidays']
HolidaysStatus = self.env['hr.holidays.status']
def _check_holidays_status(holiday_status, ml, lt, rl, vrl):
self.assertEqual(holiday_status.max_leaves, ml,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.leaves_taken, lt,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.remaining_leaves, rl,
'hr_holidays: wrong type days computation')
self.assertEqual(holiday_status.virtual_remaining_leaves, vrl,
'hr_holidays: wrong type days computation')
# HrUser creates some holiday statuses -> crash because only HrManagers should do this
with self.assertRaises(AccessError):
HolidaysStatus.sudo(self.user_hruser_id).create({
'name': 'UserCheats',
'limit': True,
})
# HrManager creates some holiday statuses
HolidayStatusManagerGroup = HolidaysStatus.sudo(self.user_hrmanager_id)
HolidayStatusManagerGroup.create({
'name': 'WithMeetingType',
'limit': True,
'categ_id': self.env['calendar.event.type'].sudo(self.user_hrmanager_id).create({'name': ' | NotLimitedMeetingType'}).id
})
self.holidays_status_1 = HolidayStatusManagerGroup.create({
'name': 'NotLimited',
'limit': True,
})
self.holidays_status_2 = HolidayStatusManagerGroup.create({
'name': 'Limited',
'limit': False,
'double_validation': True,
})
# --------------------------------------------------
# Case1: unlimited type of leave req | uest
# --------------------------------------------------
# Employee creates a leave request for another employee -> should crash
HolidaysEmployeeGroup = Holidays.sudo(self.user_employee_id)
with self.assertRaises(ValidationError):
HolidaysEmployeeGroup.create({
'name': 'Hol10',
'employee_id': self.employee_hruser_id,
'holiday_status_id': self.holidays_status_1.id,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
Holidays.search([('name', '=', 'Hol10')]).unlink()
# Employee creates a leave request in a no-limit category
hol1_employee_group = HolidaysEmployeeGroup.create({
'name': 'Hol11',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1.id,
'date_from': (datetime.today() - relativedelta(days=1)),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
hol1_user_group = hol1_employee_group.sudo(self.user_hruser_id)
self.assertEqual(hol1_user_group.state, 'confirm', 'hr_holidays: newly created leave request should be in confirm state')
# Employee validates its leave request -> should not work
hol1_employee_group.signal_workflow('validate')
self.assertEqual(hol1_user_group.state, 'confirm', 'hr_holidays: employee should not be able to validate its own leave request')
# HrUser validates the employee leave request
hol1_user_group.signal_workflow('validate')
self.assertEqual(hol1_user_group.state, 'validate', 'hr_holidays: validates leave request should be in validate state')
# --------------------------------------------------
# Case2: limited type of leave request
# --------------------------------------------------
# Employee creates a new leave request at the same time -> crash, avoid interlapping
with self.assertRaises(ValidationError):
HolidaysEmployeeGroup.create({
'name': 'Hol21',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_1.id,
'date_from': (datetime.today() - relativedelta(days=1)).strftime('%Y-%m-%d %H:%M'),
'date_to': datetime.today(),
'number_of_days_temp': 1,
})
# Employee creates a leave request in a limited category -> crash, not enough days left
with self.assertRaises(ValidationError):
HolidaysEmployeeGroup.create({
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2.id,
'date_from': (datetime.today() + relativedelta(days=0)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=1)),
'number_of_days_temp': 1,
})
# Clean transaction
Holidays.search([('name', 'in', ['Hol21', 'Hol22'])]).unlink()
# HrUser allocates some leaves to the employee
aloc1_user_group = Holidays.sudo(self.user_hruser_id).create({
'name': 'Days for limited category',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2.id,
'type': 'add',
'number_of_days_temp': 2,
})
# HrUser validates the allocation request
aloc1_user_group.signal_workflow('validate')
aloc1_user_group.signal_workflow('second_validate')
# Checks Employee has effectively some days left
hol_status_2_employee_group = self.holidays_status_2.sudo(self.user_employee_id)
_check_holidays_status(hol_status_2_employee_group, 2.0, 0.0, 2.0, 2.0)
# Employee creates a leave request in the limited category, now that he has some days left
hol2 = HolidaysEmployeeGroup.create({
'name': 'Hol22',
'employee_id': self.employee_emp_id,
'holiday_status_id': self.holidays_status_2.id,
'date_from': (datetime.today() + relativedelta(days=2)).strftime('%Y-%m-%d %H:%M'),
'date_to': (datetime.today() + relativedelta(days=3)),
'number_of_days_temp': 1,
})
hol2_user_group = hol2.sudo(self.user_hruser_id)
# Check left days: - 1 virtual remaining day
_check_holidays_status(hol_status_2_employee_group, 2.0, 0.0, 2.0, 1.0)
# HrUser validates the first step
hol2_user_group.signal_workflow('validate')
self.assertEqual(hol2.state, 'validate1',
'hr_holidays: first validation should lead to validate1 state')
# HrUser validates the second step
hol2_user_group.signal_workflow('second_validate')
self.assertEqual(hol2.state, 'validate',
'hr_holidays: second validation should lead to validate state')
# Check left days: - 1 day taken
_check_holidays_status(hol_status_2_employee_group, 2.0, 1.0, 1.0, 1.0)
# HrManager finds an error: he refuses the leave request
hol2.sudo(self.user_hrmanager_id).signal_workflow('refuse')
self.assertEqual(hol2.state, 'refuse',
'hr_holidays: refuse should lead to refuse state')
# Check left days: 2 days left again
_check_holidays_status(hol_status_2_employee_group, 2.0, 0.0, 2.0, 2.0)
# Annoyed, HrUser tries to fix its error and tries to reset the leave request -> does not work, only HrManager
hol2_user_group.signal_workflow('reset')
self.assertEqual(hol2.stat |
andreivasiliu2211/upm | examples/python/bmp280.py | Python | mit | 2,069 | 0.0058 | #!/usr/bin/python
# Author: Jon Trulson <jtrulson@ics.com>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Soft | ware"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the So | ftware is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time, sys, signal, atexit
import pyupm_bmp280 as sensorObj
# Instantiate a BMP280 instance using default i2c bus and address
sensor = sensorObj.BMP280()
# For SPI, bus 0, you would pass -1 as the address, and a valid pin for CS:
# BMP280(0, -1, 10);
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
    """Signal handler: exit quietly on Ctrl-C instead of printing a stacktrace."""
    raise SystemExit
# This function lets you run code on exit
def exitHandler():
    """atexit hook: announce shutdown and terminate the process (Python 2 file)."""
    print "Exiting"
    sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# Poll the sensor once per second and report temperature (C and F),
# pressure, and the altitude derived from the pressure reading.
while (1):
    sensor.update()
    # getTemperature(True) returns the reading converted to Fahrenheit.
    print "Compensation Temperature:", sensor.getTemperature(), "C /",
    print sensor.getTemperature(True), "F"
    print "Pressure: ", sensor.getPressure(), "Pa"
    print "Computed Altitude:", sensor.getAltitude(), "m"
    print
    time.sleep(1)
|
niliafsari/KSP-SN | Lbolcorr.py | Python | bsd-3-clause | 11,518 | 0.023615 | from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
import matplotlib
import os
import glob
from findSN import *
from matplotlib.ticker import AutoMinorLocator
import sys
sys.path.insert(0, '/home/afsari/')
from SNAP2.Analysis import *
current_path=os.path.dirname(os.path.abspath(__file__))
matplotlib.rcParams['axes.linewidth'] = 1.5 #set the value globally
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['xtick.major.width'] = 2
matplotlib.rcParams['xtick.minor.size'] = 2
matplotlib.rcParams['xtick.minor.width'] = 1.5
matplotlib.rcParams['ytick.major.size'] = 5
matplotlib.rcParams['ytick.major.width'] = 2
matplotlib.rcParams['ytick.minor.size'] = 2
matplotlib.rcParams['ytick.minor.width'] = 1.5
matplotlib.rcParams.update({'font.size': 15})
sys.path.insert(0, '/home/afsari/SuperBoL-master/')
from superbol.luminosity import calc_Lbol
coef = {'B': 4.107, 'V': 2.682, 'I': 1.516, 'i': 1.698}
coef = {'B': 4.315, 'V': 3.315, 'I': 1.940, 'i': 2.086}
fluxes = np.array([4023.0* 1000000.0, 3562.0 * 1000000.0, 2282.0 * 1000000.0])
ebv=0.0
dis=5.6153658728e+26 #cm
sn_name="KSPN2188_v1"
magB = np.load("phot_csv/compiledSN_" + "B" + "_" + sn_name + ".npy")
magB = magB.astype(np.float)
sn1987a = np.genfromtxt ('phot_csv/1987a.csv', delimiter=",")
u_t = magB[:, 0]
u_t[u_t<370] = np.floor(u_t[u_t<370]*100)/100
u_t[u_t>370]=np.floor(u_t[u_t>370])
index=np.argsort(u_t)
u_t=u_t[index]
print u_t
u = magB[:, 3]
u=u[index]
u_e = magB[:, 4]
u_e=u_e[index]
u_app=magB[:, 1]
u_app_err=magB[:, 2]
u_app=u_app[index]
u_app_err=u_app_err[index]
magV = np.load("phot_csv/compiledSN_" + "V" + "_" + sn_name + ".npy")
magV = magV.astype(np.float)
v_t = magV[:, 0]
v_t[v_t<370] = np.floor(v_t[v_t<370]*100)/100
v_t[v_t>370] = np.floor(v_t[v_t>370])
index=np.argsort(v_t)
v_t=v_t[index]
v = magV[:, 3]
v=v[index]
v_app=magV[:, 1]
v_app_err=magV[:, 2]
v_app=v_app[index]
v_app_err=v_app_err[index]
v_e = magV[:, 4]
v_e=v_e[index]
magI = np.load("phot_csv/compiledSN_" + "I" + "_" + sn_name + ".npy")
magI = magI.astype(np.float)
i_t = magI[:, 0]
i_t[i_t<370] = np.floor(i_t[i_t<370]*100)/100
i_t[i_t>370] = np.floor(i_t[i_t>370])
index=np.argsort(i_t)
i_t=i_t[index]
i = magI[:, 3]
i_app=magI[:, 1]
i_app_err=magI[:, 2]
i_app=i_app[index]
i_app_err=i_app_err[index]
i=i[index]
i_e = magI[:, 4]
i_e=i_e[index]
bv_t = np.zeros(shape=(0, 1))
vv = np.zeros(shape=(0, 1))
ve = np.zeros(shape=(0, 1))
bv = np.zeros(shape=(0, 1))
bv_e = np.zeros(shape=(0, 1))
#B-V
# Pair each B-band epoch with the closest V-band epoch (within 0.1 d) and
# accumulate the B-V colour, V magnitude and propagated errors.
# NOTE(review): deredMag presumably applies an extinction correction
# (imported from SNAP2.Analysis) -- confirm against that module.
for index,j in enumerate(u_t):
    print j
    if np.min(np.abs(v_t-j))<=0.1:
        # index of the nearest V epoch to this B epoch
        sub = np.argmin(np.abs(v_t - j))
        bv_t=np.concatenate((bv_t,v_t[sub].reshape((1,1))))
        bv=np.concatenate((bv,deredMag(u_app[index], ebv, coef["B"])-deredMag(v_app[sub].reshape((1,1)), ebv, coef["V"])))
        vv=np.concatenate((vv,deredMag(v_app[sub].reshape((1,1)), ebv, coef["V"])))
        ve = np.concatenate((ve, v_app_err[sub].reshape((1, 1))))
        # colour error: quadrature sum of the B and V magnitude errors
        bv_e=np.concatenate((bv_e,np.sqrt(np.square(v_e[sub].reshape((1,1)))+np.square(u_e[index].reshape((1,1))))))
vi_t = np.zeros(shape=(0, 1))
vi = np.zeros(shape=(0, 1))
vi_e = np.zeros(shape=(0, 1))
Mv= np.zeros(shape=(0, 1))
Mi= np.zeros(shape=(0, 1))
Mv_err = np.zeros(shape=(0, 1))
vv = np.zeros(shape=(0, 1))
#V-I
for index,j in enumerate(v_t):
if (np.min(np.abs(i_t-j))<=1 and j>475) or (np.min(np.abs(i_t-j))<=0.1 and j<=475) :
sub = np.argmin(np.abs(i_t - j))
vi_t=np.concatenate((vi_t,i_t[sub].reshape((1,1))))
vi=np.concatenate((vi,v[index].reshape((1,1))-i[sub].reshape((1,1))))
Mv=np.concatenate((Mv,v[index].reshape((1,1))))
Mi=np.concatenate((Mi,i[sub].reshape((1,1))))
vv = np.concatenate((vv,v_app[index].reshape((1,1))))
ve = np.concatenate((ve, v_app_err[index].reshape((1, 1))))
Mv_err = np.concatenate((Mv_err, v_e[index].reshape((1,1))))
vi_e=np.concatenate((vi_e,np.sqrt(np.square(i_e[sub].reshape((1,1)))+np.square(v_e[index].reshape((1,1))))))
bi_t = np.zeros(shape=(0, 1))
bi = np.zeros(shape=(0, 1))
bi_e = np.zeros(shape=(0, 1))
Mb = np.zeros(shape=(0, 1))
Mb_err = np.zeros(shape=(0, 1))
#B-I
for index, j in enumerate(u_t):
if np.min(np.abs(i_t - j)) <= 0.1:
sub = np.argmin(np.abs(i_t - j))
bi_t = np.concatenate((bi_t, i_t[sub].reshape((1, 1))))
bi = np.concatenate((bi, u[index] - i[sub].reshape((1, 1))))
Mb = np.concatenate((Mb, u[index].reshape((1, 1))))
Mb_err = np.concatenate((Mb_err, u_e[index].reshape((1, 1))))
bi_e = np.concatenate(
(bi_e, np.sqrt(np.square(i_e[sub].reshape((1, 1))) + np.square(u_e[index].reshape((1, 1))))))
# bv_t_early=bv_t[bv_t<363]
# bv_early=bv[bv_t<363]
# vv_early=vv[bv_t<363]
# z = np.polyfit(bv_t_early, bv_early, 3)
# fit = np.poly1d(z)
# bv_new=fit(bv_t_early)
# z = np.polyfit(bv_t_early, vv_early, 3)
# fit = np.poly1d(z)
# vv_new=fit(bv_t_early)
# print bv_new, vv[bv_t<363]
# bv[bv_t<363]=bv_new
#
# length=np.shape(ve)
# lbol_bc=np.zeros(shape=(length[0],1))
# lbol_bc_err=np.zeros(shape=(length[0],1))
# for k,v_mag in enumerate(vv):
# lbol_bc[k], lbol_bc_err[k] = calc_Lbol(bv[5], bv_e[k],'BminusV', vv[k],ve[k], dis, 0)
# print bv[k], lbol_bc[k],v_mag, bv_t[k]
# ax=plt.subplot(111)
#
# plt.scatter(bv_t,lbol_bc,color='blue')
# plt.xlabel('Time [days]')
# plt.ylabel('L_{bol}[erg/s]')
#
# plt.tick_params(labelsize=20)
# plt.show()
length=np.shape(Mv)
lbol_bc=np.zeros(shape=(length[0],1))
Mbol=np.zeros(shape=(length[0],1))
lbol_bc_err=np.zeros(shape=(length[0],1))
Msun=4.74
Lsun=3.828e33
# for k, M_mag in enumerate(Mb):
# if (bi_t[k]>=363):
# Mbol[k] = M_mag + 0.004 - 0.297 * (bi[k]) - 0.149 * (np.square(bi[k]))
# else:
# Mbol[k] = M_mag - 0.473 + 0.830*bi[k] - 1.064*(np.square(bi[k]))
# print Mbol[k]
# lbol_bc[k]=Lsun*np.power(10,((Msun-Mbol[k])/2.5))
vi_t_late=vi_t[vi_t>=474]
Mv_late=Mv[vi_t>=474]
print np.shape(vv)
vv_late=vv[vi_t>=474]
print np.shape(vv_late), np.shape(Mv_late)
Mi_late=Mi[vi_t>=474]
z = np.polyfit(vi_t_late, Mv_late, 1)
fit = np.poly1d(z)
z1 = np.polyfit(vi_t_late, vv_late, 1)
fit1 = np.poly1d(z1)
zz = np.polyfit(vi_t_late, Mi_late, 1)
fit_1 = np.poly1d(zz)
Mv_new=fit(vi_t_late)
Mi_new=fit_1(vi_t_late)
vv_new=fit1(vi_t_late)
zz = np.polyfit(vi_t_late, Mv_new-Mi_new, 1)
fit_1 = np.poly1d(zz)
vi[vi_t>=474]=Mv_new-Mi_new
Mv[vi_t>=474]=Mv_new
Mi[vi_t>=474]=Mi_new
vv[vi_t>=474]=vv_new
print np.shape(Mv_new)
# ax=plt.subplot(111)
#
# plt.scatter(vi_t-vi_t[0],Mv,color='blue',label='M_V')
# plt.scatter(vi_t-vi_t[0],Mi,color='red',label='M_I')
# plt.scatter(vi_t_late-vi_t[0],Mv_new,color='black')
# plt.scatter(vi_t_late-vi_t[0],Mi_new,color='black')
# Mv[vi_t>=474]=Mv_new
# Mi[vi_t>=474]=Mi_new
# plt.xlabel('Time [days]')
# plt.ylabel('Absolute Mag')
# plt.gca().invert_yaxis()
# plt.tick_params(labelsize=20)
# plt.legend(loc='best',ncol=6, fancybox=True,fontsize=12)
# plt.show()
# z = np.polyfit(bv_t_early, vv_early, 3)
# fit = np.poly1d(z)
# vv_new=fit(bv_t_early)
# print bv_new, vv[bv_t<363]
# bv[bv_t<363]=bv_new
#Sn1987a
l1=41.55
l2=41.45
t1=0
t2=20
alpha_co=-1/111.26
beta_co=43
co=np.zeros(shape=(2,))
co[1]=beta_co
co[0]=alpha_co
fit_3 = np.poly1d(co)
alpha_ni=-1/6.1
beta_ni=44
ni=np.zeros(shape=(2,))
ni[1]=beta_ni
ni[0]=alpha_ni
fit_4 = np.poly1d(ni)
alpha=(l1-l2)/(t1-t2) |
beta=l2+(-alpha*t2)
zz[1]=beta
zz[0]=alpha
fit_2 = np.poly1d(zz)
# Mv=v
# vi_t=v_t
# vi_t_late=vi_t[vi_t>=474]
# Mv_late=Mv[vi_t>=474]
# z = np.polyfit(vi_t_late, Mv_late, 1)
# fit = np.poly1d(z)
# Mv_new=fit(vi_t_late)
# Mv[vi_t>=474]=Mv_new
print np.shape(Mv)
for k, M_mag in enumerate(Mv):
if (1):
#Lyman
if (vi[ | k]>0.4):
Mbol[k] = M_mag + 0.057 + 0.708 * (vi[k]) - 0.912 * (np.square(vi[k]))
else:
Mbol[k] = M_mag-0.61 + 2.244 * (vi[k]) - 2.107 * (np.square(vi[k]))
#V-R
# Mbol[k] = M_mag + 0.073 + 0.902 * (vi[k]) - 1.796 * (np.square(vi[k]))
# Mbol[k] = M_mag + 0.059 + 0.744 * (vi[k]) - 0.953 * (np.square(vi[k]))
#Hamuy
# Mbol[k] = M_mag-1.3555 + 6.262 * (vi[k]) - 2.676 * (np.square(vi[k])) -22.973*np.power(vi[k],3) +35.542 |
yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/synapse/manual/operations/accesscontrol.py | Python | mit | 11,030 | 0.004624 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError
from azure.cli.core.util import is_guid
from azure.graphrbac.models import GraphErrorException
from msrestazure.azure_exceptions import CloudError
from .._client_factory import cf_synapse_role_assignments, cf_synapse_role_definitions, cf_graph_client_factory
from ..constant import ITEM_NAME_MAPPING
# List Synapse Role Assignment
def list_role_assignments(cmd, workspace_name, role=None, assignee=None, assignee_object_id=None,
                          scope=None, item=None, item_type=None):
    """List Synapse role assignments, optionally filtered by role, assignee,
    scope and item.

    At most one of --assignee / --assignee-object-id may be supplied, and
    --item and --item-type must be supplied together.
    """
    if assignee and assignee_object_id:
        raise ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')
    if bool(item) != bool(item_type):
        raise ArgumentUsageError('usage error: --item-type STRING --item STRING')
    # When an object id was given directly, skip the Graph API lookup.
    principal = assignee if assignee else assignee_object_id
    return _list_role_assignments(cmd, workspace_name, role, principal, scope,
                                  resolve_assignee=(not assignee_object_id),
                                  item=item, item_type=item_type)
def _list_role_assignments(cmd, workspace_name, role=None, assignee=None, scope=None,
                           resolve_assignee=True, item=None, item_type=None):
    """Prepare scope, role ID and resolve object ID from Graph API."""
    # Only narrow the scope when the caller supplied any scoping argument;
    # otherwise the service default (workspace-wide) applies.
    if any([scope, item, item_type]):
        scope = _build_role_scope(workspace_name, scope, item, item_type)
    role_id = _resolve_role_id(cmd, role, workspace_name)
    # resolve_assignee=False means the caller already passed an object-id GUID.
    object_id = _resolve_object_id(cmd, assignee, fallback_to_object_id=True) if resolve_assignee else assignee
    client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    role_assignments = client.list_role_assignments(role_id, object_id, scope).value
    return role_assignments
# Show Synapse Role Assignment By Id
def get_role_assignment_by_id(cmd, workspace_name, role_assignment_id):
    """Fetch a single Synapse role assignment by its GUID."""
    return cf_synapse_role_assignments(cmd.cli_ctx, workspace_name) \
        .get_role_assignment_by_id(role_assignment_id)
# Delete Synapse Role Assignment
def delete_role_assignment(cmd, workspace_name, ids=None, assignee=None, assignee_object_id=None, role=None,
                           scope=None, item=None, item_type=None):
    """Delete role assignments either by explicit ids (--ids) or by a
    role/assignee/scope filter; raises if nothing matches."""
    client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    if not any([ids, assignee, assignee_object_id, role, scope, item, item_type]):
        raise ArgumentUsageError('usage error: No argument are provided. --assignee STRING | --ids GUID')
    if ids:
        # --ids is exclusive with every filter argument.
        if any([assignee, assignee_object_id, role, scope, item, item_type]):
            raise ArgumentUsageError('You should not provide --role or --assignee or --assignee_object_id '
                                     'or --scope or --principal-type when --ids is provided.')
        role_assignments = list_role_assignments(cmd, workspace_name, None, None, None, None, None, None)
        assignment_id_list = [x.id for x in role_assignments]
        # check role assignment id
        for assignment_id in ids:
            if assignment_id not in assignment_id_list:
                raise ArgumentUsageError("role assignment id:'{}' doesn't exist.".format(assignment_id))
        # delete when all ids check pass
        for assignment_id in ids:
            client.delete_role_assignment_by_id(assignment_id)
        return
    role_assignments = list_role_assignments(cmd, workspace_name, role, assignee, assignee_object_id,
                                             scope, item, item_type)
    # When a scope was supplied, only delete assignments at exactly that scope.
    if any([scope, item, item_type]):
        scope = _build_role_scope(workspace_name, scope, item, item_type)
        role_assignments = [x for x in role_assignments if x.scope == scope]
    if role_assignments:
        for assignment in role_assignments:
            client.delete_role_assignment_by_id(assignment.id)
    else:
        raise CLIError('No matched assignments were found to delete, please provide correct --role or --assignee.'
                       'Use `az synapse role assignment list` to get role assignments.')
def create_role_assignment(cmd, workspace_name, role, assignee=None, assignee_object_id=None,
                           scope=None, assignee_principal_type=None, item_type=None, item=None, assignment_id=None):
    """Check parameters are provided correctly, then call _create_role_assignment."""
    if assignment_id and not is_guid(assignment_id):
        raise InvalidArgumentValueError('usage error: --id GUID')
    # Exactly one of --assignee / --assignee-object-id must be given.
    if bool(assignee) == bool(assignee_object_id):
        raise ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')
    # A principal type only makes sense alongside an explicit object id.
    if assignee_principal_type and not assignee_object_id:
        raise ArgumentUsageError('usage error: --assignee-object-id GUID [--assignee-principal-type]')
    if bool(item) != bool(item_type):
        raise ArgumentUsageError('usage error: --item-type STRING --item STRING')
    try:
        return _create_role_assignment(cmd, workspace_name, role, assignee or assignee_object_id, scope, item,
                                       item_type, resolve_assignee=(not assignee_object_id),
                                       assignee_principal_type=assignee_principal_type, assignment_id=assignment_id)
    except Exception as ex:  # pylint: disable=broad-except
        # If the assignment already exists, return it instead of failing,
        # so repeated invocations are idempotent.
        if _error_caused_by_role_assignment_exists(ex):  # for idempotent
            return list_role_assignments(cmd, workspace_name, role=role,
                                         assignee=assignee, assignee_object_id=assignee_object_id,
                                         scope=scope, item=item, item_type=item_type)
        raise
def _resolve_object_id(cmd, assignee, fallback_to_object_id=False):
    """Resolve *assignee* (UPN, mail, display name or GUID) to an AAD object id.

    Lookup order: users, then service principals, then groups, and finally a
    raw GUID verified via object stubs. Raises CLIError on zero or multiple
    matches; on Graph errors a GUID assignee may be returned as-is when
    fallback_to_object_id is set.
    """
    if assignee is None:
        return None
    client = cf_graph_client_factory(cmd.cli_ctx)
    result = None
    try:
        result = list(client.users.list(filter="userPrincipalName eq '{0}' or mail eq '{0}' or displayName eq '{0}'"
                                        .format(assignee)))
        if not result:
            result = list(client.service_principals.list(filter="displayName eq '{}'".format(assignee)))
        if not result:
            result = list(client.groups.list(filter="mail eq '{}'".format(assignee)))
        if not result and is_guid(assignee):  # assume an object id, let us verify it
            result = _get_object_stubs(client, [assignee])
        # 2+ matches should never happen, so we only check 'no match' here
        if not result:
            raise CLIError("Cannot find user or group or service principal in graph database for '{assignee}'. "
                           "If the assignee is a principal id, make sure the corresponding principal is created "
                           "with 'az ad sp create --id {assignee}'.".format(assignee=assignee))
        if len(result) > 1:
            raise CLIError("Find more than one user or group or service principal in graph database for '{assignee}'. "
                           "Please using --assignee-object-id GUID to specify assignee accurately"
                           .format(assignee=assignee))
        return result[0].object_id
    except (CloudError, GraphErrorException):
        if fallback_to_object_id and is_guid(assignee):
            return assignee
        raise
def _get_object_stubs(graph_client, assignees):
    """Resolve directory object stubs for *assignees*, batching at most
    1000 object ids per Graph call."""
    from azure.graphrbac.models import GetObjectsParameters
    ids = list(assignees)  # callers could pass in a set; slicing needs a list
    stubs = []
    for start in range(0, len(ids), 1000):
        params = GetObjectsParameters(include_directory_object_references=True,
                                      object_ids=ids[start:start + 1000])
        stubs.extend(graph_client.objects.get_objects_by_object_ids(params))
    return stubs
def _error_caused_by_role_assignment_exists(ex):
return getattr(ex, 'status_code |
hhj0325/pystock | com/hhj/baihuabigdata/demo8.py | Python | apache-2.0 | 1,943 | 0.003603 | """
page
222
225
226
227
"""
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
import datetime
def print_result(name, docs, predicted, trains):
    """Print the classifier *name*, one line per document with its predicted
    category name, and a '----' divider."""
    lines = [name]
    lines.extend('%r =>%s' % (text, trains.target_names[label])
                 for text, label in zip(docs, predicted))
    lines.append('----')
    print('\n'.join(lines))
# Restrict the 20-newsgroups corpus to four categories to keep the demo fast.
categories = ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian']
twenty_train = fetch_20newsgroups(subset='train', categories=categories)
# Bag-of-words counts, then TF-IDF weighting of the training documents.
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(twenty_train.data)
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
# Two unseen documents, transformed with the *fitted* vectorizer/transformer.
docs_new = ['God is love', 'OpenGl on the GPU is fast']
X_new_counts = count_vect.transform(docs_new)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
# Timestamps around each classifier show relative train+predict cost.
print(datetime.datetime.now())
predicted1 = NearestCentroid().fit(X_train_tfidf, twenty_train.target).predict(X_new_tfidf)
print_result('NearestCentroid', docs_new, predicted1, twenty_train)
print(datetime.datetime.now())
predicted2 = MultinomialNB().fit(X_train_tfidf, twenty_train.target).predict(X_new_tfidf)
print_result('MultinomialNB', docs_new, predicted2, twenty_train)
print(datetime.datetime.now())
predicted3 = KNeighborsClassifier(15).fit(X_train_tfidf, twenty_train.target).predict(X_new_tfidf)
print_result('KNeighborsClassifier', docs_new, predicted3, twenty_train)
print(datetime.datetime.now())
predicted4 = svm.SVC(kernel='linear').fit(X_train_tfidf, twenty_train.target).predict(X_new_tfidf)
print_result('svm.SVC', docs_new, predicted4, twenty_train)
print(datetime.datetime.now())
flaviovdf/vodlibs | vod/test/test_entropy.py | Python | mit | 3,236 | 0.005253 | # -*- coding: utf8
from __future__ import print_function, division
from vod import entropy
import numpy as np
import math
import unittest
#Calculates the entropy iteratively.
def it_entropy(probs):
    """Shannon entropy (base 2) of a probability vector; zero-probability
    entries contribute nothing (0 * log 0 is taken as 0)."""
    ent = 0.0
    for prob in filter(None, probs):  # filter(None, ...) skips exact zeros
        ent -= prob * math.log(prob, 2)
    return ent
class TestEntropy(unittest.TestCase):
    """Unit tests for the vod.entropy helpers, cross-checked against the
    straightforward iterative implementation above."""

    def test_entropy(self):
        """entropy() matches the iterative definition and rejects bad input."""
        probs = np.array([0.1, 0.5, 0.01, 0.07, 0.02, 0.3, 0, 0, 0], dtype='d')
        # assertEquals was a deprecated alias, removed in Python 3.12.
        self.assertEqual(entropy.entropy(probs), it_entropy(probs))
        # Invalid inputs must raise. The previous try/self.fail()/except
        # AssertionError pattern silently passed even when no error was
        # raised, because self.fail() itself raises AssertionError and was
        # swallowed by the except clause; assertRaises has no such blind spot.
        with self.assertRaises(AssertionError):
            entropy.entropy(np.array([-1], dtype='d'))
        with self.assertRaises(AssertionError):
            entropy.entropy(np.array([0.1, 0.8], dtype='d'))
        with self.assertRaises(AssertionError):
            entropy.entropy(np.array([2, -1], dtype='d'))
        with self.assertRaises(AssertionError):
            entropy.entropy(np.array([], dtype='d'))

    def test_norm_mi(self):
        """Normalized mutual information: 1 - (H(X) - H(X|Y)) / H(X)."""
        x_probs = np.array([0.04, 0.16] * 5)
        xy_probs = np.array([0.02, 0.18] * 5)
        h_x = it_entropy(x_probs)
        h_y = it_entropy(xy_probs)
        mutual_inf = 1 - (h_x - h_y) / h_x
        self.assertEqual(entropy.norm_mutual_information(x_probs, xy_probs),
                         mutual_inf)
        # A deterministic X (H(X) == 0) yields zero normalized MI.
        x_probs = np.array([1], dtype='d')
        self.assertEqual(entropy.norm_mutual_information(x_probs, xy_probs), 0)

    def test_mi(self):
        """Mutual information is the entropy difference H(X) - H(X|Y)."""
        x_probs = np.array([0.04, 0.16] * 5)
        xy_probs = np.array([0.02, 0.18] * 5)
        h_x = it_entropy(x_probs)
        h_y = it_entropy(xy_probs)
        mutual_inf = h_x - h_y
        self.assertAlmostEqual(entropy.mutual_information(x_probs, xy_probs),
                               mutual_inf)

    def test_kl(self):
        """KL divergence agrees with the direct sum over all entries."""
        x_probs = np.array([0.04, 0.16] * 5)
        xy_probs = np.array([0.02, 0.18] * 5)
        dkl = 0
        for i in range(len(x_probs)):
            div = x_probs[i] / xy_probs[i]
            dkl += x_probs[i] * math.log(div, 2)
        self.assertAlmostEqual(entropy.kullback_leiber_divergence(x_probs,
                                                                  xy_probs),
                               dkl)

    def test_kl2(self):
        """Trailing zero probabilities in both vectors contribute nothing."""
        x_probs = np.array([0.04, 0.16] * 5 + [0])
        xy_probs = np.array([0.02, 0.18] * 5 + [0])
        dkl = 0
        for i in range(len(x_probs) - 1):
            div = x_probs[i] / xy_probs[i]
            dkl += x_probs[i] * math.log(div, 2)
        self.assertAlmostEqual(entropy.kullback_leiber_divergence(x_probs,
                                                                  xy_probs),
                               dkl)

    def test_kl3(self):
        """A zero in Q where P is non-zero makes the divergence infinite."""
        x_probs = np.array([0.25, 0.20, 0, 0.55])
        xy_probs = np.array([0.20, 0, 0.25, 0.55])
        self.assertAlmostEqual(entropy.kullback_leiber_divergence(x_probs,
                                                                  xy_probs),
                               float('inf'))
|
haard/quarterapp | quarterapp/app.py | Python | mit | 3,685 | 0.012754 | #
# Copyright (c) 2013 Markus Eliasson, http://www.quarterapp.com/
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import datetime
import logging
import tornado.web
from tornado.options import options
from tornado.web import HTTPError
from basehandlers import *
from storage import *
from quarter_errors import *
from quarter_utils import *
class ActivityHandler(AuthenticatedHandler):
    """Renders the activity overview page for the logged-in user."""
    @authenticated_user
    def get(self):
        """Fetch the current user's activities and render the activities page."""
        user_id = self.get_current_user_id()
        activities = get_activities(self.application.db, user_id)
        self.render(u"app/activities.html", activities = activities)
class SheetHandler(AuthenticatedHandler):
    """Renders the quarter sheet for a given date (defaults to today)."""

    def default_sheet(self):
        """Return a blank sheet: 96 quarters (24 hours x 4) with no activity."""
        quarters = []
        for i in range(0, 96):
            quarters.append({ "id" : -1, "color" : "#fff", "border-color" : "#ccc"})
        return quarters

    @authenticated_user
    def get(self, date = None):
        """Render the sheet for *date* ("YYYY-MM-DD"), or today when omitted.

        Raises HTTPError(500) when the date cannot be parsed.
        """
        user_id = self.get_current_user_id()
        date_obj = None
        today = datetime.date.today()
        if date:
            try:
                parts = date.split("-")
                if len(parts) != 3:
                    # Bug fix: this previously raised the misspelled name
                    # "ValueErrror" (a NameError), which the bare except
                    # below silently masked.
                    raise ValueError("Date should be in YYYY-MM-DD")
                date_obj = datetime.date(int(parts[0]), int(parts[1]), int(parts[2]))
            except Exception:
                # Narrowed from a bare except; still converts any parse
                # failure (ValueError from int()/date()) into a 500.
                logging.warning("Could not verify date")
                raise HTTPError(500)
        else:
            date_obj = today
        yesterday = date_obj - datetime.timedelta(days = 1)
        tomorrow = date_obj + datetime.timedelta(days = 1)
        weekday = date_obj.strftime("%A")
        activities = get_activities(self.application.db, user_id)
        # Create a dict representation of the list of activities, to quicker resolve colors
        # for cells.
        activity_dict = get_dict_from_sequence(activities, "id")
        sheet = get_sheet(self.application.db, user_id, date)
        quarters = []
        if sheet:
            # The sheet is stored as a comma-separated list of activity ids;
            # -1 marks an empty quarter.
            ids = sheet.split(',')
            for i in ids:
                if int(i) > -1:
                    color = activity_dict[int(i)]["color"]
                    border_color = "#ccc" # darken color
                    quarters.append({ "id" : i, "color" : color, "border-color" : border_color})
                else:
                    quarters.append({ "id" : i, "color" : "#fff", "border-color" : "#ccc"})
        else:
            quarters = self.default_sheet()
        self.render(u"app/sheet.html", date = date_obj, weekday = weekday,
                    today = today, yesterday = yesterday, tomorrow = tomorrow,
                    activities = activities, quarters = quarters)
|
tyb0807/angr | angr/engines/vex/irop.py | Python | bsd-2-clause | 36,944 | 0.003681 | """
This module contains symbolic implementations of VEX operations.
"""
import re
import sys
import collections
import itertools
import operator
import logging
l = logging.getLogger("angr.engines.vex.irop")
import pyvex
import claripy
#
# The more sane approach
#
def op_attrs(p):
    # Parse a VEX operation name (e.g. 'Iop_Add32', 'Iop_F64toI32S') into a
    # dict of attributes using one large regex. Returns None when the name
    # does not follow the expected 'Iop_' naming scheme.
    m = re.match(r'^Iop_'
                 r'(?P<generic_name>\D+?)??'
                 r'(?P<from_type>[IFDV])??'
                 r'(?P<from_signed>[US])??'
                 r'(?P<from_size>\d+)??'
                 r'(?P<from_signed_back>[US])??'
                 # this screws up CmpLE: r'(?P<e_flag>E)??' \
                 r'('
                 r'(?P<from_side>HL|HI|L|LO)??'
                 r'(?P<conversion>to|as)'
                 r'(?P<to_type>Int|I|F|D|V)??'
                 r'(?P<to_size>\d+)??'
                 r'(?P<to_signed>[US])??'
                 r')??'
                 r'(?P<vector_info>\d+U?S?F?0?x\d+)??'
                 r'(?P<rounding_mode>_R([ZPNM]))?$',
                 p
                 )
    if not m:
        l.debug("Unmatched operation: %s", p)
        return None
    else:
        l.debug("Matched operation: %s", p)
        attrs = m.groupdict()

        # The signedness marker can appear before or after the size digits;
        # collapse the two capture groups into the single 'from_signed' key.
        attrs['from_signed'] = attrs['from_signed_back'] if attrs['from_signed'] is None else attrs['from_signed']
        attrs.pop('from_signed_back', None)
        if attrs['generic_name'] == 'CmpOR':
            # 'CmpORD' is mis-split by the regex into CmpOR + from_type 'D';
            # undo that here.
            assert attrs['from_type'] == 'D'
            attrs['generic_name'] = 'CmpORD'
            attrs['from_type'] = None

        # fix up vector stuff: split e.g. '8Ux16' into size/signed/type/count.
        vector_info = attrs.pop('vector_info', None)
        if vector_info:
            vm = re.match(r'^(?P<vector_size>\d+)?'
                          r'(?P<vector_signed>[US])?'
                          r'(?P<vector_type>[FD])?'
                          r'(?P<vector_zero>0)?'
                          r'x'
                          r'(?P<vector_count>\d+)?$',
                          vector_info
                          )
            attrs.update(vm.groupdict())

        for k,v in attrs.items():
            if v is not None and v != "":
                l.debug("... %s: %s", k, v)

        return attrs
# All VEX operation names known to pyvex.
all_operations = pyvex.irop_enums_to_ints.keys()
# Filled by make_operations(): op name -> SimIROp instance.
operations = { }
# Bookkeeping for how each operation name was handled.
classified = set()
unclassified = set()
unsupported = set()
# Hand-written attribute overrides for names the op_attrs() regex mis-parses.
explicit_attrs = {
    'Iop_64x4toV256': {
        'generic_name': '64x4',
        'to_size': 256,
    },
    'Iop_Yl2xF64': {
        'generic_name': 'Yl2x',
        'to_size': 64,
    },
    'Iop_Yl2xp1F64': {
        'generic_name': 'Yl2xp1',
        'to_size': 64,
    },
}

# Pre-register all InterleaveHI variants for 64/128-bit vectors.
for _vec_lanewidth in (8, 16, 32, 64):
    for _vec_width in (64, 128):
        _vec_count = _vec_width // _vec_lanewidth
        if _vec_count <= 1:
            continue
        # the regex thinks the I is an integral descriptor
        explicit_attrs['Iop_InterleaveHI%dx%d' % (_vec_lanewidth, _vec_count)] = {
            'generic_name': 'InterleaveHI',
            'to_size': _vec_width,
            'vector_size': _vec_lanewidth,
            'vector_count': _vec_count,
        }
def make_operations():
    """Build a SimIROp for every VEX operation pyvex knows about.

    Populates the module-level `operations` dict and the `classified`,
    `unclassified` and `unsupported` bookkeeping sets.
    """
    for p in all_operations:
        if p in ('Iop_INVALID', 'Iop_LAST'):
            continue

        if p in explicit_attrs:
            # Names the op_attrs() regex is known to mis-parse.
            attrs = explicit_attrs[p]
        else:
            attrs = op_attrs(p)

        if attrs is None:
            unclassified.add(p)
        else:
            classified.add(p)
            try:
                operations[p] = SimIROp(p, **attrs)
            except SimOperationError:
                # Parsed fine but no symbolic implementation is available.
                unsupported.add(p)

    l.debug("%d matched (%d supported) and %d unmatched operations", len(classified), len(operations), len(unclassified))
# VEX generic op name -> claripy AST method used to implement it.
arithmetic_operation_map = {
    'Add': '__add__',
    'Sub': '__sub__',
    'Mul': '__mul__',
    'Div': '__div__',
    'Neg': 'Neg',
    'Abs': 'Abs',
    'Mod': '__mod__',
}
# 'Shr' is a logical (zero-fill) shift; 'Sar' is arithmetic (sign-extending).
shift_operation_map = {
    'Shl': '__lshift__',
    'Shr': 'LShR',
    'Sar': '__rshift__',
}
bitwise_operation_map = {
    'Xor': '__xor__',
    'Or': '__or__',
    'And': '__and__',
    'Not': '__invert__',
}
# VEX rounding-mode immediate value -> claripy FP rounding mode.
rm_map = {
    0: claripy.fp.RM_RNE,
    1: claripy.fp.RM_RTN,
    2: claripy.fp.RM_RTP,
    3: claripy.fp.RM_RTZ,
}

# Module-level bookkeeping filled in while operations are classified.
generic_names = set()
conversions = collections.defaultdict(list)
unsupported_conversions = [ ]
add_operations = [ ]
other_operations = [ ]
vector_operations = [ ]
fp_ops = set()
common_unsupported_generics = collections.Counter()
def supports_vector(f):
    """Decorator marking an operation implementation as vector-capable."""
    setattr(f, 'supports_vector', True)
    return f
class SimIROp(object):
"""
A symbolic version of a Vex IR operation.
"""
def __init__(self, name, **attrs):
l.debug("Creating SimIROp(%s)", name)
self.name = name
self.op_attrs = attrs
self._generic_name = None
self._from_size = None
| self._from_side = None
self._from_type = None
self._from_signed = None
self._to_size = None
self._to_type = None
self._to_signed = None
self._conversion = None
self._vector_size = None
self._vector_signed | = None
self._vector_type = None
self._vector_zero = None
self._vector_count = None
self._rounding_mode = None
for k,v in self.op_attrs.items():
if v is not None and ('size' in k or 'count' in k):
v = int(v)
setattr(self, '_%s'%k, v)
# determine the output size
#pylint:disable=no-member
self._output_type = pyvex.get_op_retty(name)
#pylint:enable=no-member
self._output_size_bits = pyvex.const.get_type_size(self._output_type)
l.debug("... VEX says the output size should be %s", self._output_size_bits)
size_check = self._to_size is None or (self._to_size*2 if self._generic_name == 'DivMod' else self._to_size) == self._output_size_bits
if not size_check:
raise SimOperationError("VEX output size doesn't match detected output size")
#
# Some categorization
#
generic_names.add(self._generic_name)
if self._conversion is not None:
conversions[(self._from_type, self._from_signed, self._to_type, self._to_signed)].append(self)
if len({self._vector_type, self._from_type, self._to_type} & {'F', 'D'}) != 0:
self._float = True
if len({self._vector_type, self._from_type, self._to_type} & {'D'}) != 0:
l.debug('... aborting on BCD!')
# fp_ops.add(self.name)
raise UnsupportedIROpError("BCD ops aren't supported")
else:
self._float = False
#
# Now determine the operation
#
self._calculate = None
# is it explicitly implemented?
if hasattr(self, '_op_' + name):
self._calculate = getattr(self, '_op_' + name)
# if the generic name is None and there's a conversion present, this is a standard
# widening or narrowing or sign-extension
elif self._generic_name is None and self._conversion:
# convert int to float
if self._float and self._from_type == 'I':
self._calculate = self._op_int_to_fp
# convert float to differently-sized float
elif self._from_type == 'F' and self._to_type == 'F':
self._calculate = self._op_fp_to_fp
elif self._from_type == 'F' and self._to_type == 'I':
self._calculate = self._op_fp_to_int
# this concatenates the args into the high and low halves of the result
elif self._from_side == 'HL':
l.debug("... using simple concat")
self._calculate = self._op_concat
# this just returns the high half of the first arg
elif self._from_size > self._to_size and self._from_side == 'HI':
l.debug("... using hi half")
self._calculate = self._op_hi_half
# this just returns the high half of the first arg
elif self._from_size > self._to_size and self._from_side in ('L', 'LO'):
l.debug("... using lo half")
self._calculate = self._op_lo |
alexandrebatista84/fundamentos-programacao | Aula Prática 2/Exercicio20.py | Python | mit | 84 | 0.107143 |
# Prints the classic pattern: 1x8+1=9, 12x8+2=98, ..., 123456789x8+9=987654321.
# Reconstructed: the loop-body lines had lost their indentation and contained
# stray '|' artifacts, which made the script a syntax error.
n = 1
i = 1
while i < 10:
    print(n, "X 8 +", i, " =", n * 8 + i)
    i = i + 1
    # Append the next digit: 1 -> 12 -> 123 -> ...
    n = n * 10 + i
|
esteinig/dartQC | dartqc/DartModules.py | Python | gpl-3.0 | 36,121 | 0.003211 | import csv
import operator
import os
import shutil
from subprocess import call
import pandas
import numpy
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from scipy import stats
from dartqc.DartUtils import stamp
from dartqc.DartMessages import DartMessages
class SummaryModule:
    """Writes summary CSV outputs for a DartQC project: per-SNP summaries,
    per-module run summaries and combination matrices.

    Fixes applied: reconstructed garbled tokens (`ret|urn`, `mi|nd`) that were
    syntax errors, and corrected the inconsistent `"identity:"` key (trailing
    colon) in `_get_redundancy_results` to `"identity"` so both branches and
    both returned dicts use the same key.
    """

    def __init__(self, data=None, attributes=None, out_path=None):
        """Store SNP data and project attributes; ensure the output dir exists.

        data: dict mapping SNP id -> dict of per-SNP statistics
        attributes: project attribute dict (must contain "project")
        out_path: output directory; defaults to attributes["out_path"]
        """
        self.data = data
        self.attributes = attributes
        if out_path is None:
            self.out_path = attributes["out_path"]
        else:
            self.out_path = out_path
        os.makedirs(self.out_path, exist_ok=True)

    def write_snp_summary(self, file="snp_summary.csv", summary_parameters=None, sort=False):
        """Write one CSV row per SNP with the requested summary statistics."""
        if summary_parameters is None:
            summary_parameters = ["maf", "hwe", "rep", "call_rate"]
        out_file = os.path.join(self.out_path, self.attributes["project"] + "_" + file)
        out_data = [["id"] + summary_parameters]
        snps = [[snp] + [data[parameter] for parameter in summary_parameters]
                for snp, data in self.data.items()]
        if sort:
            # Sort descending by every statistic column (the id column 0 is excluded).
            snps = sorted(snps, key=operator.itemgetter(*range(1, len(summary_parameters) + 1)),
                          reverse=True)
        out_data += snps
        with open(out_file, "w") as snp_summary:
            writer = csv.writer(snp_summary)
            writer.writerows(out_data)

    def write_module_summary(self, file="module_summary.csv"):
        """Write a two-row CSV ("parameters" / "removed") summarising each module run."""
        # Function at the moment for command line, hard-coded, need to fix.
        params_snp, removed_snp = self._get_snp_results()
        params_red, removed_red = self._get_redundancy_results()
        params_pop, removed_pop = self._get_pop_results()
        params_sam, removed_sam = self._get_sample_results()
        params_ppr, removed_ppr = self._get_preprocessing_results()

        project_param = {"project": self.attributes["project"], "snps": self.attributes["snps"]}
        snp_removed = self._get_snp_sum([removed_red, removed_pop, removed_snp])
        project_removed = {"project": self.attributes["project"], "snps": snp_removed}

        row_param = {k: v for d in [project_param, params_ppr, params_sam, params_pop, params_snp, params_red]
                     for k, v in d.items()}
        row_removed = {k: v for d in [project_removed, removed_ppr, removed_sam, removed_pop, removed_snp, removed_red]
                       for k, v in d.items()}

        df = pandas.DataFrame([row_param, row_removed], index=["parameters", "removed"])
        out_file = os.path.join(self.out_path, self.attributes["project"] + "_" + file)
        df.to_csv(out_file)

    @staticmethod
    def _get_snp_sum(dicts):
        """Sum the removal counts across result dicts, skipping None values.

        Only well-known removal keys are counted; other keys are ignored.
        """
        s = 0
        for d in dicts:
            for k, v in d.items():
                if k in ("maf", "hwe", "rep_average", "monomorphic", "call_rate", "clusters", "duplicates"):
                    if v is not None:
                        s += int(v)
        return s

    def _get_snp_results(self):
        """Return (parameters, removed-counts) for the SNP module, or None-filled dicts."""
        try:
            results = self.attributes["modules"]["snp"]["results"]
            parameters = self.attributes["modules"]["snp"]["settings"]["parameters"]
            params = {entry[0]: entry[1] for entry in parameters}
            removed = {param: result["removed"] for param, result in results.items()}
        except KeyError:
            stamp("Could not detect results for SNP Module, skipping...")
            params = {"maf": None, "hwe": None, "call_rate": None, "rep_average": None}
            removed = {"maf": None, "hwe": None, "call_rate": None, "rep_average": None}
        return params, removed

    def _get_sample_results(self):
        """Return (parameters, removed-counts) for the Sample (mind) module."""
        try:
            removed = {"mind": self.attributes["individual"]["results"]["mind"]["removed_samples"],
                       "samples": self.attributes["individual"]["results"]["mind"]["removed_samples"]}
            params = {"mind": self.attributes["individual"]["results"]["mind"]["value"],
                      "samples": len(self.attributes["individual"]["states"]["mind"]["sample_names_original"])}
        except KeyError:
            stamp("Could not detect results for Sample Module, skipping...")
            params = {"mind": None, "samples": None}
            removed = {"mind": None, "samples": None}
        return params, removed

    def _get_pop_results(self):
        """Return (parameters, removed-counts) for the Population module."""
        try:
            removed = {"monomorphic": self.attributes["modules"]["population"]["results"]["removed"]}
            params = {"monomorphic": self.attributes["modules"]["population"]["settings"]["value"]}
        except KeyError:
            stamp("Could not detect results for Population Module, skipping...")
            params = {"monomorphic": None}
            removed = {"monomorphic": None}
        return params, removed

    def _get_redundancy_results(self):
        """Return (parameters, removed-counts) for the Redundancy module."""
        try:
            parameters = self.attributes["modules"]["redundancy"]["settings"]
            # Key fixed: was "identity:" (trailing colon) in the success branch only.
            params = {"clusters": parameters["clusters"], "duplicates": parameters["duplicates"],
                      "identity": parameters["identity"]}
            results = self.attributes["modules"]["redundancy"]["results"]
            removed = {"clusters": results["clusters"]["removed"], "duplicates": results["duplicates"]["removed"],
                       "identity": None}
        except KeyError:
            stamp("Could not detect results for Redundancy Module, skipping...")
            params = {"clusters": None, "duplicates": None, "identity": None}
            removed = {"clusters": None, "duplicates": None, "identity": None}
        return params, removed

    def _get_preprocessing_results(self):
        """Return (parameters, removed-counts) for the Preprocessing module."""
        try:
            params = {"preprocess": self.attributes["modules"]["preprocessor"]["settings"]["read_count_sum_threshold"],
                      "calls": self.attributes["modules"]["preprocessor"]["settings"]["results"]["total_calls"],
                      "missing": self.attributes["modules"]["preprocessor"]["settings"]["results"]["before_missing"]}
            removed = {
                "preprocess": self.attributes["modules"]["preprocessor"]["settings"]["results"]["replaced_calls"],
                "calls": self.attributes["modules"]["preprocessor"]["settings"]["results"]["replaced_calls"],
                "missing": self.attributes["modules"]["preprocessor"]["settings"]["results"]["replaced_calls"]}
        except KeyError:
            stamp("Could not detect results for Preprocessing Module, skipping...")
            params = {"preprocess": None, "calls": None, "missing": None}
            removed = {"preprocess": None, "calls": None, "missing": None}
        return params, removed

    def write_matrix(self, combination_matrix, r_matrix=None, file="combination_table.csv", r_file="r_matrix.csv"):
        """Write the combination matrix (and optionally an R matrix) as CSV files."""
        out_file = os.path.join(self.out_path, file)
        with open(out_file, "w") as table_file:
            writer = csv.writer(table_file)
            writer.writerows(combination_matrix)

        if r_matrix is not None:
            out_r = os.path.join(self.out_path, r_file)
            with open(out_r, "w") as out_r_file:
                writer = csv.writer(out_r_file)
                writer.writerows(r_matrix)
########################################################################################################################
class QualityControl:
def __init__(self, data, attributes):
self.data = data # Dictionary holds data from DartReader
self.attributes = attributes
self.verbose = True
self.messages = DartMessages()
self.sample_size = attributes["sample_size"]
self.sample_names = attributes["sample_names"]
self.missing = attributes["missing"]
self.homozygous_major = attributes["homozygous_major"]
self.homozygous_minor = attributes["homozygous_minor"]
self.hetero |
sarrionandia/taber | results/controllers/PointsController.py | Python | gpl-2.0 | 1,456 | 0 | from results.controllers.ResultsController import ResultsController
class PointsController():
    """Resolves team and speaker points from debate results.

    A result stores per-position team points (og/oo/cg/co) and speaker
    points (ogsp1, ogsp2, ...); the duplicated OG/OO/CG/CO if-chains from
    the original are factored into a single position lookup.
    """

    results_controller = None

    def __init__(self):
        self.results_controller = ResultsController()

    @staticmethod
    def _position(debate, team):
        """Return the lowercase position key ('og'/'oo'/'cg'/'co') of `team`
        in `debate`, or None when the team is not in the debate."""
        if debate.OG == team:
            return "og"
        if debate.OO == team:
            return "oo"
        if debate.CG == team:
            return "cg"
        if debate.CO == team:
            return "co"
        return None

    def team_points_for_team(self, team, round):
        """Team points earned by `team` in `round` (None if not in the debate)."""
        result = self.results_controller.result_for_team(team, round)
        position = self._position(result.debate, team)
        if position is None:
            return None
        return getattr(result, position)

    def total_points_for_team(self, team, maxround):
        """Sum of team points over rounds 1..maxround inclusive."""
        return sum(self.team_points_for_team(team, round)
                   for round in range(1, maxround + 1))

    def speaker_points_for_team(self, team, round):
        """[speaker1, speaker2] points for `team` in `round` (None if not found)."""
        result = self.results_controller.result_for_team(team, round)
        position = self._position(result.debate, team)
        if position is None:
            return None
        return [getattr(result, position + "sp1"), getattr(result, position + "sp2")]

    def team_points_map_for_round(self, round, teams):
        """Map each team in `teams` to its team points for `round`."""
        return {team: self.team_points_for_team(team, round) for team in teams}
|
nathanielvarona/airflow | airflow/providers/tableau/operators/tableau_refresh_workbook.py | Python | apache-2.0 | 3,881 | 0.001804 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from tableauserverclient import WorkbookItem
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.tableau.hooks.tableau import TableauHook
class TableauRefreshWorkbookOperator(BaseOperator):
    """
    Refreshes a Tableau Workbook/Extract

    .. seealso:: https://tableau.github.io/server-client-python/docs/api-ref#workbooks

    :param workbook_name: The name of the workbook to refresh.
    :type workbook_name: str
    :param site_id: The id of the site where the workbook belongs to.
    :type site_id: Optional[str]
    :param blocking: By default the extract refresh will be blocking means it will wait until it has finished.
    :type blocking: bool
    :param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>`
        containing the credentials to authenticate to the Tableau Server.
    :type tableau_conn_id: str
    """

    def __init__(
        self,
        *,
        workbook_name: str,
        site_id: Optional[str] = None,
        blocking: bool = True,
        tableau_conn_id: str = 'tableau_default',
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.workbook_name = workbook_name
        self.site_id = site_id
        self.blocking = blocking
        self.tableau_conn_id = tableau_conn_id

    def execute(self, context: dict) -> str:
        """
        Executes the Tableau Extract Refresh and pushes the job id to xcom.

        :param context: The task context during execution.
        :type context: dict
        :return: the id of the job that executes the extract refresh
        :rtype: str
        """
        with TableauHook(self.site_id, self.tableau_conn_id) as tableau_hook:
            workbook = self._get_workbook_by_name(tableau_hook)

            job_id = self._refresh_workbook(tableau_hook, workbook.id)
            if self.blocking:
                # Imported locally, presumably to avoid a circular import
                # between the operator and sensor modules -- confirm.
                from airflow.providers.tableau.sensors.tableau_job_status import TableauJobStatusSensor

                # The sensor is executed inline (dag=None, empty context) as a
                # blocking wait for the refresh job to finish.
                TableauJobStatusSensor(
                    job_id=job_id,
                    site_id=self.site_id,
                    tableau_conn_id=self.tableau_conn_id,
                    task_id='wait_until_succeeded',
                    dag=None,
                ).execute(context={})
                self.log.info('Workbook %s has been successfully refreshed.', self.workbook_name)
        return job_id

    def _get_workbook_by_name(self, tableau_hook: TableauHook) -> WorkbookItem:
        # Linear scan over all workbooks on the server; raises when no
        # workbook carries the configured name.
        for workbook in tableau_hook.get_all(resource_name='workbooks'):
            if workbook.name == self.workbook_name:
                self.log.info('Found matching workbook with id %s', workbook.id)
                return workbook

        raise AirflowException(f'Workbook {self.workbook_name} not found!')

    def _refresh_workbook(self, tableau_hook: TableauHook, workbook_id: str) -> str:
        # Kicks off an asynchronous extract refresh and returns its job id.
        job = tableau_hook.server.workbooks.refresh(workbook_id)
        self.log.info('Refreshing Workbook %s...', self.workbook_name)
        return job.id
|
soundgnome/django-rom | rom/invoices/calculator.py | Python | mit | 3,170 | 0.003155 | from datetime import date, timedelta
from decimal import Decimal
from django.db.models import Q, Sum
from .models import Expense, Invoice
def get_monthly_totals(year, month):
    """Return income/invoiced/expense totals for the given calendar month."""
    start = date(year, month, 1)
    # First day of the following month; the arithmetic rolls December into
    # January of the next year.
    end = date(year + month // 12, month % 12 + 1, 1)
    return {
        'income': _get_income_total(start, end),
        'invoiced': _get_invoiced_total(start, end),
        'total_expenses': _get_raw_total(Expense.objects, start, end),
        'expenses_before_tax': _get_raw_total(_get_filtered_expenses(tax=False), start, end),
        'adjusted_expenses': _get_adjusted_total(Expense.objects, start, end),
    }
def get_outstanding_invoices(since=None):
    """Summarise unpaid invoices.

    Returns a dict with:
      total_balance   - sum of all unpaid invoice amounts
      overdue         - queryset of unpaid invoices sent 30+ days before
                        `since` (defaults to today)
      overdue_balance - sum of amounts over the overdue queryset
    """
    invoices = {}
    all_outstanding = Invoice.objects.filter(date_received=None)
    aggregate = all_outstanding.aggregate(Sum('amount'))
    # aggregate yields None when the queryset is empty; normalise to 0.
    invoices['total_balance'] = aggregate['amount__sum'] or 0
    if since is None:
        since = date.today()
    start = since - timedelta(days=30)
    invoices['overdue'] = all_outstanding.filter(date_sent__lte=start)
    aggregate = invoices['overdue'].aggregate(Sum('amount'))
    invoices['overdue_balance'] = aggregate['amount__sum'] or 0
    return invoices
def _get_adjusted_total(expenses, start, end):
    """Return the month-equivalent expense total for [start, end).

    Monthly expenses (month_span=1) count in full; quarterly ones
    (month_span=3) over the trailing three months count at 1/3; annual ones
    (month_span=12) over the trailing year count at 1/12. Reconstructed: the
    quarterly_start lines contained garbled '|' artifacts (syntax errors).
    """
    total = Decimal('0')
    aggregate = expenses.filter(date__gte=start).filter(date__lt=end). \
        filter(month_span=1).aggregate(Sum('amount'))
    total += aggregate['amount__sum'] or 0

    # Trailing three-month window ending at `end`, rolling over the year
    # boundary for January/February starts.
    if start.month >= 3:
        quarterly_start = date(start.year, start.month - 2, 1)
    else:
        quarterly_start = date(start.year - 1, start.month + 10, 1)
    aggregate = expenses.filter(date__gte=quarterly_start).filter(date__lt=end). \
        filter(month_span=3).aggregate(Sum('amount'))
    if aggregate['amount__sum']:
        total += aggregate['amount__sum'] / 3

    # Trailing twelve-month window ending at `end`.
    annual_start = date(end.year - 1, end.month, 1)
    aggregate = expenses.filter(date__gte=annual_start).filter(date__lt=end). \
        filter(month_span=12).aggregate(Sum('amount'))
    if aggregate['amount__sum']:
        total += aggregate['amount__sum'] / 12

    return total.quantize(Decimal('.01'))
def _get_filtered_expenses(tax=False):
    """Return expenses restricted to tax entries (tax=True) or everything else."""
    if tax:
        expenses = Expense.objects.filter(Q(type=Expense.Type.TAX_QUARTERLY) | Q(type=Expense.Type.TAX_ANNUAL))
    else:
        expenses = Expense.objects.exclude(type=Expense.Type.TAX_QUARTERLY).exclude(type=Expense.Type.TAX_ANNUAL)
    return expenses
def _get_income_total(start, end):
    """Sum of invoice amounts received within [start, end); 0 when none."""
    aggregate = Invoice.objects. \
        filter(date_received__gte=start).filter(date_received__lt=end). \
        aggregate(Sum('amount'))
    return aggregate['amount__sum'] or 0
def _get_invoiced_total(start, end):
    """Sum of invoice amounts sent within [start, end); 0 when none."""
    aggregate = Invoice.objects. \
        filter(date_sent__gte=start).filter(date_sent__lt=end). \
        aggregate(Sum('amount'))
    return aggregate['amount__sum'] or 0
def _get_raw_total(expenses, start, end):
    """Sum of expense amounts dated within [start, end).

    Note: unlike the income helpers this returns None (not 0) for an empty
    queryset, as produced by the Django aggregate.
    """
    aggregate = expenses.filter(date__gte=start).filter(date__lt=end).aggregate(Sum('amount'))
    return aggregate['amount__sum']
|
vadosl/photorganizer | photorganizer/photo/views__.py | Python | mit | 8,479 | 0.006318 | # coding: utf-8
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response
from collections import defaultdict
from django.contrib.auth.decorators import login_required
from django.core.context_processors import csrf
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.db.models import Q
from django.contrib.auth.models import User
from django.forms import ModelForm
from .models import Image, Album, Tag
def main(request):
    """Main listing: albums paginated four per page, each with a small preview.

    Anonymous visitors only see albums flagged public.
    """
    context = RequestContext(request)
    albums = Album.objects.all()
    if not request.user.is_authenticated():
        albums = albums.filter(public=True)
    paginator = Paginator(albums, 4)
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1
    try:
        albums = paginator.page(page)
    except (InvalidPage, EmptyPage):
        # Past-the-end page numbers fall back to the last page.
        albums = paginator.page(paginator.num_pages)
    # Attach up to four preview images to each album on this page.
    for album in albums.object_list:
        album.images = album.image_set.all()[:4]
    context_dict = {'albums':albums}
    return render_to_response("photo/list.html", context_dict, context)
def album(request, pk, view="thumbnails"):
    """Album listing.

    Shows the album's images paginated: 30 thumbnails per page, or 10 per
    page in the slideshow-style "full" view. The garbled dead-code string
    block has been removed; its access check (also applied by main()) is
    restored below.
    """
    album = Album.objects.get(pk=pk)
    # Keep private albums hidden from anonymous users, matching main()'s
    # public-only filtering; this check was lost in the slideshow rewrite.
    if not album.public and not request.user.is_authenticated():
        return HttpResponse("Error: you need to be logged in to view this album.")
    num_images = 30
    if view == "full":
        num_images = 10
    images = album.image_set.all()
    paginator = Paginator(images, num_images)
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1
    try:
        images = paginator.page(page)
    except (InvalidPage, EmptyPage):
        images = paginator.page(paginator.num_pages)

    # add list of tags as string and list of album objects to each image object
    for img in images.object_list:
        tags = [x[1] for x in img.tags.values_list()]
        img.tag_lst = ", ".join(tags)
        img.album_lst = [x[1] for x in img.albums.values_list()]

    context = RequestContext(request)
    context_dict = dict(album=album, images=images, view=view, albums=Album.objects.all())
    return render_to_response("photo/album.html", context_dict, context)
def image(request, pk):
    """Render the detail page for a single image, with a back link."""
    photo = Image.objects.get(pk=pk)
    ctx = RequestContext(request)
    payload = dict(image=photo, backurl=request.META["HTTP_REFERER"])
    return render_to_response("photo/image.html", payload, ctx)
def update(request):
    """Update image title, rating, tags, albums.

    Form field names follow the pattern "<prop>-<pk>" (e.g. "title-3"); they
    are grouped per image primary key and then applied and saved.
    """
    p = request.POST
    images = defaultdict(dict)

    # create dictionary of properties for each image
    for k, v in p.items():
        if k.startswith("title") or k.startswith("rating") or k.startswith("tags"):
            k, pk = k.split('-')
            images[pk][k] = v
        elif k.startswith("album"):
            pk = k.split('-')[1]
            images[pk]["albums"] = p.getlist(k)

    # process properties, assign to image objects and save
    for k, d in images.items():
        image = Image.objects.get(pk=k)
        image.title = d["title"]
        image.rating = int(d["rating"])

        # tags - assign or create if a new tag!
        tags = d["tags"].split(',')
        lst = []
        for t in tags:
            if t:
                t = t.strip()
                lst.append(Tag.objects.get_or_create(tag=t)[0])
        # Direct M2M assignment (old-Django style; removed in Django 2.0).
        image.tags = lst

        if "albums" in d:
            image.albums = d["albums"]
        image.save()
    # Bounce back to whatever page submitted the form.
    return HttpResponseRedirect(request.META["HTTP_REFERER"])
#@login_required
def search(request):
    """Search, filter, sort images.

    Handles three cases: a plain GET (render the empty search form), a POST
    (apply edits + filters), and a GET with ?page (paginate the previous
    search, restoring its parameters from the session).
    """
    context = RequestContext(request)
    context_dict = dict( albums=Album.objects.all(), authors=User.objects.all())

    # If this is the first visit via the Search link (plain GET without a
    # page parameter), just render the page without doing any work.
    if request.method == 'GET' and not request.GET.get("page"):
        return render_to_response("photo/search.html", context_dict, context)

    # From here on the request is a POST, or a GET carrying ?page.
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1

    p = request.POST
    images = defaultdict(dict)

    # init parameters
    parameters = {}
    keys = ['title', 'filename', 'rating_from', 'rating_to', 'width_from',
            'width_to', 'height_from', 'height_to', 'tags', 'view', 'user', 'sort', 'asc_desc']
    for k in keys:
        parameters[k] = ''
    parameters["album"] = []

    # create dictionary of properties for each image and a dict of search/filter parameters
    for k, v in p.items():
        if k == "album":
            parameters[k] = [int(x) for x in p.getlist(k)]
        elif k in parameters:
            parameters[k] = v
        elif k.startswith("title") or k.startswith("rating") or k.startswith("tags"):
            k, pk = k.split('-')
            images[pk][k] = v
        elif k.startswith("album"):
            pk = k.split('-')[1]
            images[pk]["albums"] = p.getlist(k)

    # save or restore parameters from session
    if page != 1 and "parameters" in request.session:
        parameters = request.session["parameters"]
    else:
        request.session["parameters"] = parameters

    results = update_and_filter(images, parameters)

    # make paginator
    paginator = Paginator(results, 20)
    try:
        results = paginator.page(page)
    except (InvalidPage, EmptyPage):
        results = paginator.page(paginator.num_pages)

    # add list of tags as string and list of album names to each image object
    for img in results.object_list:
        tags = [x[1] for x in img.tags.values_list()]
        img.tag_lst = ", ".join(tags)
        img.album_lst = [x[1] for x in img.albums.values_list()]

    context_dict['results'] = results
    context_dict['prm'] = parameters
    return render_to_response("photo/search.html", context_dict, context)
def update_and_filter(images, p):
"""Update image data if changed, filter results through parameters and return results list."""
# process properties, assign to image objects and save
for k, d in images.items():
image = Image.objects.get(pk=k)
image.title = d["title"]
image.rating = int(d["rating"])
# tags - assign or create if a new tag!
tags = d["tags"].split(',')
lst = []
for t in tags:
if t:
t = t.strip()
lst.append(Tag.objects.get_or_create(tag=t)[0])
image.tags = lst
if "albums" in d:
image.albums = d["albums"]
image.save()
# filter results by parameters
results = Image.objects.all()
if p["title"] : results = results.filter(title__icontains=p["title"])
if p["filename"] : results = results.filter(image__icontains=p["filename"])
if p["rating_from"] : results = results.filter(rating__gte=int(p["rating_from"]))
if p["rating_to"] : results = results.filter(rating__lte=int(p["rating_to"]))
if p["width_from"] : results = results.filter(width__gte=int(p["width_from"]))
if p["width_to"] : results = results.filter(width__lte=int(p["width_to"]))
if p["height_from"] : results = results.filter(height__gte=int(p["height_from"]))
if p["height_to"] : results = results.filter(height__lte=int(p["height_to"]))
if p["tags"]:
tags = p["tags"].split(',')
lst = []
for t in tags:
if t:
t = t.strip()
results = results.filter(tags=Tag.objects.get(tag=t))
if p["album"]:
|
jimporter/bfg9000 | test/unit/tools/test_ld.py | Python | bsd-3-clause | 2,557 | 0 | from unittest import mock
from .. import *
from bfg9000.tools.ld import LdLinker
from bfg9000.path import abspath
from bfg9000.versioning import Version
def mock_execute(args, **kwargs):
    """Canned replacement for bfg9000.shell.execute emitting two ld SEARCH_DIR lines."""
    search_dirs = ['SEARCH_DIR("/dir1")', 'SEARCH_DIR("=/dir2")']
    return ''.join(line + '\n' for line in search_dirs)
class TestLdLinker(CrossPlatformTestCase):
    """Tests for LdLinker brand/version detection and search-dir parsing.

    Reconstructed: test_search_dirs_fail had its name and body mangled by
    stray '|' artifacts, leaving the class a syntax error.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(clear_variables=True, *args, **kwargs)

    def test_flavor(self):
        ld = LdLinker(None, self.env, ['ld'], 'version')
        self.assertEqual(ld.flavor, 'ld')

    def test_lang(self):
        class MockBuilder:
            lang = 'c++'

        ld = LdLinker(MockBuilder(), self.env, ['ld'], 'version')
        self.assertEqual(ld.lang, 'c++')

    def test_family(self):
        class MockBuilder:
            family = 'native'

        ld = LdLinker(MockBuilder(), self.env, ['ld'], 'version')
        self.assertEqual(ld.family, 'native')

    def test_gnu_ld(self):
        version = 'GNU ld (GNU Binutils for Ubuntu) 2.26.1'
        ld = LdLinker(None, self.env, ['ld'], version)
        self.assertEqual(ld.brand, 'bfd')
        self.assertEqual(ld.version, Version('2.26.1'))

    def test_gnu_gold(self):
        version = 'GNU gold (GNU Binutils for Ubuntu 2.26.1) 1.11'
        ld = LdLinker(None, self.env, ['ld'], version)
        self.assertEqual(ld.brand, 'gold')
        self.assertEqual(ld.version, Version('1.11'))

    def test_unknown_brand(self):
        version = 'unknown'
        ld = LdLinker(None, self.env, ['ld'], version)
        self.assertEqual(ld.brand, 'unknown')
        self.assertEqual(ld.version, None)

    def test_search_dirs(self):
        with mock.patch('bfg9000.shell.execute', mock_execute):
            ld = LdLinker(None, self.env, ['ld'], 'version')
            self.assertEqual(ld.search_dirs(),
                             [abspath('/dir1'), abspath('/dir2')])

    def test_search_dirs_sysroot(self):
        # '=/dir2' entries are resolved relative to the sysroot.
        with mock.patch('bfg9000.shell.execute', mock_execute):
            ld = LdLinker(None, self.env, ['ld'], 'version')
            self.assertEqual(ld.search_dirs(sysroot='/sysroot'),
                             [abspath('/dir1'), abspath('/sysroot/dir2')])

    def test_search_dirs_fail(self):
        def mock_bad_execute(*args, **kwargs):
            raise OSError()

        with mock.patch('bfg9000.shell.execute', mock_bad_execute):
            ld = LdLinker(None, self.env, ['ld'], 'version')
            # Non-strict mode swallows the failure; strict mode propagates it.
            self.assertEqual(ld.search_dirs(), [])
            self.assertRaises(OSError, lambda: ld.search_dirs(strict=True))
|
hofschroeer/shinysdr | shinysdr/blocks.py | Python | gpl-3.0 | 15,994 | 0.005252 | # Copyright 2013, 2014 Kevin Reid <kpreid@switchb.org>
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
'''
GNU Radio flowgraph blocks for use by ShinySDR.
This module is not an external API and not guaranteed to have a stable
interface.
'''
# pylint: disable=attribute-defined-outside-init
# (attribute-defined-outside-init: doing it carefully)
from __future__ import absolute_import, division
import math
import os
import subprocess
from gnuradio import gr
from gnuradio import blocks
from gnuradio.fft import logpwrfft
from shinysdr.math import todB
from shinysdr.signals import SignalType
from shinysdr.types import BulkDataType, Range
from shinysdr.values import ExportedState, LooseCell, StreamCell, exported_value, setter
class RecursiveLockBlockMixin(object):
    '''
    For top blocks needing recursive locking and/or a notification to restart parts.
    '''
    __lock_count = 0

    def _recursive_lock_hook(self):
        '''Subclass hook; runs just after the outermost lock is acquired.'''
        pass

    def _recursive_lock(self):
        '''Acquire the flowgraph lock; nestable any number of times.

        GNU Radio's own lock is non-recursive, so the nesting depth is
        tracked here and the real lock() fires only on the outermost call.
        '''
        depth = self.__lock_count
        if depth == 0:
            self.lock()
            self._recursive_lock_hook()
        self.__lock_count = depth + 1

    def _recursive_unlock(self):
        '''Release one nesting level; unlock for real when depth reaches zero.'''
        depth = self.__lock_count - 1
        self.__lock_count = depth
        if depth == 0:
            self.unlock()
class Context(object):
    '''
    Client facet for RecursiveLockBlockMixin.
    '''
    def __init__(self, top):
        self.__target = top

    def lock(self):
        '''Recursively lock the wrapped top block.'''
        self.__target._recursive_lock()

    def unlock(self):
        '''Undo one level of recursive locking on the wrapped top block.'''
        self.__target._recursive_unlock()
def rotator_inc(rate, shift):
    '''
    Calculation for using gnuradio.blocks.rotator_cc or other interfaces
    wanting radians/sample input.

    rate: sample rate
    shift: frequency shift in Hz
    '''
    two_pi = 2 * math.pi
    return two_pi * (shift / rate)
def make_sink_to_process_stdin(process, itemsize=gr.sizeof_char):
    '''Given a twisted Process, connect a sink to its stdin.'''
    # Duplicate the descriptor so our sink keeps a copy after Twisted
    # relinquishes its own end of the pipe.
    twisted_fd = process.pipes[0].fileno()  # TODO: More public way to do this?
    our_fd = os.dup(twisted_fd)
    process.closeStdin()
    return blocks.file_descriptor_sink(itemsize, our_fd)
def test_subprocess(args, substring, shell=False):
'''Check the stdout or stderr of the specified command for a specified string.'''
# TODO: establish resource and output size limits
# TODO: Use Twisted facilities instead to avoid possible conflicts
try:
output = subprocess.check_output(
args=args,
shell=shell,
stderr=subprocess.STDOUT)
return substring in output
except OSError:
return False
except subprocess.CalledProcessError:
return False
class _NoContext(object):
def lock(self):
pass
def unlock(self):
pass
class MessageDistributorSink(gr.hier_block2):
    '''Like gnuradio.blocks.message_sink, but copies its messages to a dynamic set of queues and saves the most recent item.

    Never blocks.'''
    def __init__(self, itemsize, context, migrate=None, notify=None):
        # itemsize: size in bytes of one input item (vector)
        # context: lock/unlock facet used to pause the flowgraph while rewiring
        # migrate: optional MessageDistributorSink whose subscribers are moved here
        # notify: optional zero-arg callback invoked after each (un)subscribe
        gr.hier_block2.__init__(
            self, self.__class__.__name__,
            gr.io_signature(1, 1, itemsize),
            gr.io_signature(0, 0, 0),
        )
        self.__itemsize = itemsize
        # Start with a no-op context so the migration below does not try to
        # lock the real flowgraph; the real context is installed afterwards.
        self.__context = _NoContext()
        self.__peek = blocks.probe_signal_vb(itemsize)
        self.__subscriptions = {}  # maps queue -> its message_sink block
        self.__notify = None
        self.connect(self, self.__peek)
        if migrate is not None:
            assert isinstance(migrate, MessageDistributorSink)  # sanity check
            # NOTE(review): unsubscribe() mutates the dict while we iterate;
            # this relies on Python 2 .keys() returning a snapshot list.
            for queue in migrate.__subscriptions.keys():
                migrate.unsubscribe(queue)
                self.subscribe(queue)
        # set now, not earlier, so as not to trigger anything while migrating
        self.__context = context
        self.__notify = notify
    def get(self):
        # Most recent item observed on the input stream.
        return self.__peek.level()
    def get_subscription_count(self):
        return len(self.__subscriptions)
    def subscribe(self, queue):
        '''Attach a message queue; it receives a copy of every subsequent item.'''
        assert queue not in self.__subscriptions
        sink = blocks.message_sink(self.__itemsize, queue, True)
        self.__subscriptions[queue] = sink
        try:
            self.__context.lock()
            self.connect(self, sink)
        finally:
            self.__context.unlock()
        if self.__notify:
            self.__notify()
    def unsubscribe(self, queue):
        '''Detach a previously subscribed message queue.'''
        sink = self.__subscriptions[queue]
        del self.__subscriptions[queue]
        try:
            self.__context.lock()
            self.disconnect(self, sink)
        finally:
            self.__context.unlock()
        if self.__notify:
            self.__notify()
# Presumably an upper bound on the FFT frame rate offered to clients —
# TODO confirm against MonitorSink's use of this constant.
_maximum_fft_rate = 500
class _OverlapGimmick(gr.hier_block2):
    '''
    Pure flowgraph kludge to cause a logpwrfft block to perform overlapped FFTs.

    The more correct solution would be to replace stream_to_vector_decimator
    (used inside of logpwrfft) with a block which takes arbitrarily-spaced
    vector chunks of the input rather than chunking and then decimating in
    terms of whole chunks. The cost of doing this instead is more scheduling
    steps and more data copies.

    To adjust for the data rate, the logpwrfft block's sample rate parameter
    must be multiplied by the factor parameter of this block; or equivalently,
    the frame rate must be divided by it.
    '''

    def __init__(self, size, factor, itemsize=gr.sizeof_gr_complex):
        '''
        size: (int) vector size (FFT size) of next block
        factor: (int) output will have this many more samples than input

        If size is not divisible by factor, then the output will necessarily
        have jitter.
        '''
        # (fixed stray corruption tokens that had split these statements)
        size = int(size)
        factor = int(factor)
        # assert size % factor == 0
        offset = size // factor
        gr.hier_block2.__init__(
            self, self.__class__.__name__,
            gr.io_signature(1, 1, itemsize),
            gr.io_signature(1, 1, itemsize),
        )
        if factor == 1:
            # No duplication needed; simplify flowgraph
            # GR refused to connect self to self, so insert a dummy block
            self.connect(self, blocks.copy(itemsize), self)
        else:
            # Interleave `factor` delayed copies of the stream so downstream
            # FFT frames overlap by `offset` samples.
            interleave = blocks.interleave(itemsize * size)
            self.connect(
                interleave,
                blocks.vector_to_stream(itemsize, size),
                self)
            for i in xrange(0, factor):
                self.connect(
                    self,
                    blocks.delay(itemsize, (factor - 1 - i) * offset),
                    blocks.stream_to_vector(itemsize, size),
                    (interleave, i))
class MonitorSink(gr.hier_block2, ExportedState):
'''
Convenience wrapper around all the bits and pieces to display the signal spectrum to the client.
The units of the FFT output are dB power/Hz (power spectral density) relative to unit amplitude (i.e. dBFS assuming the source clips at +/-1). Note this is different from the standard logpwrfft result of power _per bin_, which would be undesirably dependent on the sample rate and bin size.
'''
def __init__(self,
signal_type=None,
enable_scope=False,
freq_resolution=4096,
time_length=2048,
frame_r |
dbcls/dbcls-galaxy | scripts/scramble/scripts/psycopg2.py | Python | mit | 4,933 | 0.028583 | import os, sys, subprocess, tarfile, shutil
def unpack_prebuilt_postgres():
    # Build Postgres from source when no cached binary archive exists for
    # this platform, then (either way) extract the binary archive in place.
    if not os.access( POSTGRES_BINARY_ARCHIVE, os.F_OK ):
        print "unpack_prebuilt_postgres(): No binary archive of Postgres available for this platform - will build it now"
        build_postgres()
    else:
        print "unpack_prebuilt_postgres(): Found a previously built Postgres binary archive for this platform."
        print "unpack_prebuilt_postgres(): To force Postgres to be rebuilt, remove the archive:"
        print "  ", POSTGRES_BINARY_ARCHIVE
    # Extract the (possibly just-created) binary archive into the build dir.
    t = tarfile.open( POSTGRES_BINARY_ARCHIVE, "r" )
    for fn in t.getnames():
        t.extract( fn )
    t.close()
def build_postgres():
# untar
print "build_postgres(): Unpacking postgres source archive from:"
print " ", POSTGRES_ | ARCHIVE
t = tarfile.open( POSTGRES_ARC | HIVE, "r" )
for fn in t.getnames():
t.extract( fn )
t.close()
# configure
print "build_postgres(): Running postgres configure script"
p = subprocess.Popen( args = CONFIGURE, shell = True, cwd = os.path.join( os.getcwd(), "postgresql-%s" %POSTGRES_VERSION) )
r = p.wait()
if r != 0:
print "build_postgres(): postgres configure script failed"
sys.exit( 1 )
# compile
print "build_postgres(): Building postgres (make)"
p = subprocess.Popen( args = "make", shell = True, cwd = os.path.join( os.getcwd(), "postgresql-%s" %POSTGRES_VERSION) )
r = p.wait()
if r != 0:
print "build_postgres(): Building postgres (make) failed"
sys.exit( 1 )
# install
print "build_postgres(): Installing postgres (make install)"
p = subprocess.Popen( args = "make install", shell = True, cwd = os.path.join( os.getcwd(), "postgresql-%s" %POSTGRES_VERSION) )
r = p.wait()
if r != 0:
print "build_postgres(): Installing postgres (make install) failed"
sys.exit( 1 )
# pack
print "build_postgres(): Creating binary postgres archive for future builds of psycopg2"
t = tarfile.open( POSTGRES_BINARY_ARCHIVE, "w:bz2" )
t.add( "postgres/bin/pg_config" )
t.add( "postgres/include" )
t.add( "postgres/lib" )
t.close()
# remove self-referencing symlink
os.unlink( os.path.join( "postgresql-%s" %POSTGRES_VERSION, "src", "test", "regress", "regress.so" ) )
# change back to the build dir
if os.path.dirname( sys.argv[0] ) != "":
    os.chdir( os.path.dirname( sys.argv[0] ) )

# find setuptools
scramble_lib = os.path.join( "..", "..", "..", "lib" )
sys.path.append( scramble_lib )
try:
    from setuptools import *
    import pkg_resources
except:
    # Bootstrap setuptools into scramble_lib if it is not importable.
    from ez_setup import use_setuptools
    use_setuptools( download_delay=8, to_dir=scramble_lib )
    from setuptools import *
    import pkg_resources

# get the tag
if os.access( ".galaxy_tag", os.F_OK ):
    tagfile = open( ".galaxy_tag", "r" )
    tag = tagfile.readline().strip()
else:
    tag = None

# NOTE(review): this raises AttributeError if tag is None — a .galaxy_tag
# file of the form "<something>_<version>" appears to be mandatory; confirm.
POSTGRES_VERSION = ( tag.split( "_" ) )[1]
POSTGRES_ARCHIVE = os.path.abspath( os.path.join( "..", "..", "..", "archives", "postgresql-%s.tar.bz2" %POSTGRES_VERSION ) )
POSTGRES_BINARY_ARCHIVE = os.path.abspath( os.path.join( "..", "..", "..", "archives", "postgresql-%s-%s.tar.bz2" %( POSTGRES_VERSION, pkg_resources.get_platform() ) ) )

# there's no need to have a completely separate build script for this
if pkg_resources.get_platform() == "macosx-10.3-fat":
    CONFIGURE = "CFLAGS='-O -g -isysroot /Developer/SDKs/MacOSX10.4u.sdk -arch i386 -arch ppc' LDFLAGS='-arch i386 -arch ppc' LD='gcc -mmacosx-version-min=10.4 -isysroot /Developer/SDKs/MacOSX10.4u.sdk -nostartfiles -arch i386 -arch ppc' ./configure --prefix=%s/postgres --disable-shared --disable-dependency-tracking --without-readline" %os.getcwd()
else:
    CONFIGURE = "CFLAGS='-fPIC' ./configure --prefix=%s/postgres --disable-shared --without-readline" %os.getcwd()

# clean, in case you're running this by hand from a dirty module source dir
for dir in [ "build", "dist", "postgresql-%s" %POSTGRES_VERSION ]:
    if os.access( dir, os.F_OK ):
        print "scramble_it.py: removing dir:", dir
        shutil.rmtree( dir )

# build/unpack Postgres
unpack_prebuilt_postgres()

# patch setup.cfg so psycopg2 builds against the local static Postgres
file = "setup.cfg"
print "build(): Patching", file
if not os.access( "%s.orig" %file, os.F_OK ):
    shutil.copyfile( file, "%s.orig" %file )
i = open( "%s.orig" %file, "r" )
o = open( file, "w" )
for line in i.readlines():
    if line == "#pg_config=\n":
        line = "pg_config=postgres/bin/pg_config\n"
    if line == "#libraries=\n":
        # linux has a separate crypt lib
        if pkg_resources.get_platform().startswith( "linux" ):
            line = "libraries=crypt\n"
    print >>o, line,
i.close()
o.close()

# tag: rewrite argv so the execfile'd setup.py runs bdist_egg
me = sys.argv[0]
sys.argv = [ me ]
if tag is not None:
    sys.argv.append( "egg_info" )
    sys.argv.append( "--tag-build=%s" %tag )
sys.argv.append( "bdist_egg" )

# go
execfile( "setup.py", globals(), locals() )
|
iledarn/odoo-saas-tools | saas_portal_demo/controllers/main.py | Python | lgpl-3.0 | 1,196 | 0.002508 | # -*- coding: utf-8 -*-
import werkzeug
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.saas_portal.controllers.main import SaasPortal
def signup_redirect():
    """Return an HTML snippet that bounces the browser to /web/signup,
    carrying the current URL (including its query string) as the redirect
    target."""
    signup_url = '/web/signup?'
    current_url = '%s?%s' % (request.httprequest.base_url,
                             werkzeug.urls.url_encode(request.params))
    return """<html><head><script>
    window.location = '%sredirect=' + encodeURIComponent("%s");
    </script></head></html>
    """ % (signup_url, current_url)
class SaasPortalDemo(SaasPortal):
    @http.route(['/demo/<string:version>/<string:plan_url>/'], type='http', auth='public', website=True)
    def show_plan(self, version, plan_url, **post):
        """Render the demo page for the confirmed plan matching the given
        odoo version and page URL, or an 'unavailable' page if none exists.

        (Fixed corruption: parameter name `version` and the early-return line
        had stray separator tokens spliced in.)
        """
        domain = [('odoo_version', '=', version), ('page_url', '=', plan_url),
                  ('state', '=', 'confirmed')]
        plan = request.env['saas_portal.plan'].sudo().search(domain)
        if not plan:
            # TODO: maybe in this case we can redirect to saas_portal_templates.select_template
            return request.website.render("saas_portal_demo.unavailable_plan")
        values = {'plan': plan[0]}
        return request.website.render("saas_portal_demo.show_plan", values)
|
ict-felix/stack | vt_manager_kvm/src/python/scripts/setup_ch.py | Python | apache-2.0 | 3,204 | 0.003121 | '''
Created on Jul 19, 2010
@author: jnaous
'''
from django.core.urlresolvers import reverse
from django.test import Client
from common.tests.client import test_get_and_post_form
from django.contrib.auth.models import User
from pyquery import PyQuery as pq
from openflow.plugin.models import OpenFlowInterface, NonOpenFlowConnection
from geni.planetlab.models import PlanetLabNode
try:
from setup_expedient_params import \
SUPERUSER_USERNAME, SUPERUSER_PASSWORD,\
USER_INFO,\
PL_AGGREGATE_INFO,\
OF_AGGREGATE_INFO,\
OF_PL_CONNECTIONS
except ImportError:
print """
Could not import setup_om_params module. Make sure this
module exists and that it contains the following variables:
SUPERUSER_USERNAME, SUPERUSER_PASSWORD,
CH_PASSWORD, CH_USERNAME
"""
raise
def run():
    '''Populate the clearinghouse: register aggregates as the superuser,
    wire OpenFlow<->PlanetLab connections, then create users and projects.

    (Fixed corruption: the "adding pl agg" message and the reverse() call in
    the OpenFlow loop had stray separator tokens spliced in. Single-argument
    print statements converted to the Py2/Py3-compatible call form.)
    '''
    client = Client()
    client.login(username=SUPERUSER_USERNAME,
                 password=SUPERUSER_PASSWORD)
    # Add all planetlab aggregates
    for pl_agg in PL_AGGREGATE_INFO:
        print("adding pl agg %s" % pl_agg["url"])
        response = test_get_and_post_form(
            client,
            reverse("planetlab_aggregate_create"),
            pl_agg,
        )
        print("got response %s" % response)
        assert response.status_code == 302
    # Add all OpenFlow aggregates
    for of_agg in OF_AGGREGATE_INFO:
        print("adding of agg %s" % of_agg["url"])
        response = test_get_and_post_form(
            client,
            reverse("openflow_aggregate_create"),
            of_agg,
            del_params=["verify_certs"],
        )
        assert response.status_code == 302
    # Wire each (datapath, port) -> PlanetLab node connection
    for cnxn_tuple in OF_PL_CONNECTIONS:
        print("adding cnxn %s" % (cnxn_tuple,))
        NonOpenFlowConnection.objects.get_or_create(
            of_iface=OpenFlowInterface.objects.get(
                switch__datapath_id=cnxn_tuple[0],
                port_num=cnxn_tuple[1],
            ),
            resource=PlanetLabNode.objects.get(name=cnxn_tuple[2]),
        )
    client.logout()
    for username, info in USER_INFO.items():
        # create user
        User.objects.create_user(
            username=username, email=info["email"], password=info["password"])
        client.login(username=username, password=info["password"])
        # create project and slice
        for project in info["projects"]:
            response = test_get_and_post_form(
                client, reverse("project_create"),
                params=dict(
                    name=project["name"],
                    description=project["description"],
                ),
            )
            assert response.status_code == 302
            # This code is missing the project id. Need to get somehow to use reverse.
            # for slice in project["slices"]:
            #     response = test_get_and_post_form(
            #         client, reverse("slice_create"),
            #         params=dict(
            #             name=slice["name"],
            #             description=slice["description"],
            #         ),
            #     )
            #     assert response.status_code == 302
        client.logout()
client.logout()
|
neilswainston/development-py | synbiochemdev/datascience/ch6.py | Python | mit | 1,896 | 0 | '''
synbiochem (c) University of Manchester 2016
synbiochem is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=invalid-name
import math
import random
import sys
import numpy
def cent_limit_theorem(tests, lngth, min_val, max_val):
    '''Tests central limit theorem.

    Draws `tests` uniform random samples of size `lngth` from
    [min_val, max_val), printing each sample's mean and stddev, then a final
    line with the mean of the means, the stddev of the means, and that
    stddev rescaled by sqrt(lngth).

    (Fixed corruption: the `means` initializer had a stray separator token.
    Single-argument print statements converted to the Py2/Py3-compatible
    call form.)
    '''
    means = []
    for _ in range(tests):
        rand_dist = get_rand_dist(lngth, min_val, max_val)
        mean_val = numpy.mean(rand_dist)
        print(str(mean_val) + '\t' + str(numpy.std(rand_dist)))
        means.append(mean_val)
    print('\t'.join([str(numpy.mean(means)),
                     str(numpy.std(means)),
                     str(numpy.std(means) * math.sqrt(lngth))]))
def get_rand_dist(lngth, min_val, max_val):
    '''Return a list of `lngth` random integers drawn uniformly from
    [min_val, max_val) — randrange semantics, so max_val is excluded.

    (Fixed corruption: a stray separator token preceded the def keyword.)
    '''
    return [random.randrange(min_val, max_val) for _ in range(lngth)]
def normal_cdf(x, mu=0, sigma=1):
    '''Return the normal cumulative distribution function evaluated at x.'''
    return (1 + math.erf((x - mu) / math.sqrt(2) / sigma)) / 2


def inverse_normal_cdf(p, mu=0.0, sigma=1.0, tolerance=0.00001):
    '''Find approx inverse of the normal CDF using binary search.

    BUG FIX: the original tested `mu > 1e-4` (so a negative or tiny positive
    mu was silently ignored) and passed `tolerance` positionally into the
    `mu` slot of the recursive call. Now any non-standard (mu, sigma) is
    rescaled correctly.
    '''
    # If not standard, compute standard and rescale:
    if mu != 0.0 or sigma != 1.0:
        return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)
    lo_z = -10.0  # normal_cdf(-10) is ~0
    hi_z = 10.0   # normal_cdf(10) is ~1
    mid_z = 0.0   # defined even if the loop body never runs
    while hi_z - lo_z > tolerance:
        mid_z = (lo_z + hi_z) / 2
        mid_p = normal_cdf(mid_z)
        if mid_p < p:
            lo_z = mid_z
        elif mid_p > p:
            hi_z = mid_z
        else:
            break
    return mid_z
def main(argv):
    '''Entry point: parse four integer CLI args and run the experiment.'''
    # argv[0..3]: number of tests, sample length, min value, max value
    tests, length, lo, hi = (int(argv[i]) for i in range(4))
    return cent_limit_theorem(tests, length, lo, hi)


if __name__ == '__main__':
    main(sys.argv[1:])
|
basu96/crux-judge | src/server/contest/migrations/0003_profile.py | Python | mit | 849 | 0.002356 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-12-25 23:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # (Fixed corruption: the dependency migration name and the
    # settings.AUTH_USER_MODEL reference had stray separator tokens.)

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('contest', '0002_submission_testcase_codes'),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('logged_in', models.BooleanField(default=False)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
t3dev/odoo | addons/hr_expense_check/__manifest__.py | Python | gpl-3.0 | 421 | 0 | # -*- coding: utf-8 -*-
{
    'name': "Check Printing in Expenses",
    # (Fixed corruption: stray separator tokens inside the summary string
    # and the 'data' key.)
    'summary': """Print amount in words on checks issued for expenses""",
    'category': 'Accounting',
    'description': """
Print amount in words on checks issued for expenses
    """,
    'version': '1.0',
    'depends': ['account_check_printing', 'hr_expense'],
    'auto_install': True,
    'data': [
        'views/payment.xml',
    ],
}
|
BiznetGIO/horizon | openstack_dashboard/utils/settings.py | Python | apache-2.0 | 14,525 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from importlib import import_module
import logging
import os
import pkgutil
from horizon.utils import file_discovery
from openstack_dashboard import theme_settings
def import_submodules(module):
    """Import all submodules and make them available in a dict."""
    found = {}
    prefix = module.__name__ + '.'
    for _loader, name, _ispkg in pkgutil.iter_modules(module.__path__, prefix):
        try:
            imported = import_module(name)
        except ImportError as e:
            # FIXME: Make the errors non-fatal (do we want that?).
            logging.warning("Error importing %s", name)
            logging.exception(e)
        else:
            # Key by the short (unqualified) submodule name.
            _parent, child = name.rsplit('.', 1)
            found[child] = imported
    return found
def import_dashboard_config(modules):
    """Imports configuration from all the modules and merges it."""
    merged = collections.defaultdict(dict)
    for module in modules:
        for submodule in import_submodules(module).values():
            if hasattr(submodule, 'DASHBOARD'):
                # Dashboard configs accumulate across modules.
                merged[submodule.DASHBOARD].update(submodule.__dict__)
            elif any(hasattr(submodule, attr)
                     for attr in ('PANEL', 'PANEL_GROUP', 'FEATURE')):
                # Panel/feature configs are keyed by the file's short name
                # and replace (not merge) any earlier entry.
                short_name = submodule.__name__.rsplit('.', 1)[1]
                merged[short_name] = submodule.__dict__
            else:
                logging.warning("Skipping %s because it doesn't have DASHBOARD"
                                ", PANEL, PANEL_GROUP, or FEATURE defined.",
                                submodule.__name__)

    def _by_source_file(item):
        # Order by the basename of the module that last defined the config.
        return item[1]['__name__'].rsplit('.', 1)[1]

    return sorted(merged.items(), key=_by_source_file)
def update_dashboards(modules, horizon_config, installed_apps):
    """Imports dashboard and panel configuration from modules and applies it.

    The submodules from specified modules are imported, and the configuration
    for the specific dashboards is merged, with the later modules overriding
    settings from the former. Then the configuration is applied to
    horizon_config and installed_apps, in alphabetical order of files from
    which the configurations were imported.

    For example, given this setup:

    | foo/__init__.py
    | foo/_10_baz.py
    | foo/_20_qux.py
    | bar/__init__.py
    | bar/_30_baz_.py

    and being called with ``modules=[foo, bar]``, we will first have the
    configuration from ``_10_baz`` and ``_30_baz`` merged, then the
    configurations will be applied in order ``qux``, ``baz`` (``baz`` is
    second, because the most recent file which contributed to it, ``_30_baz``,
    comes after ``_20_qux``).

    Panel specific configurations are stored in horizon_config. Dashboards
    from both plugin-based and openstack_dashboard must be registered before
    the panel configuration can be applied. Making changes to the panel is
    deferred until the horizon autodiscover is completed, configurations are
    applied in alphabetical order of files where it was imported.
    """
    config_dashboards = horizon_config.get('dashboards', [])
    if config_dashboards or horizon_config.get('default_dashboard'):
        logging.warning(
            '"dashboards" and "default_dashboard" in (local_)settings is '
            'DEPRECATED now and may be unsupported in some future release. '
            'The preferred way to specify the order of dashboards and the '
            'default dashboard is the pluggable dashboard mechanism (in %s).',
            ', '.join([os.path.abspath(module.__path__[0])
                       for module in modules])
        )

    enabled_dashboards = []
    disabled_dashboards = []
    exceptions = horizon_config.get('exceptions', {})
    apps = []
    angular_modules = []
    js_files = []
    js_spec_files = []
    scss_files = []
    panel_customization = []
    update_horizon_config = {}
    for key, config in import_dashboard_config(modules):
        if config.get('DISABLED', False):
            if config.get('DASHBOARD'):
                disabled_dashboards.append(config.get('DASHBOARD'))
            continue

        _apps = config.get('ADD_INSTALLED_APPS', [])
        apps.extend(_apps)

        if config.get('AUTO_DISCOVER_STATIC_FILES', False):
            for _app in _apps:
                module = import_module(_app)
                base_path = os.path.join(module.__path__[0], 'static/')
                file_discovery.populate_horizon_config(horizon_config,
                                                       base_path)

        # Merge per-category exception tuples, de-duplicating entries.
        add_exceptions = config.get('ADD_EXCEPTIONS', {}).items()
        for category, exc_list in add_exceptions:
            exceptions[category] = tuple(set(exceptions.get(category, ())
                                             + exc_list))

        angular_modules.extend(config.get('ADD_ANGULAR_MODULES', []))
        # avoid pulling in dashboard javascript dependencies multiple times
        existing = set(js_files)
        js_files.extend([f for f in config.get('ADD_JS_FILES', [])
                         if f not in existing])
        js_spec_files.extend(config.get('ADD_JS_SPEC_FILES', []))
        scss_files.extend(config.get('ADD_SCSS_FILES', []))
        update_horizon_config.update(
            config.get('UPDATE_HORIZON_CONFIG', {}))
        if config.get('DASHBOARD'):
            dashboard = key
            enabled_dashboards.append(dashboard)
            if config.get('DEFAULT', False):
                horizon_config['default_dashboard'] = dashboard
        elif config.get('PANEL') or config.get('PANEL_GROUP'):
            config.pop("__builtins__", None)
            panel_customization.append(config)
    # Preserve the dashboard order specified in settings
    dashboards = ([d for d in config_dashboards
                   if d not in disabled_dashboards] +
                  [d for d in enabled_dashboards
                   if d not in config_dashboards])

    horizon_config['panel_customization'] = panel_customization
    horizon_config['dashboards'] = tuple(dashboards)
    horizon_config.setdefault('exceptions', {}).update(exceptions)
    horizon_config.update(update_horizon_config)
    horizon_config.setdefault('angular_modules', []).extend(angular_modules)
    horizon_config.setdefault('js_files', []).extend(js_files)
    horizon_config.setdefault('js_spec_files', []).extend(js_spec_files)
    horizon_config.setdefault('scss_files', []).extend(scss_files)

    # apps contains reference to applications declared in the enabled folder
    # basically a list of applications that are internal and external plugins
    # installed_apps contains reference to applications declared in settings
    # such as django.contribe.*, django_pyscss, compressor, horizon, etc...
    # for translation, we are only interested in the list of external plugins
    # so we save the reference to it before we append to installed_apps
    horizon_config.setdefault('plugins', []).extend(apps)
    installed_apps[0:0] = apps
# Order matters, list the xstatic module name and the entry point file(s) for
# that module (this is often defined as the "main" in bower.json, and
# as the xstatic module MAIN variable in the very few compliant xstatic
# modules). If the xstatic module does define a MAIN then set the files
# list to None.
# This list is to b |
Spoken-tutorial/spoken-website | events/migrations/0038_auto_20190531_0738.py | Python | gpl-3.0 | 98,329 | 0.003112 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-31 07:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('events', '0037_auto_20190306_1145'),
]
operations = [
migrations.AlterField(
model_name='academiccenter',
name='city',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='events.City'),
),
| migrations.AlterField(
| model_name='academiccenter',
name='district',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='events.District'),
),
migrations.AlterField(
model_name='academiccenter',
name='institute_category',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='events.InstituteCategory'),
),
migrations.AlterField(
model_name='academiccenter',
name='institution_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='events.InstituteType'),
),
migrations.AlterField(
model_name='academiccenter',
name='location',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='events.Location'),
),
migrations.AlterField(
model_name='academiccenter',
name='state',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='events.State'),
),
migrations.AlterField(
model_name='academiccenter',
name='university',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='events.University'),
),
migrations.AlterField(
model_name='academiccenter',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='accountexecutive',
name='academic',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='events.AcademicCenter'),
),
migrations.AlterField(
model_name='accountexecutive',
name='appoved_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='accountexecutive_approved_by', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='accountexecutive',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, related_name='accountexecutive', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='city',
name='state',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='events.State'),
),
migrations.AlterField(
model_name='coursemap',
name='course',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='events.LabCourse'),
),
migrations.AlterField(
model_name='coursemap',
name='foss',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
),
migrations.AlterField(
model_name='district',
name='state',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='events.State'),
),
migrations.AlterField(
model_name='eventsnotification',
name='academic',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='events.AcademicCenter'),
),
migrations.AlterField(
model_name='eventsnotification',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='fossmdlcourses',
name='foss',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='creation.FossCategory'),
),
migrations.AlterField(
model_name='helpfulfor',
name='helpful_for',
field=models.CharField(choices=[('0', 'Academic Performance'), ('1', 'Project Assignments'), ('2', 'To get job interviews'), ('3', 'To get jobs'), ('4', 'All of the above')], max_length=50),
),
migrations.AlterField(
model_name='inductionfinallist',
name='eoi_id',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.PROTECT, to='events.InductionInterest'),
),
migrations.AlterField(
model_name='inductioninterest',
name='age',
field=models.CharField(choices=[('', '-----'), ('20to25', '20 to 25 years'), ('26to30', '26 to 30 years'), ('31to35', '31 to 35 years'), ('35andabove', 'Above 35 years')], max_length=100),
),
migrations.AlterField(
model_name='inductioninterest',
name='borrow_laptop',
field=models.CharField(choices=[('', '-----'), ('Yes', 'Yes'), ('No', 'No')], max_length=50),
),
migrations.AlterField(
model_name='inductioninterest',
name='bring_laptop',
field=models.CharField(choices=[('', '-----'), ('Yes', 'Yes'), ('No', 'No')], max_length=50),
),
migrations.AlterField(
model_name='inductioninterest',
name='designation',
field=models.CharField(choices=[('', '-----'), ('Lecturer', 'Lecturer'), ('AssistantProfessor', 'Assistant Professor'), ('AssociateProfessor', 'Associate Professor'), ('Professor', 'Professor'), ('Other', 'Other')], max_length=100),
),
migrations.AlterField(
model_name='inductioninterest',
name='do_agree',
field=models.CharField(choices=[('', '-----'), ('Yes', 'Yes')], max_length=50),
),
migrations.AlterField(
model_name='inductioninterest',
name='education',
field=models.CharField(choices=[('', '-----'), ('3yeargraduatedegree(BABScB.Cometc)', '3 year graduate degree (BA, BSc, B.Com, etc.)'), ('Professionaldegree(BEBTechetc)', 'Professional degree (BE, B.Tech, etc.)'), ('2yearMasters(MAMScMCometc)', '2 year Masters (MA, MSc, MCom, etc.)'), ('2yearprofessionalMasters(MEMTechMBAMPhiletc)', '2 year professional Masters (ME, MTech, MBA, MPhil, etc.)'), ('PhD', 'PhD'), ('Other', 'Other')], max_length=100),
),
migrations.AlterField(
model_name='inductioninterest',
name='experience_in_college',
field=models.CharField(choices=[('', '-----'), ('Lessthan1year', 'Less than 1 year'), ('Morethan1yearbutlessthan2years', 'More than 1 year, but less than 2 years'), ('Morethan2yearsbutlessthan5years', 'More than 2 years but less than 5 years'), ('Morethan5years', 'More than 5 years')], max_length=100),
),
migrations.AlterField(
model_name='inductioninterest',
name='gender',
field=models.CharField(choices=[('', '-----'), ('Male', 'Male'), ('Female', 'Female')], max_length=50),
),
migrations.AlterField(
model_name='inductioninterest',
name='medium_of_studies',
field=models.CharField(choices=[('', '-----'), ('Assamese', 'Assamese'), ('Bengali', 'Bengali'), ('Bhojpuri', 'Bhojpuri'), ('Bodo', 'Bodo'), ('English', 'English'), ('Gujarati', 'Gujarati'), ('Hindi', 'Hindi'), ('Kannada', 'Kannada'), ('Kashmiri', 'Kashmiri'), ('Khasi', 'Khasi'), ('Konkani', 'Konkani'), ('Maithili', 'Maithili'), ('Malayalam', 'Malayalam'), ('Manipuri', 'Manipuri'), ('Marathi', 'Marathi'), ('Nepali', 'Nepali'), ('Oriya', 'Oriya'), ('Punjabi', 'Punjabi'), ('Raja |
chickonice/AutonomousFlight | simulation/simulation_ws/build/rotors_simulator/rotors_control/catkin_generated/pkg.installspace.context.pc.py | Python | gpl-3.0 | 777 | 0.005148 | # generated from catkin/cmake/template/pkg.context.pc.in
# Catkin pkg-config context for rotors_control.
# (Fixed corruption: the include-dirs guard string and the .replace() call
# had stray separator tokens spliced in.)
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/spacecat/AutonomousFlight/simulation/simulation_ws/install/include;/usr/include/eigen3".split(';') if "/home/spacecat/AutonomousFlight/simulation/simulation_ws/install/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "geometry_msgs;mav_msgs;roscpp;sensor_msgs;glog_catkin".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-llee_position_controller;-lroll_pitch_yawrate_thrust_controller".split(';') if "-llee_position_controller;-lroll_pitch_yawrate_thrust_controller" != "" else []
PROJECT_NAME = "rotors_control"
PROJECT_SPACE_DIR = "/home/spacecat/AutonomousFlight/simulation/simulation_ws/install"
PROJECT_VERSION = "1.0.0"
|
ursky/metaWRAP | bin/metawrap-scripts/plot_binning_results.py | Python | mit | 7,420 | 0.019946 | #!/usr/bin/env python2.7
# USAGE:
# ./script file1.stats file2.stats file3.stats
import sys
import matplotlib.pyplot as plt
plt.switch_backend('agg')
max_contamination=int(sys.argv[2])
min_completion=int(sys.argv[1])
####################################################################################################################################
############################################ MAKE THE COMPLETION PLOT ############################################
####################################################################################################################################
print "Loading completion info...."
data={}
max_x=0
# loop over all bin .stats files
for file_name in sys.argv[3:]:
bin_set=".".join(file_name.split("/")[-1].split(".")[:-1])
data[bin_set]=[]
for line in open(file_name):
# skip header
if "compl" in line: continue
# skip bins that are too contaminated or very incomplete
if float(line.split("\t")[2])>max_contamination: continue
if float(line.split("\t")[1])<min_completion: continue
# save the completion value of each bin into a list
data[bin_set].append(float(line.split("\t")[1]))
if len(data[bin_set])>max_x: max_x=len(data[bin_set])
# sort the completion dat | a sets
for bin_set in data:
data[bin_set].sort(reverse=True)
print "Plotting completion data..."
# MAKING THE PLOT PRETTY!!!!
# set some color schemes
tableau20 = [(21 | 4, 39, 40), (31, 119, 180), (255, 127, 14),
(44, 160, 44), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
plot_colors={}
for i, label in enumerate(sys.argv[1:]):
bin_set=".".join(label.split("/")[-1].split(".")[:-1])
plot_colors[bin_set]=tableau20[i]
# set figure size
plt.figure(figsize=(16, 8))
plt.style.use('ggplot')
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(121)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_linewidth(0.5)
ax.spines['bottom'].set_color('black')
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
#ax.set_facecolor('white')
ax.set_facecolor("white")
# Ensure that the axis ticks only show up on the bottom and left of the plot.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
plt.ylim(min_completion, 105)
max_x=0
for k in data:
if len(data[k])>max_x: max_x=len(data[k])
plt.xlim(0, max_x)
# Make sure your axis ticks are large enough to be easily read.
plt.yticks(range(min_completion, 105, 10), [str(x) + "%" for x in range(min_completion, 105, 10)], fontsize=14)
plt.xticks(fontsize=14)
# Provide tick lines across the plot to help your viewers trace along
for y in range(min_completion, 105, 10):
plt.plot(range(0, max_x), [y] * len(range(0, max_x)), "--", lw=0.5, color="black", alpha=0.3)
# Remove the tick marks; they are unnecessary with the tick lines we just plotted.
plt.tick_params(axis="both", which="both", bottom="off", top="off", labelbottom="on", left="off", right="off", labelleft="on")
# PLOTTING THE DATA
# prepare labeles
labels = []
for k in data: labels.append(k)
# start plotting data
for rank, bin_set in enumerate(labels):
# chose a color!
c=plot_colors[bin_set]
# plot the data
plt.plot(data[bin_set], lw=2.5, color=c)
# add bin set label to plot
y_pos = data[bin_set][len(data[bin_set])*3/4]
x_pos=len(data[bin_set])*3/4
plt.text(x_pos, y_pos, bin_set, fontsize=18, color=c)
# add plot and axis titles and adjust edges
plt.title("Bin completion ranking", fontsize=26)
plt.xlabel("Descending completion rank", fontsize=16)
plt.ylabel("Estimated bin completion", fontsize=16)
####################################################################################################################################
############################################ MAKE THE CONTAMINATION PLOT ############################################
####################################################################################################################################
print "Loading contamination info..."
data={}
# loop over all bin .stats files
for file_name in sys.argv[3:]:
bin_set=".".join(file_name.split("/")[-1].split(".")[:-1])
data[bin_set]=[]
for line in open(file_name):
# skip header
if "compl" in line: continue
# skip bins that are too incomplete or way too contaminated
if float(line.split("\t")[1])<min_completion: continue
if float(line.split("\t")[2])>max_contamination: continue
# save the contamination value of each bin into a list
data[bin_set].append(float(line.split("\t")[2]))
# sort the contamination data sets
for bin_set in data:
data[bin_set].sort(reverse=False)
print "Plotting the contamination data..."
# MAKING THE PLOT PRETTY!!!!
# Remove the plot frame lines. They are unnecessary chartjunk.
ax = plt.subplot(122)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_linewidth(0.5)
ax.spines['bottom'].set_color('black')
ax.spines["right"].set_visible(False)
ax.spines["left"].set_visible(False)
#ax.set_facecolor('white')
ax.set_facecolor("white")
# Ensure that the axis ticks only show up on the bottom and left of the plot.
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Limit the range of the plot to only where the data is.
#plt.gca().invert_yaxis()
plt.ylim(0, max_contamination+1)
#ax.set_yscale('log')
max_x=0
for k in data:
if len(data[k])>max_x: max_x=len(data[k])
plt.xlim(0, max_x)
# Make sure your axis ticks are large enough to be easily read.
plt.yticks(range(-0, max_contamination+1, 1), [str(x) + "%" for x in range(-0, max_contamination+1, 1)], fontsize=14)
plt.xticks(fontsize=14)
# Provide tick lines across the plot to help your viewers trace along
for y in range(0, max_contamination+1, 1):
plt.plot(range(0, max_x), [y] * len(range(0, max_x)), "--", lw=0.5, color="black", alpha=0.3)
# Remove the tick marks; they are unnecessary with the tick lines we just plotted.
plt.tick_params(axis="both", which="both", bottom="off", top="off", labelbottom="on", left="off", right="off", labelleft="on")
# PLOTTING THE DATA
# prepare labeles
labels = []
for k in data: labels.append(k)
# start plotting data
for rank, bin_set in enumerate(labels):
# chose a color!
c=plot_colors[bin_set]
# plot the data
plt.plot(data[bin_set], lw=2.5, color=c)
# add plot label
x_pos = len(data[bin_set])-1
y_pos = data[bin_set][-1]
plt.text(x_pos, y_pos, bin_set, fontsize=18, color=c)
# add plot and axis titles and adjust the edges
plt.title("Bin contamination ranking", fontsize=26)
plt.xlabel("Acending contamination rank", fontsize=16)
plt.ylabel("Estimated bin contamination (log scale)", fontsize=16)
plt.gcf().subplots_adjust(right=0.9)
# save figure
print "Saving figures binning_results.eps and binning_results.png ..."
plt.tight_layout(w_pad=10)
plt.subplots_adjust(top=0.92, right=0.90, left=0.08)
plt.savefig("binning_results.eps",format='eps', dpi=600)
plt.savefig("binning_results.png",format='png', dpi=600)
#plt.show()
|
burzillibus/RobHome | venv/lib/python2.7/site-packages/bs4/dammit.py | Python | mit | 29,930 | 0.01034 | # -*- coding: utf-8 -*-
"""Beautiful Soup bonus library: Unicode, Dammit
This library converts a bytestream to Unicode through any means
necessary. It is heavily based on code from Mark Pilgrim's Universal
Feed Parser. It works best on XML and HTML, but it does not rewrite the
XML or HTML to reflect a new encoding; that's the tree builder's job.
"""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__license__ = "MIT"
import codecs
from htmlentitydefs import codepoint2name
import re
import logging
import string
# Import a library to autodetect character encodings.
chardet_type = None
try:
# First try the fast C implementation.
# PyPI package: cchardet
import cchardet
def chardet_dammit(s):
return cchardet.detect(s)['encoding']
except ImportError:
try:
# Fall back to the pure Python implementation
# Debian package: python-chardet
# PyPI package: chardet
import chardet
def chardet_dammit(s):
return chardet.detect(s)['encoding']
#import chardet.constants
#chardet.constants._debug = 1
except ImportError:
# No chardet available.
def chardet_dammit(s):
return None
# Available from http://cjkpython.i18n.org/.
try:
import iconv_codec
except ImportError:
pass
xml_encoding_re = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I)
html_meta_re = re.compile(
'<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I)
class EntitySubstitution(object):
"""Substitute XML or HTML entities for the corresponding characters."""
def _populate_class_variables():
lookup = {}
reverse_lookup = {}
characters_for_re = []
for codepoint, name in list(codepoint2name.items()):
character = unichr(codepoint)
if codepoint != 34:
# There's no point in turning the quotation mark into
# ", unless it happens within an attribute value, which
# is handled elsewhere.
characters_for_re.append(character)
lookup[character] = name
# But we do want to turn " into the quotation mark.
reverse_lookup[name] = character
re_definition = "[%s]" % "".join(characters_for_re)
return lookup, reverse_lookup, re.compile(re_definition)
(CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()
CHARACTER_TO_XML_ENTITY = {
"'": "apos",
'"': "quot",
"&": "amp",
"<": "lt",
">": "gt",
}
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
")")
AMPERSAND_OR_BRACKET = re.compile("([<>&])")
@classmethod
def _substitute_html_entity(cls, matchobj):
entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
return "&%s;" % entity
@classmethod
def _substitute_xml_entity(cls, matchobj):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
return "&%s;" % entity
@classmethod
def quoted_attribute_value(self, value):
"""Make a value into a quoted XML attribute, possibly escaping it.
Most strings will be quoted using double quotes.
Bob's Bar -> "Bob's Bar"
If a string contains double quotes, it will be quoted using
single quotes.
Welcome to "my bar" -> 'Welcome to "my bar"'
If a string contains both single and double quotes, the
double quotes will be escaped, and the string will be quoted
using double quotes.
Welcome to "Bob's Bar" -> "Welcome to "Bob's bar"
"""
quote_with = '"'
if '"' in value:
if "'" in value:
# The string contains both single and double
# quotes. Turn the double quotes into
# entities. We quote the double quotes rather than
# the single quotes because the entity name is
# """ whether this is HTML or XML. If we
# quoted the single quotes, we'd have to decide
# between ' and &squot;.
replace_with = """
value = value.replace('"', replace_with)
else:
# There are double quotes but no single quotes.
# We can use single quotes to quote the attribute.
quote_with = "'"
return quote_with + value + quote_with
@classmethod
def substitute_xml(cls, value, make_quoted_attribute=False):
"""Substitute XML entities for special XML characters.
:param value: A string to be substituted. The less-than sign
will become <, the greater-than sign will become >,
and any ampersands will become &. If you want ampersands
that appear to be part of an entity definition to be left
alone, use substitute_xml_containing_entities() instead.
:param make_quoted_attribute: If True, then the string will be
quoted, as befits an attribute value.
"""
# Escape angle brackets and ampersands.
value = cls.AMPERSAND_OR_BRACKET.sub(
cls._substitute_xml_entity, value)
| if make_quoted_attribute:
value = cls.quoted_attribute_value(value)
return value
@classmethod
def substitute_xml_containing_entities(
cls, value, make_quoted_attribute=False):
""" | Substitute XML entities for special XML characters.
:param value: A string to be substituted. The less-than sign will
become <, the greater-than sign will become >, and any
ampersands that are not part of an entity defition will
become &.
:param make_quoted_attribute: If True, then the string will be
quoted, as befits an attribute value.
"""
# Escape angle brackets, and ampersands that aren't part of
# entities.
value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
cls._substitute_xml_entity, value)
if make_quoted_attribute:
value = cls.quoted_attribute_value(value)
return value
@classmethod
def substitute_html(cls, s):
"""Replace certain Unicode characters with named HTML entities.
This differs from data.encode(encoding, 'xmlcharrefreplace')
in that the goal is to make the result more readable (to those
with ASCII displays) rather than to recover from
errors. There's absolutely nothing wrong with a UTF-8 string
containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that
character with "é" will make it more readable to some
people.
"""
return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
cls._substitute_html_entity, s)
class EncodingDetector:
"""Suggests a number of possible encodings for a bytestring.
Order of precedence:
1. Encodings you specifically tell EncodingDetector to try first
(the override_encodings argument to the constructor).
2. An encoding declared within the bytestring itself, either in an
XML declaration (if the bytestring is to be interpreted as an XML
document), or in a <meta> tag (if the bytestring is to be
interpreted as an HTML document.)
3. An encoding detected through textual analysis by chardet,
cchardet, or a similar external library.
4. UTF-8.
5. Windows-1252.
"""
def __init__(self, markup, override_encodings=None, is_html=False,
exclude_encodings=None):
self.override_encodings = override_encodings or []
exclude_encodings = exclude_encodings or []
self.exclude_encodings = set([x.lower() for x in exclude_encodings])
self.chardet_encoding = None
|
taimur97/Feeder | server/appengine/endpoints/api_backend_service.py | Python | gpl-2.0 | 5,564 | 0.005931 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Api serving config collection service implementation.
Contains the implementation for BackendService as defined in api_backend.py.
"""
try:
import json
except ImportError:
import simplejson as json
import logging
from endpoints import api_backend
from endpoints import api_config
from endpoints import api_exceptions
from protorpc import message_types
__all__ = [
'ApiConfigRegistry',
'BackendServiceImpl',
]
class ApiConfigRegistry(object):
"""Registry of active APIs to be registered with Google API Server."""
def __init__(self):
self.__registered_classes = set()
self.__api_configs = set()
self.__api_methods = {}
def regis | ter_spi(self, config_contents):
"""Register a single SPI and its config contents.
Args:
config_contents: String containing API configuration.
"""
if config_contents is None:
return
parsed_config = json.loads(config_contents)
self.__register_class(parsed_con | fig)
self.__api_configs.add(config_contents)
self.__register_methods(parsed_config)
def __register_class(self, parsed_config):
"""Register the class implementing this config, so we only add it once.
Args:
parsed_config: The JSON object with the API configuration being added.
Raises:
ApiConfigurationError: If the class has already been registered.
"""
methods = parsed_config.get('methods')
if not methods:
return
service_classes = set()
for method in methods.itervalues():
rosy_method = method.get('rosyMethod')
if rosy_method and '.' in rosy_method:
method_class = rosy_method.split('.', 1)[0]
service_classes.add(method_class)
for service_class in service_classes:
if service_class in self.__registered_classes:
raise api_config.ApiConfigurationError(
'SPI class %s has already been registered.' % service_class)
self.__registered_classes.add(service_class)
def __register_methods(self, parsed_config):
"""Register all methods from the given api config file.
Methods are stored in a map from method_name to rosyMethod,
the name of the ProtoRPC method to be called on the backend.
If no rosyMethod was specified the value will be None.
Args:
parsed_config: The JSON object with the API configuration being added.
"""
methods = parsed_config.get('methods')
if not methods:
return
for method_name, method in methods.iteritems():
self.__api_methods[method_name] = method.get('rosyMethod')
def lookup_api_method(self, api_method_name):
"""Looks an API method up by name to find the backend method to call.
Args:
api_method_name: Name of the method in the API that was called.
Returns:
Name of the ProtoRPC method called on the backend, or None if not found.
"""
return self.__api_methods.get(api_method_name)
def all_api_configs(self):
"""Return a list of all API configration specs as registered above."""
return list(self.__api_configs)
class BackendServiceImpl(api_backend.BackendService):
"""Implementation of BackendService."""
def __init__(self, api_config_registry, app_revision):
"""Create a new BackendService implementation.
Args:
api_config_registry: ApiConfigRegistry to register and look up configs.
app_revision: string containing the current app revision.
"""
self.__api_config_registry = api_config_registry
self.__app_revision = app_revision
@staticmethod
def definition_name():
"""Override definition_name so that it is not BackendServiceImpl."""
return api_backend.BackendService.definition_name()
def getApiConfigs(self, request):
"""Return a list of active APIs and their configuration files.
Args:
request: A request which may contain an app revision
Returns:
ApiConfigList: A list of API config strings
"""
if request.appRevision and request.appRevision != self.__app_revision:
raise api_exceptions.BadRequestException(
message='API backend app revision %s not the same as expected %s' % (
self.__app_revision, request.appRevision))
configs = self.__api_config_registry.all_api_configs()
return api_backend.ApiConfigList(items=configs)
def logMessages(self, request):
"""Write a log message from the Swarm FE to the log.
Args:
request: A log message request.
Returns:
Void message.
"""
Level = api_backend.LogMessagesRequest.LogMessage.Level
log = logging.getLogger(__name__)
for message in request.messages:
level = message.level if message.level is not None else Level.info
record = logging.LogRecord(name=__name__, level=level.number, pathname='',
lineno='', msg=message.message, args=None,
exc_info=None)
log.handle(record)
return message_types.VoidMessage()
|
sonaht/ansible | lib/ansible/modules/utilities/logic/set_fact.py | Python | gpl-3.0 | 2,883 | 0.002775 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
author: "Dag Wieers (@dagwieers)"
module: set_fact
short_description: Set host facts from a task
description:
- This module allows setting new variables. Variables are set on a host-by-host basis just like facts discovered by the setup module.
- These variables will be available to subsequent plays during an ansible-playbook run, but will not be saved across executions even if you use
a fact cache.
- Per the standard Ansible variable precedence rules, many other types of variables have a higher priority, so this value may be overridden.
See U(http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable) for more information.
- This module is also supported for Windows targets.
options:
key_value:
description:
- The C(set_fact) module takes key=value pairs as variables to set
in the playbook scope. Or alternatively, accepts complex arguments
using the C(args:) statement.
required: true
default: null
version_added: "1.2"
notes:
- "The `var=value` notation can only create strings or booleans.
If you want to create lists/arrays or dictionary/hashes use `var: [val1, val2]`"
- This module is also supported for Windows targets.
'''
EXAMPLES = '''
# Example setting host facts using key=value pairs, note that this always creates strings or booleans
- set_fact: one_fact="something" other_fact="{{ local_var }}"
# Example setting host facts using complex arguments
- set_fact:
one_fact: something
other_fact: "{{ local_var * 2 }}"
another_fact: "{{ some_registered_var.results | map(attribute='ansible_facts.some_fact') | list }}"
# As of 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no')
# to proper boolean values when us | ing the key=value syntax, however it is still
# recommended that booleans be set | using the complex argument style:
- set_fact:
one_fact: true
other_fact: false
'''
|
boto/s3transfer | s3transfer/constants.py | Python | apache-2.0 | 910 | 0 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the A | pache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying | this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import s3transfer
KB = 1024
MB = KB * KB
GB = MB * KB
ALLOWED_DOWNLOAD_ARGS = [
'ChecksumMode',
'VersionId',
'SSECustomerAlgorithm',
'SSECustomerKey',
'SSECustomerKeyMD5',
'RequestPayer',
'ExpectedBucketOwner',
]
USER_AGENT = 's3transfer/%s' % s3transfer.__version__
PROCESS_USER_AGENT = '%s processpool' % USER_AGENT
|
luiscberrocal/homeworkpal | homeworkpal_project/maximo/migrations/0007_auto_20151117_1038.py | Python | mit | 506 | 0.001976 | # -*- coding | : utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('maximo', '0006_auto_20151111_2021'),
]
operations = [
migrations.AlterField(
model_name='maximoticket',
name='number',
field=models.CharField(validators=[django.core.validators.RegexValidator(regex='\\d{5,6}')], ma | x_length=7),
),
]
|
janeen666/mi-instrument | mi/instrument/wetlabs/fluorometer/flord_d/test/__init__.py | Python | bsd-2-clause | 22 | 0 | __auth | or__ = 'tgu | pta'
|
JenSte/libsigrokdecode | decoders/jtag_stm32/__init__.py | Python | gpl-3.0 | 1,163 | 0.012898 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## | GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to th | e Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
'''
This decoder stacks on top of the 'jtag' PD and decodes JTAG data specific to
the STM32 microcontroller series.
Details:
https://en.wikipedia.org/wiki/STM32
http://www.st.com/internet/com/TECHNICAL_RESOURCES/TECHNICAL_LITERATURE/REFERENCE_MANUAL/CD00171190.pdf (e.g. chapter 31.7: "JTAG debug port")
'''
from .pd import *
|
selurvedu/openprocurement.api | src/openprocurement/api/views/award_complaint.py | Python | apache-2.0 | 6,347 | 0.002521 | # -*- coding: utf-8 -*-
from logging import getLogger
from cornice.resource import resource, view
from openprocurement.api.models import Complaint, STAND_STILL_TIME, get_now
from openprocurement.api.utils import (
apply_patch,
save_tender,
add_next_award,
error_handler,
update_journal_handler_params,
)
from openprocurement.api.validation import (
validate_complaint_data,
validate_patch_complaint_data,
)
LOGGER = getLogger(__name__)
@resource(name='Tender Award Complaints',
collection_path='/tenders/{tender_id}/awards/{award_id}/complaints',
path='/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}',
description="Tender award complaints",
error_handler=error_handler)
class TenderAwardComplaintResource(object):
def __init__(self, request):
self.request = request
self.db = request.registry.db
@view(content_type="application/json", permission='create_award_complaint', validators=(validate_complaint_data,), renderer='json')
def collection_post(self):
"""Post a complaint for award
"""
tender = self.request.validated['tender']
if tender.status not in ['active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t add complaint in current ({}) tender status'.format(tender.status))
self.request.errors.status = 403
return
if self.request.context.complaintPeriod and \
(self.request.context.complaintPeriod.startDate and self.request.context.complaintPeriod.startDate > get_now() or
self.request.context.complaintPeriod.endDate and self.request.context.complaintPeriod.endDate < get_now()):
self.request.errors.add('body', 'data', 'Can add complaint only in complaintPeriod')
self.request.errors.status = 403
return
complaint_data = self.request.validated['data']
complaint = Complaint(complaint_data)
self.request.context.complaints.append(complaint)
if save_tender(self.request):
update_journal_handler_params({'complaint_id': complaint.id})
LOGGER.info('Created tender award complaint {}'.format(complaint.id), extra={'MESSAGE_ID': 'tender_award_complaint_create'})
self.request.response.status = 201
self.request.response.headers['Location'] = self.request.route_url('Tender Award Complaints', tender_id=tender.id, award_id=self.request.validated['award_id'], complaint_id=complaint['id'])
return {'data': complaint.serialize("view")}
@view(renderer='json', permission='view_tender')
def collection_get(self):
"""List complaints for award
"""
return {'data': [i.serialize("view") for i in self.request.context.complaints]}
@view(renderer='json', permission='view_tender')
def get(self):
"""Retrieving the complaint for award
"""
return {'data': self.request.validated['complaint'].serialize("view")}
@view(content_type="application/json", permission='review_complaint', validators=(validate_patch_complaint_data,), renderer='json')
def patch(self):
"""Post a complaint resolution for award
"""
tender = self.request.validated['tender']
if tender.status not in ['active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t update complaint in current ({}) tender status'.format(tender.status))
self.request.errors.status = 403
return
complaint = self.request.context
if complaint.status != 'pending':
self.request.errors.add('body', 'data', 'Can\'t update complaint in current ({}) status'.format(complaint.status))
self.request.errors.stat | us = 403
return
apply_patch(self.request, save=False, src=complaint.serialize())
if complaint.status == 'cancelled':
self.request.errors.add('body', 'data', 'Can\'t cancel complaint')
self.request.errors.status = 403
return
if complaint.status == 'resolved':
award = self.request.validated['award']
| if tender.status == 'active.awarded':
tender.status = 'active.qualification'
tender.awardPeriod.endDate = None
if award.status == 'unsuccessful':
for i in tender.awards[tender.awards.index(award):]:
i.complaintPeriod.endDate = get_now() + STAND_STILL_TIME
i.status = 'cancelled'
for j in i.complaints:
if j.status == 'pending':
j.status = 'cancelled'
for i in award.contracts:
i.status = 'cancelled'
award.complaintPeriod.endDate = get_now() + STAND_STILL_TIME
award.status = 'cancelled'
add_next_award(self.request)
elif complaint.status in ['declined', 'invalid'] and tender.status == 'active.awarded':
pending_complaints = [
i
for i in tender.complaints
if i.status == 'pending'
]
pending_awards_complaints = [
i
for a in tender.awards
for i in a.complaints
if i.status == 'pending'
]
stand_still_ends = [
a.complaintPeriod.endDate
for a in tender.awards
if a.complaintPeriod.endDate
]
stand_still_end = max(stand_still_ends) if stand_still_ends else get_now()
stand_still_time_expired = stand_still_end < get_now()
if not pending_complaints and not pending_awards_complaints and stand_still_time_expired:
active_awards = [
a
for a in tender.awards
if a.status == 'active'
]
if not active_awards:
tender.status = 'unsuccessful'
if save_tender(self.request):
LOGGER.info('Updated tender award complaint {}'.format(self.request.context.id), extra={'MESSAGE_ID': 'tender_award_complaint_patch'})
return {'data': complaint.serialize("view")}
|
total-impact/impactstory-analytics | impactstoryanalytics/widgets/hourly_new_users.py | Python | mit | 903 | 0.003322 | from collections import defaultdict
import iso8601
import arrow
| import logging
from impactstoryanalytics.widgets.widget import Widget, get_raw_dataclip_data
logger = logging.getLogger("impactstoryanalytics.widgets.hourly_new_users")
class Hourly_new_users(Widget):
new_accounts_query_url = "https://dataclips.heroku.com/hefcjkzcteluxosfhdvsofefjrjr.json"
def get_data(self):
number_of_datapoints = 72
datapoints = get_raw_dataclip_data(self.new_accounts_query_url)["values"][0:number_ | of_datapoints]
pans = Widget.get_time_pan_list(number_of_datapoints, interval="hour")
for datapoint in datapoints:
(iso_time, new_accounts, total_accounts) = datapoint
time = arrow.get(str(iso_time), 'YYYY-MM-DDTHH:mm:ss')
pans.add_to_pan(time, "new_accounts", int(new_accounts))
return pans.replace_NAs_with_zeroes().as_list()
|
MathYourLife/TSatPy-thesis | sandbox/dns_string_io.py | Python | mit | 10,430 | 0.005561 |
import struct
OK, EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED = range(6)
IXFR, AXFR, MAILB, MAILA, ALL_RECORDS = range(251, 256)
IN, CS, CH, HS = range(1, 5)
from io import BytesIO
class Message:
"""
L{Message} contains all the information represented by a single
DNS request or response.
@ivar id: See L{__init__}
@ivar answer: See L{__init__}
@ivar opCode: See L{__init__}
@ivar recDes: See L{__init__}
@ivar recAv: See L{__init__}
@ivar auth: See L{__init__}
@ivar rCode: See L{__init__}
@ivar trunc: See L{__init__}
@ivar maxSize: See L{__init__}
@ivar authenticData: See L{__init__}
@ivar checkingDisabled: See L{__init__}
@ivar queries: The queries which are being asked of or answered by
DNS server.
@type queries: L{list} of L{Query}
@ivar answers: Records containing the answers to C{queries} if
this is a response message.
@type answers: L{list} of L{RRHeader}
@ivar authority: Records containing information about the
authoritative DNS servers for the names in C{queries}.
@type authority: L{list} of L{RRHeader}
@ivar additional: Records containing IP addresses of host names
in C{answers} and C{authority}.
@type additional: L{list} of L{RRHeader}
"""
headerFmt = "!H2B4H"
headerSize = struct.calcsize(headerFmt)
# Question, answer, additional, and nameserver lists
queries = answers = add = ns = None
def __init__(self, id=0, answer=0, opCode=0, recDes=0, recAv=0,
auth=0, rCode=OK, trunc=0, maxSize=512,
authenticData=0, checkingDisabled=0):
"""
@param id: A 16 bit identifier assigned by the program that
generates any kind of query. This identifier is copied to
the corresponding reply and can be used by the requester
to match up replies to outstanding queries.
@type id: L{int}
@param answer: A one bit field that specifies whether this
message is a query (0), or a response (1).
@type answer: L{int}
@param opCode: A four bit field that specifies kind of query in
this message. This value is set by the originator of a query
and copied into the response.
@type opCode: L{int}
@param recDes: Recursion Desired - this bit may be set in a
query and is copied into the response. If RD is set, it
directs the name server to pursue the query recursively.
Recursive query support is optional.
@type recDes: L{int}
@param recAv: Recursion Available - this bit is set or cleared
in a response and denotes whether recursive query support
is available in the name server.
@type recAv: L{int}
@param auth: Authoritative Answer - this bit is valid in
responses and specifies that the responding name server
is an authority for the domain name in question section.
@type auth: L{int}
@ivar rCode: A response code, used to indicate success or failure in a
message which is a response from a server to a client request.
@type rCode: C{0 <= int < 16}
@param trunc: A flag indicating that this message was
truncated due to length greater than that permitted on the
transmission channel.
@type trunc: L{int}
@param maxSize: The requestor's UDP payload size is the number
of octets of the largest UDP payload that can be
reassembled and delivered in the requestor's network
stack.
@type maxSize: L{int}
@param authenticData: A flag indicating in a response that all
the data included in the answer and authority portion of
the response has been authenticated by the server
according to the policies of that server.
See U{RFC2535 section-6.1<https://tools.ietf.org/html/rfc2535#section-6.1>}.
@type authenticData: L{int}
@param checkingDisabled: A flag indicating in a query that
pending (non-authenticated) data is acceptable to the
resolver sending the query.
See U{RFC2535 section-6.1<https://tools.ietf.org/html/rfc2535#section-6.1>}.
@type authenticData: L{int}
"""
self.maxSize = maxSize
self.id = id
self.answer = answer
self.opCode = opCode
self.auth = auth
self.trunc = trunc
self.recDes = recDes
self.recAv = recAv
self.rCode = rCode
self.authenticData = authenticData
self.checkingDisabled = checkingDisabled
self.queries = []
self.answers = []
self.authority = []
self.additional = []
def addQuery(self, name, type=ALL_RECORDS, cls=IN):
"""
Add another query to this Message.
@type name: C{bytes}
@param name: The name to query.
@type type: C{int}
@param type: Query type
@type cls: C{int}
@param cls: Query class
"""
self.queries.append(Query(name, type, cls))
def encode(self, strio):
compDict = {}
body_tmp = BytesIO()
for q in self.queries:
q.encode(body_tmp, compDict)
for q in self.answers:
q.encode(body_tmp, compDict)
for q in self.authority:
q.encode(body_tmp, compDict)
for q in self.additional:
q.encode(body_tmp, compDict)
body = body_tmp.getvalue()
size = len(body) + self.headerSize
if self.maxSize and size > self.maxSize:
self.trunc = 1
body = body[:self.maxSize - self.headerSize]
byte3 = (( ( self.answer & 1 ) << 7 )
| ((self.opCode & 0xf ) << 3 )
| ((self.auth & 1 ) << 2 )
| ((self.trunc & 1 ) << 1 )
| ( self.recDes & 1 ) )
byte4 = ( ( (self.recAv & 1 ) << 7 )
| ((self.authenticData & 1) << 5)
| ((self.checkingDisabled & 1) << 4)
| (self.rCode & 0xf ) )
strio.write(struct.pack(self.headerFmt, self.id, byte3, byte4,
len(self.queries), len(self.answers),
len(self.authority), len(self.additional)))
strio.write(body)
def decode(self, strio, length=None):
| self.maxSize = 0
header = readPrecisely(strio, self.headerSize)
r = struct.unpack(self.headerFmt, header)
self.id, byte3, byte4, nqueries, nans, nns, nadd = r
self.answer = ( byte3 >> 7 ) & 1
self.opCode = ( byte3 >> 3 ) & 0xf
self.auth = ( byte3 >> 2 ) & 1
self.trunc = ( byte3 >> 1 ) | & 1
self.recDes = byte3 & 1
self.recAv = ( byte4 >> 7 ) & 1
self.authenticData = ( byte4 >> 5 ) & 1
self.checkingDisabled = ( byte4 >> 4 ) & 1
self.rCode = byte4 & 0xf
self.queries = []
for i in range(nqueries):
self.name.decode(strio)
buff = readPrecisely(strio, 4)
self.type, self.cls = struct.unpack("!HH", buff)
q = Query()
try:
q.decode(strio)
except EOFError:
return
self.queries.append(q)
items = (
(self.answers, nans),
(self.authority, nns),
(self.additional, nadd))
for (l, n) in items:
self.parseRecords(l, n, strio)
def parseRecords(self, list, num, strio):
for i in range(num):
header = RRHeader(auth=self.auth)
try:
header.decode(strio)
except EOFError:
return
t = self.lookupRecordType(header.type)
if not t:
continue
header.payload = t(ttl=header.ttl)
try:
header.payload.decode(strio, header.rdlength)
except EOFError:
return
list.append(header)
# Create a mapping from rec |
thozza/energy-track | energytrack/gui/gui_helper.py | Python | gpl-3.0 | 6,070 | 0.002142 | #!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# gui_helper.py -
# Copyright 2013 Tomas Hozza <thozza@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tomas Hozza <thozza@gmail.com>
import os
from datetime import date
from gi.repository import Gtk, Gdk
class GuiHelper(object):
(DIALOG_TYPE_ERROR,
DIALOG_TYPE_WARNING,
DIALOG_TYPE_INFO) = range(3)
@staticmethod
def enable_item(widget, enable=True):
widget.set_sensitive(enable)
@staticmethod
def calendar_get_date(calendar):
year, month, day = calendar.get_date()
return date(year, month + 1, day)
@staticmethod
def create_msg_dialog(type=DIALOG_TYPE_INFO,
message="message",
buttons=Gtk.ButtonsType.CLOSE,
parent=None, sec_message=None):
if type == GuiHelper.DIALOG_TYPE_INFO:
dlg = Gtk.MessageDialog(parent,
Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
buttons,
message)
elif type == GuiHelper.DIALOG_TYPE_WARNING:
dlg = Gtk.MessageDialog(parent,
Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.WARNING,
buttons,
message)
else:
dlg = Gtk.MessageDialog(parent,
| Gtk.DialogFlags.DESTROY_WITH_PARENT,
| Gtk.MessageType.ERROR,
buttons,
message)
if sec_message is not None:
dlg.format_secondary_text(sec_message)
return dlg
@staticmethod
def show_msg_dialog(type=DIALOG_TYPE_INFO,
message="message",
buttons=Gtk.ButtonsType.CLOSE,
parent=None,
sec_message=None):
dlg = GuiHelper.create_msg_dialog(type,
message,
buttons,
parent,
sec_message)
response = dlg.run()
dlg.hide()
return response
@staticmethod
def show_info_dialog(message="info", buttons=Gtk.ButtonsType.CLOSE, parent=None, sec_message=None):
return GuiHelper.show_msg_dialog(GuiHelper.DIALOG_TYPE_INFO,
message,
buttons,
parent,
sec_message)
@staticmethod
def show_warning_dialog(message="warning", buttons=Gtk.ButtonsType.CLOSE, parent=None, sec_message=None):
return GuiHelper.show_msg_dialog(GuiHelper.DIALOG_TYPE_WARNING,
message,
buttons,
parent,
sec_message)
@staticmethod
def show_error_dialog(message="Error", buttons=Gtk.ButtonsType.CLOSE, parent=None, sec_message=None):
return GuiHelper.show_msg_dialog(GuiHelper.DIALOG_TYPE_ERROR,
message,
buttons,
parent,
sec_message)
@staticmethod
def get_RGBA_color(color_string=None):
if color_string:
color = Gdk.RGBA()
if Gdk.RGBA.parse(color, color_string):
return color
return None
@staticmethod
def widget_override_color(widget, color_string=None):
if color_string:
widget.override_color(Gtk.StateFlags.NORMAL,
GuiHelper.get_RGBA_color(color_string))
else:
widget.override_color(Gtk.StateFlags.NORMAL,
None)
@staticmethod
def replace_widget(current, new):
"""
Replace one widget with another.
'current' has to be inside a container (e.g. gtk.VBox).
"""
container = current.get_parent()
assert container # is "current" inside a container widget?
# stolen from gazpacho code (widgets/base/base.py):
props = {}
for pspec in Gtk.ContainerClass.list_child_properties(container):
props[pspec.name] = container.child_get_property(current, pspec.name)
Gtk.Container.remove(container, current)
container.add(new)
for name, value in props.items():
container.child_set_property(new, name, value)
@staticmethod
def replace_widget2(cur, replace):
"""replace cur widget with another in a container keeping child properties"""
con = cur.get_parent()
pos = con.child_get_property(cur, "position", "")
pak = con.query_child_packing(cur)
con.remove(cur)
if replace.get_parent(): replace.get_parent().remove(replace)
#con.add_with_properties(replace, "position", pos)
con.add(replace)
con.child_set_property(replace, "position", pos)
con.set_child_packing(replace, *pak) |
gcsadovy/generalPY | manageParcels.py | Python | gpl-3.0 | 217 | 0.009217 | # manageParcels.py
import parcelClass
myParcel = parcelClass.parce | l(145000, "residential")
print "Value:", myParcel.value
print "Zoning:", myParcel.zoning
mytax = myParcel | .calculateTax()
print "Tax:", mytax |
llimllib/champsleagueviz | europa/stats.py | Python | mit | 1,359 | 0.002943 | import glob, csv, re, shutil, mustache, time
import numpy as np
oddsfile = list(sorted(glob.glob('raw/odds*.csv')))[-1]
timestamp = re.search('s(.*?)\.', oddsfile).group(1)
with open(oddsfile) as infile:
reader = csv.reader(infile)
header = reader.next()
teams = [row for row in reader]
fixed = []
for team in teams:
t = team[0:2]
for odd in team[2:]:
if odd:
o = float(odd)
# betdaq lists some impossible odds. WTF?
if o < 1: o = 1.
t.append(o)
fixed.append(t)
teams = fixed
summary = []
for team in teams: |
odds = team[2:]
try:
max_ = max(odds)
except ValueError:
#nobody is offering odds on this team, they're elimin | ated, skip them
continue
min_ = min(odds)
mean = np.mean(odds)
median = np.median(odds)
summary.append(team[:2] + [max_, min_, mean, median])
summaryfile = "raw/summary%s.csv" % timestamp
with file(summaryfile, 'w') as outfile:
w = csv.writer(outfile)
w.writerow(['name', 'group', 'max', 'min', 'mean', 'median'])
for row in summary:
w.writerow(row)
shutil.copy2(summaryfile, "summary.csv")
last_updated = time.strftime("%b %d %Y %H:%M")
context = {"last_updated": last_updated}
out = mustache.render(file("index.mustache.html").read(), context)
file("index.html", 'w').write(out)
|
lmcro/webserver | admin/CTK/CTK/Plugin.py | Python | gpl-2.0 | 7,366 | 0.008824 | # CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2010-2014 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import os
import sys
import imp
import string
import traceback
from consts import *
from Widget import Widget
from Container import Container
from Combobox import ComboCfg
from Server import cfg, publish, post, get_server
from PageCleaner import Postprocess
from Help import HelpEntry, HelpGroup
SELECTOR_CHANGED_JS = """
/* On selector change
*/
$('#%(id)s').bind ('change', this, function() {
info = {'%(key)s': $('#%(id)s')[0].value };
$.ajax ({url: '%(url)s',
type: 'POST',
async: true,
data: info,
success: function(data) {
$('#%(plugin_id)s').html(data);
$('#%(id)s').trigger('changed');
/* Activate the Save button */
var save_button = $('#save-button');
save_button.show();
save_button.removeClass('saved');
},
error: function (xhr, ajaxOptions, thrownError) {
alert ("Error: " + xhr.status +"\\n"+ xhr.statusText);
}
});
/* Update the Help menu
*/
Help_update_group ('%(key)s', $('#%(id)s')[0].value);
});
/* Help: Initial status
*/
Help_update_group ('%(key)s', $('#%(id)s')[0].value);
"""
class Plugin (Container):
def __init__ (self, key):
Container.__init__ (self)
self.key = key
self.id = "Plugin_%s" %(self.uniq_id)
class PluginInstanceProxy:
def __call__ (self, key, modules, **kwargs):
# Update the configuration
if not key in post.keys():
return ''
new_val = post.get_val (key)
cfg[key] = new_val
if not new_val:
return ''
# Instance the content
plugin = instance_plugin (new_val, key, **kwargs)
if not plugin:
return ''
# Render it
render = plugin.Render()
output = '<div id="%s">%s</div>' %(plugin.id, render.html)
if render.js:
output += HTML_JS_ON_READY_BLOCK %(render.js)
return Postprocess(output)
class PluginSelector (Widget):
def __init__ (self, key, modules, **kwargs):
def key_to_url (key):
return ''.join ([('_',c)[c in string.letters + string.digits] for c in key])
Widget.__init__ (self)
# Properties
self._key = key
self._mods = modules
active_name = cfg.get_val (self._key)
# URL
self._url = '/plugin_content_%s' %(key_to_url(key))
srv = get_server()
if srv.use_sec_submit:
self._url += '?key=%s' %(srv.sec_submit)
# Widgets
self.selector_widget = ComboCfg (key, modules)
self.plugin = instance_plugin (active_name, key, **kwargs)
# Register hidden URL for the plugin content
publish (r'^/plugin_content_%s' %(key_to_url(key)), PluginInstanceProxy,
key=key, modules=modules, method='POST', **kwargs)
def _get_helps (self):
global_key = self._key.replace('!','_')
global_help = HelpGroup(global_key)
for e in self._mods:
name, desc = e
module = load_module (name, 'plugins')
if module:
if 'HELPS' in dir(module):
| help_grp = HelpGroup (name)
for entry in module.HELPS:
help_grp += HelpEntry (entry[1], entry[0])
global_help += help_grp
return [global_help]
def Render (self):
# Load the plugin
render = self.plugin.Render()
# Warp the content
render.html = '<div id="%s">%s</div>' %(self.plugin.id, render.html)
# Add the initialization | Javascript
render.js += SELECTOR_CHANGED_JS %({
'id': self.selector_widget.id,
'url': self._url,
'plugin_id': self.plugin.id,
'key': self._key})
# Helps
render.helps = self._get_helps()
return render
# Helper functions
#
def load_module_pyc (fullpath_pyc, namespace, use_cache=True, load_src=True):
files = [fullpath_pyc]
# Load source if present
if load_src and \
(fullpath_pyc.endswith('.pyc') or \
fullpath_pyc.endswith('.pyo')):
files.insert (0, fullpath_pyc[:-1])
# Load the first available
for fullpath in files:
# Cache
if use_cache:
if sys.modules.has_key (namespace):
if sys.modules[namespace].__file__ == fullpath:
return sys.modules[namespace]
# Load
if os.path.exists (fullpath):
if fullpath.endswith ('.py'):
return imp.load_source (namespace, fullpath)
return imp.load_compiled (namespace, fullpath)
def unload_module (name):
if name in sys.modules:
del (sys.modules[name])
def load_module (name, dirname):
# Sanity check
if not name:
return
# Check the different plug-in dirs
srv = get_server()
for path in srv.plugin_paths:
mod_path = os.path.abspath (os.path.join (path, dirname))
fullpath = os.path.join (mod_path, "%s.py"%(name))
if os.access (fullpath, os.R_OK):
break
# Shortcut: it might be loaded
if sys.modules.has_key (name):
loaded_mod_file = sys.modules[name].__file__
if loaded_mod_file.endswith('.pyc'):
loaded_mod_file = loaded_mod_file[:-1]
if loaded_mod_file == fullpath:
return sys.modules[name]
# Load the plug-in
fullpath = os.path.join (mod_path, "%s.py"%(name))
try:
return imp.load_source (name, fullpath)
except IOError:
print "Could not load '%s'." %(fullpath)
raise
def instance_plugin (name, key, **kwargs):
# Load the Python module
module = load_module (name, 'plugins')
if not module:
# Instance an empty plugin
plugin = Plugin(key)
return plugin
# Instance an object
class_name = 'Plugin_%s' %(name)
obj = module.__dict__[class_name](key, **kwargs)
return obj
def figure_plugin_paths():
paths = []
# Figure the path to admin's python source (hacky!)
stack = traceback.extract_stack()
if 'pyscgi.py' in ' '.join([x[0] for x in stack]):
for stage in stack:
if 'CTK/pyscgi.py' in stage[0]:
paths.append (os.path.join (stage[0], '../../..'))
break
paths.append (os.path.dirname (stack[0][0]))
return paths
|
ksrajkumar/openerp-6.1 | win32/setup.py | Python | agpl-3.0 | 1,676 | 0.008353 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import glob
from distutils.core import setup
import py2exe
def datas():
r = []
if os.name == 'nt':
r.append(("Microsoft.VC90.CRT", glob.glob('C:\Microsoft.VC90.CRT\*.*')))
return r
setup(service=["OpenERPServerService"],
options={"py2exe":{"excludes":["Tkconstants","Tkinter","tcl",
| "_imagingtk","PIL._imagingtk",
"ImageTk", "PIL.ImageTk",
"FixTk"],
"skip_archive": 1,
| "optimize": 2,}},
data_files=datas(),
)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
drpngx/tensorflow | tensorflow/contrib/mixed_precision/python/loss_scale_manager_test.py | Python | apache-2.0 | 6,362 | 0.005816 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LossScaleManager classes.."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.mixed_precision.python import loss_scale_manager as lsm_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _GetExampleIter(inputs):
dataset = dataset_ops.Dataset.from_tensor_slices(inputs)
return dataset.make_one_shot_iterator()
class FixedLossScaleManagerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_basic(self):
itr = _GetExampleIter([True] * 10 + [False] * 10)
loss_scale = 1000
lsm = lsm_lib.FixedLossScaleManager(loss_scale)
update_fn = lambda: lsm.update_loss_scale(itr.get_next())
self.evaluate(variables.global_variables_initializer())
if not context.executing_eagerly():
update_op = update_fn()
for _ in range(10):
if context.executing_eagerly():
update_fn()
else:
self.evaluate(update_op)
self.assertEqual(loss_scale, self.evaluate(lsm.get_loss_scale()))
class ExponentialUpdateLossScaleManagerTest(test.TestCase):
def _test_helper(self,
inputs,
expected_outputs,
init_loss_scale=1,
incr_every_n_step=2,
decr_every_n_n | an_or_inf=2):
ratio = 2
lsm = lsm_lib.ExponentialUpdateLossScaleManager(
init_loss_scale=init_loss_scale,
incr_every_n_steps=incr_every_n_step,
decr_every_n_nan_or_inf=decr_every_n_nan_or_inf,
incr_ratio=ratio,
decr_ratio=1. / ratio)
itr = _GetExampleIter(inputs)
update_fn = lambda: | lsm.update_loss_scale(itr.get_next())
self.evaluate(variables.global_variables_initializer())
actual_outputs = []
if not context.executing_eagerly():
update_op = update_fn()
for _ in range(len(inputs)):
if context.executing_eagerly():
update_fn()
else:
self.evaluate(update_op)
actual_outputs.append(self.evaluate(lsm.get_loss_scale()))
self.assertEqual(actual_outputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_increase_every_n_steps(self):
inputs = [True] * 6
expected_outputs = [1, 2, 2, 4, 4, 8]
self._test_helper(inputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_keep_increasing_until_capped(self):
init_loss_scale = np.finfo(np.float32).max / 4 + 10
max_float = np.finfo(np.float32).max
inputs = [True] * 6
# Output is capped the 2nd time it doubles.
expected_outputs = [
init_loss_scale, init_loss_scale * 2, init_loss_scale * 2, max_float,
max_float, max_float
]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_decrease_every_n_steps(self):
inputs = [False] * 6
init_loss_scale = 1024
expected_outputs = [1024, 512, 512, 256, 256, 128]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_keep_decreasing_until_one(self):
inputs = [False] * 10
init_loss_scale = 16
expected_outputs = [16, 8, 8, 4, 4, 2, 2, 1, 1, 1]
self._test_helper(inputs, expected_outputs, init_loss_scale)
@test_util.run_in_graph_and_eager_modes
def test_incr_bad_step_clear_good_step(self):
inputs = [True, True, True, False, True]
expected_outputs = [1, 2, 2, 2, 2]
self._test_helper(inputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_incr_good_step_does_not_clear_bad_step(self):
inputs = [True, True, True, False, True, False]
expected_outputs = [1, 2, 2, 2, 2, 1]
self._test_helper(inputs, expected_outputs)
@test_util.run_in_graph_and_eager_modes
def test_trigger_loss_scale_update_each_step(self):
"""Test when incr_every_n_step and decr_every_n_nan_or_inf is 1."""
init_loss_scale = 1
incr_every_n_step = 1
decr_every_n_nan_or_inf = 1
inputs = [True] * 3 + [False, True, True]
expected_outputs = [2, 4, 8, 4, 8, 16]
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_each_step(self):
init_loss_scale = 1
incr_every_n_step = 1
decr_every_n_nan_or_inf = 1
inputs = [True, False] * 4 + [True]
expected_outputs = [2, 1, 2, 1, 2, 1, 2, 1, 2]
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
@test_util.run_in_graph_and_eager_modes
def test_alternating_good_and_bad_gradients_trigger_incr_every_2steps(self):
init_loss_scale = 32
incr_every_n_step = 2
decr_every_n_nan_or_inf = 1
inputs = [True, False] * 3 + [True]
expected_outputs = [32, 16, 16, 8, 8, 4, 4]
self._test_helper(inputs, expected_outputs, init_loss_scale,
incr_every_n_step, decr_every_n_nan_or_inf)
@test_util.run_in_graph_and_eager_modes
def test_random_mix_good_and_bad_gradients(self):
init_loss_scale = 4
inputs = [
False, False, True, True, True, False, True, False, True, True, True,
False
]
expected_outputs = [4, 2, 2, 4, 4, 4, 4, 2, 2, 4, 4, 4]
self._test_helper(inputs, expected_outputs, init_loss_scale)
if __name__ == "__main__":
test.main()
|
GNOME/gnome-code-assistance | backends/json/mkdeps.py | Python | lgpl-3.0 | 859 | 0.003492 | #!/usr/bin/env python
import subprocess, os, re
perdir = {}
ret = subprocess.check_output(['git', 'ls-files', '--', 'deps']).decode('utf-8').splitlines()
seen = {}
for r in ret:
if r in seen:
continue
seen[r] = True
dname = os.path.dirname(r)
if dname in perdir:
perdir[dname].append(r)
else:
perdir[dname] = [r]
datas = []
print('if PYTHON_SIMPLEJSON')
print('else')
for dname in perdir:
vname = 'json_{0}'.format(re.sub('[/.-]', '_', dname))
print('{0}dir = $(GCA_PYBACKENDS_DIR)/json/{1}'.format(vname, dname))
print('{0}_DATA = \\'.format(vname))
print("\tbackends/json/{0}".format(" \\\n\tbackends/jso | n/".join(perdir[dname])))
print('')
datas.append('$({0}_D | ATA)'.format(vname))
print('endif\n')
print('EXTRA_DIST += \\\n\t{0}'.format(' \\\n\t'.join(datas)))
# vi:ts=4:et
|
biocore/qiita | qiita_pet/handlers/study_handlers/sample_template.py | Python | bsd-3-clause | 19,340 | 0 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import basename, exists
from json import loads, dumps
from tempfile import NamedTemporaryFile
from tornado.web import authenticated, HTTPError
from qiita_core.qiita_settings import r_client
from qiita_pet.handlers.base_handlers import BaseHandler
from qiita_db.util import get_files_from_uploads_folders
from qiita_db.study import Study
from qiita_db.metadata_template.sample_template import SampleTemplate
from qiita_db.metadata_template.util import looks_like_qiime_mapping_file
from qiita_db.software import Software, Parameters
from qiita_db.processing_job import ProcessingJob
from qiita_db.exceptions import QiitaDBUnknownIDError
from qiita_pet.handlers.api_proxy import (
data_types_get_req, sample_template_samples_get_req,
prep_template_samples_get_req, study_prep_get_req,
sample_template_meta_cats_get_req, sample_template_category_get_req,
get_sample_template_processing_status,
check_fp)
SAMPLE_TEMPLATE_KEY_FORMAT = 'sample_template_%s'
def sample_template_checks(study_id, user, check_exists=False):
"""Performs different checks and raises errors if any of the checks fail
Parameters
----------
study_id : int
The study id
user : qiita_db.user.User
The user trying to access the study
check_exists : bool, optional
If true, check if the sample template exists
Raises
------
HTTPError
404 if the study does not exist
403 if the user does not have access to the study
404 if check_exists == True and the sample template doesn't exist
"""
try:
study = Study(int(study_id))
except QiitaDBUnknownIDError:
raise HTTPError(404, reason='Study does not exist')
if not study.has_access(user):
raise HTTPError(403, reason='User does not have access to study')
# Check if the sample template exists
if check_exists and not SampleTemplate.exists(study_id):
raise HTTPError(404, reason="Study %s doesn't have sample information"
% study_id)
def sample_template_handler_post_request(study_id, user, filepath,
data_type=None, direct_upload=False):
"""Creates a new sample template
Parameters
----------
study_id: int
The study to add the sample information
user: qiita_db.user import User
The user performing the request
filepath: str
The path to the sample template file
data_type: str, optional
If filepath is a QIIME mapping file, the data type of the prep
information file
direct_upload: boolean, optional
If filepath is a direct upload; if False we need to process the
filepath as part of the study upload folder
Returns
-------
dict of {'job': str}
job: the id of the job adding the sample information to the study
Raises
------
HTTPError
404 if the filepath doesn't exist
"""
# Check if the current user has access to the study
sample_template_checks(study_id, user)
# Check if the file exists
if not direct_upload:
fp_rsp = check_fp(study_id, filepath)
if fp_rsp['status'] != 'success':
raise HTTPError(404, reason='Filepath not found')
filepath = fp_rsp['file']
is_mapping_file = looks_like_qiime_mapping_file(filepath)
if is_mapping_file and not data_type:
raise HTTPError(400, reason='Please, choose a data type if uploading '
'a QIIME mapping file')
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('create_sample_template')
params = Parameters.load(
cmd, values_dict={'fp': filepath, 'study_id': study_id,
'is_mapping_file': is_mapping_file,
'data_type': data_type})
job = ProcessingJob.create(user, params, True)
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit()
return {'job': job.id}
def sample_template_handler_patch_request(user, req_op, req_path,
req_value=None, req_from=None,
direct_upload=False):
"""Patches the sample template
Parameters
----------
user: qiita_db.user.User
The user performing the request
req_op : str
The operation to perform on the sample template
req_path : str
The path to the attribute to patch
req_value : str, optional
The new value
req_from : str, optional
The original path of the element
direct_upload : boolean, optional
If the file being uploaded comes from a direct upload (True)
Returns
-------
Raises
------
HTTPError
400 If the path parameter doens't follow the expected format
400 If the given operation is not supported
"""
req_path = [v for v in req_path.split('/') if v]
# At this point we know the path should be at least length 2
if len(req_path) < 2:
raise HTTPError(400, reason='Incorrect path parameter')
study_id = int(req_path[0])
# Check if the current user has access to the study and if the sample
# template exists
sample_template_checks(study_id, user, check_exists=True)
if req_op == 'remove':
# Path format
# column: study_id/columns/column_name
# sample: study_id/samples/sample_id
if len(req_path) != 3:
raise HTTPError(400, reason='Incorrect path parameter')
attribute = req_path[1]
attr_id = req_path[2]
qiita_plugin = Software.from_name_and_version('Qiita', 'alpha')
cmd = qiita_plugin.get_command('delete_sample_or_column')
params = Parameters.load(
cmd, values_dict={'obj_class': 'SampleTemplate',
'obj_id': study_id,
'sample_or_col': attribute,
'name': attr_id})
job = ProcessingJob.create(user, params, True)
# Store the job id attaching it to the sample template id
r_client.set(SAMPLE_TEMPLATE_KEY_FORMAT % study_id,
dumps({'job_id': job.id}))
job.submit( | )
return {'job': job.id}
elif req_op == 'replace':
# WARNING: Although the patch operation is a replace, is not a full
# true replace. A replace is in theory equivalent to a remove + add.
# In this case, the repl | ace operation doesn't necessarily removes
# anything (e.g. when only new columns/samples are being added to the)
# sample information.
# Path format: study_id/data
# Forcing to specify data for extensibility. In the future we may want
# to use this function to replace other elements of the sample
# information
if len(req_path) != 2:
raise HTTPError(400, reason='Incorrect path parameter')
attribute = req_path[1]
if attribute == 'data':
# Update the sample information
if req_value is None:
raise HTTPError(400, reason="Value is required when updating "
"sample information")
if direct_upload:
# We can assume that the file exist as it was generated by
# the system
filepath = req_value
if not exists(filepath):
reason = ('Upload file not found (%s), please report to '
'qiita.help@gmail.com' % filepath)
raise HTTPError(404, reason=reason)
else:
# Check if the file exists
fp_rsp = check_fp(study_id, req_value)
|
Distrotech/bzr | bzrlib/send.py | Python | gpl-2.0 | 8,490 | 0.001296 | # Copyright (C) 2009, 2010 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import absolute_import
import os
import time
from bzrlib import (
controldir,
errors,
osutils,
registry,
trace,
)
from bzrlib.i18n import gettext
from bzrlib.branch import (
Branch,
)
from bzrlib.revision import (
NULL_REVISION,
)
# Maps send-format names to the factory callables that build merge
# directives; consulted in send() via format_registry.get().
format_registry = registry.Registry()
def send(target_branch, revision, public_branch, remember,
         format, no_bundle, no_patch, output, from_, mail_to, message, body,
         to_file, strict=None):
    """Create a merge directive for *revision* of the branch at *from_* and
    deliver it.

    When *output* is None the directive is composed as a merge-request
    e-mail via the configured mail client; otherwise it is written to
    *output* ('-' meaning the caller-supplied *to_file*).  The submit and
    public branch locations may be remembered on the source branch's
    configuration depending on *remember*.
    """
    possible_transports = []
    tree, branch = controldir.ControlDir.open_containing_tree_or_branch(
        from_, possible_transports=possible_transports)[:2]
    # we may need to write data into branch's repository to calculate
    # the data to send.
    branch.lock_write()
    try:
        if output is None:
            # Mailing: resolve the recipient and a mail client from config.
            config_stack = branch.get_config_stack()
            if mail_to is None:
                mail_to = config_stack.get('submit_to')
            mail_client = config_stack.get('mail_client')(config_stack)
            if (not getattr(mail_client, 'supports_body', False)
                and body is not None):
                raise errors.BzrCommandError(gettext(
                    'Mail client "%s" does not support specifying body') %
                    mail_client.__class__.__name__)
        if remember and target_branch is None:
            raise errors.BzrCommandError(gettext(
                '--remember requires a branch to be specified.'))
        # Work out the submit (target) branch: explicit argument, then the
        # saved submit location, then the parent branch.
        stored_target_branch = branch.get_submit_branch()
        remembered_target_branch = None
        if target_branch is None:
            target_branch = stored_target_branch
            remembered_target_branch = "submit"
        else:
            # Remembers if asked explicitly or no previous location is set
            if remember or (
                    remember is None and stored_target_branch is None):
                branch.set_submit_branch(target_branch)
        if target_branch is None:
            target_branch = branch.get_parent()
            remembered_target_branch = "parent"
        if target_branch is None:
            raise errors.BzrCommandError(gettext('No submit branch known or'
                         ' specified'))
        if remembered_target_branch is not None:
            trace.note(gettext('Using saved {0} location "{1}" to determine '
                       'what changes to submit.').format(
                                    remembered_target_branch,
                                    target_branch))
        submit_branch = Branch.open(target_branch,
            possible_transports=possible_transports)
        possible_transports.append(submit_branch.bzrdir.root_transport)
        # The submit branch's config can supply a default recipient/format.
        if mail_to is None or format is None:
            if mail_to is None:
                mail_to = submit_branch.get_config_stack().get(
                    'child_submit_to')
            if format is None:
                formatname = submit_branch.get_child_submit_format()
                try:
                    format = format_registry.get(formatname)
                except KeyError:
                    raise errors.BzrCommandError(
                        gettext("No such send format '%s'.") % formatname)
        stored_public_branch = branch.get_public_branch()
        if public_branch is None:
            public_branch = stored_public_branch
        # Remembers if asked explicitly or no previous location is set
        elif (remember
              or (remember is None and stored_public_branch is None)):
            branch.set_public_branch(public_branch)
        if no_bundle and public_branch is None:
            raise errors.BzrCommandError(gettext('No public branch specified or'
                                         ' known'))
        # Determine the (base_revision_id, revision_id) range to send.
        base_revision_id = None
        revision_id = None
        if revision is not None:
            if len(revision) > 2:
                # NOTE(review): the message reads 'at most two one revision
                # identifiers' -- looks like a typo for 'at most two'.
                raise errors.BzrCommandError(gettext('bzr send takes '
                    'at most two one revision identifiers'))
            revision_id = revision[-1].as_revision_id(branch)
            if len(revision) == 2:
                base_revision_id = revision[0].as_revision_id(branch)
        if revision_id is None:
            if tree is not None:
                # Warn/fail on uncommitted changes per the strict setting.
                tree.check_changed_or_out_of_date(
                    strict, 'send_strict',
                    more_error='Use --no-strict to force the send.',
                    more_warning='Uncommitted changes will not be sent.')
            revision_id = branch.last_revision()
        if revision_id == NULL_REVISION:
            raise errors.BzrCommandError(gettext('No revisions to submit.'))
        if format is None:
            format = format_registry.get()
        directive = format(branch, revision_id, target_branch,
                           public_branch, no_patch, no_bundle, message, base_revision_id,
                           submit_branch)
        if output is None:
            directive.compose_merge_request(mail_client, mail_to, body,
                                            branch, tree)
        else:
            if directive.multiple_output_files:
                # Multi-file directives are written into a directory.
                if output == '-':
                    raise errors.BzrCommandError(gettext('- not supported for '
                        'merge directives that use more than one output file.'))
                if not os.path.exists(output):
                    os.mkdir(output, 0755)  # Python 2 octal literal (0o755)
                for (filename, lines) in directive.to_files():
                    path = os.path.join(output, filename)
                    outfile = open(path, 'wb')
                    try:
                        outfile.writelines(lines)
                    finally:
                        outfile.close()
            else:
                if output == '-':
                    outfile = to_file
                else:
                    outfile = open(output, 'wb')
                try:
                    outfile.writelines(directive.to_lines())
                finally:
                    # Never close the caller-supplied stream.
                    if outfile is not to_file:
                        outfile.close()
    finally:
        branch.unlock()
def _send_4(branch, revision_id, target_branch, public_branch,
        no_patch, no_bundle, message,
        base_revision_id, local_target_branch=None):
    """Build a MergeDirective2 for *revision_id* of *branch*."""
    from bzrlib import merge_directive
    # Stamp the directive with the current local time/offset.
    timestamp = time.time()
    timezone = osutils.local_time_offset()
    return merge_directive.MergeDirective2.from_objects(
        branch.repository, revision_id, timestamp, timezone, target_branch,
        public_branch=public_branch,
        include_patch=not no_patch,
        include_bundle=not no_bundle,
        message=message,
        base_revision_id=base_revision_id,
        local_target_branch=local_target_branch)
def _send_0_9(branch, revision_id, submit_branch, public_branch,
no_patch, no_bundle, message,
base_revision_id, local_target_branch=None):
if not no_bundle:
if not no_patch:
patch_type = 'bundle'
else:
raise errors.BzrCommandError(gettext('Format 0.9 does not'
' permit bundle with no patch'))
else:
if not no_patch:
patch_type = 'diff'
else:
patch_type = None
from bzrlib import merge_directive
return merge_directive.MergeDirective.from_objects(
branch.repository, revision_id, time.time(),
osutils.local_time_offset(), submit_branch,
public_branch=public_b |
ZachOhara/Project-Euler | python/p061_p070/problem062.py | Python | gpl-3.0 | 688 | 0.050872 | import itertools
def main():
    """Search for the first base whose cube has five cubic digit permutations."""
    base = 1
    while not hasCubicPermutations(base ** 3, 5):
        base += 1
    print(base)
    print(base, base ** 3)
def hasCubicPermutations(n, p):
    """Return True iff exactly p distinct digit permutations of n are cubes.

    n itself counts among its own permutations.  The previous version
    returned True as soon as the running count *reached* p (an "at least p"
    test); Project Euler 62 asks for exactly p permutations, so the full
    count is now taken before comparing.
    """
    cube_count = sum(1 for candidate in set(permutations(n)) if isCube(candidate))
    return cube_count == p
def | isCube(n):
root = int(round(n ** (1 / 3)))
return root ** 3 == n
def permutations(n):
    """Every integer formed by permuting the digits of n (duplicates kept).

    Leading zeros collapse, e.g. '012' becomes 12.
    """
    digit_orderings = itertools.permutations(str(n))
    return list(map(int, ("".join(order) for order in digit_orderings)))
if __name__ == "__main__":
main()
|
GraphProcessor/CommunityDetectionCodes | Prensentation/metrics/util.py | Python | gpl-2.0 | 504 | 0.003968 | import networkx as nx
import re
def get_grap | h_info(file_path):
def extract_first_two(collection):
return [int(collection[0]), int(collection[1])]
with open(file_path) as ifs:
lines = map(lambda ele: ele.strip(), ifs.readlines())
lines = filter(lambda ele: not ele.startswith('#') and re.match('.*[0-9]+.*[0-9]+', ele), lines)
pair_list = map(lambda ele: extract_first_two(map(lambda ele2: ele2.strip(), ele.split())), lines)
| return nx.Graph(pair_list)
|
jnerin/ansible | lib/ansible/inventory/manager.py | Python | gpl-3.0 | 23,570 | 0.001782 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
import os
import re
import itertools
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError, AnsibleParserError
from ansible.inventory.data import InventoryData
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.parsing.utils.addresses import parse_address
from ansible.plugins.loader import inventory_loader
from ansible.utils.path import unfrackpath
# Reuse the CLI's global Display when running under an ansible entry point;
# fall back to a locally constructed Display otherwise.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
# Byte-level regex of file names skipped when scanning an inventory source
# directory (see parse_source): hidden files, the host_vars/group_vars/
# vars_plugins directories, plus user-configured patterns and extensions.
IGNORED_ALWAYS = [br"^\.", b"^host_vars$", b"^group_vars$", b"^vars_plugins$"]
IGNORED_PATTERNS = [to_bytes(x) for x in C.INVENTORY_IGNORE_PATTERNS]
IGNORED_EXTS = [b'%s$' % to_bytes(re.escape(x)) for x in C.INVENTORY_IGNORE_EXTS]
IGNORED = re.compile(b'|'.join(IGNORED_ALWAYS + IGNORED_PATTERNS + IGNORED_EXTS))
def order_patterns(patterns):
    ''' takes a list of patterns and reorders them by modifier to apply them consistently '''
    # FIXME: this goes away if we apply patterns incrementally or by groups
    regular, intersection, exclusion = [], [], []
    for candidate in patterns:
        if candidate.startswith("!"):
            exclusion.append(candidate)
        elif candidate.startswith("&"):
            intersection.append(candidate)
        elif candidate:
            # empty strings are dropped entirely
            regular.append(candidate)
    # An expression made only of '&'/'!' modifiers implicitly starts from
    # every host, so inject 'all' as the base pattern.
    if not regular:
        regular = ['all']
    # Apply plain patterns first, then intersections, then exclusions.
    return regular + intersection + exclusion
def split_host_pattern(pattern):
    """
    Takes a string containing host patterns separated by commas (or a list
    thereof) and returns a list of single patterns (which may not contain
    commas). Whitespace is ignored.
    Also accepts ':' as a separator for backwards compatibility, but it is
    not recommended due to the conflict with IPv6 addresses and host ranges.
    Example: 'a,b[1], c[2:3] , d' -> ['a', 'b[1]', 'c[2:3]', 'd']
    """
    if isinstance(pattern, list):
        # Flatten: split each element recursively and concatenate.
        return list(itertools.chain(*map(split_host_pattern, pattern)))
    elif not isinstance(pattern, string_types):
        # Coerce non-string input to a native text string so the string
        # operations below work.
        pattern = to_native(pattern)
    # If it's got commas in it, we'll treat it as a straightforward
    # comma-separated list of patterns.
    if ',' in pattern:
        patterns = pattern.split(',')
    # If it doesn't, it could still be a single pattern. This accounts for
    # non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
    else:
        try:
            # If the whole thing parses as one address/range, keep it whole.
            (base, port) = parse_address(pattern, allow_ranges=True)
            patterns = [pattern]
        except Exception:
            # The only other case we accept is a ':'-separated list of patterns.
            # This mishandles IPv6 addresses, and is retained only for backwards
            # compatibility.
            patterns = re.findall(
                r'''(?:     # We want to match something comprising:
                        [^\s:\[\]]    # (anything other than whitespace or ':[]'
                        |             # ...or...
                        \[[^\]]*\]    # a single complete bracketed expression)
                    )+                # occurring once or more
                ''', pattern, re.X
            )
    return [p.strip() for p in patterns]
class InventoryManager(object):
''' Creates and manages inventory '''
def __init__(self, loader, sources=None):
# base objects
self._loader = loader
self._inventory = InventoryData()
# a list of host(names) to contain current inquiries to
self._restriction = None
self._subset = None
# caches
self._hosts_patterns_cache = {} # resolved full patterns
self._pattern_cache = {} # resolved individual patterns
self._inventory_plugins = [] # for generating inventory
# the inventory dirs, files, script paths or lists of hosts
if sources is None:
self._sources = []
elif isinstance(sources, string_types):
self._sources = [sources]
else:
self._sources = sources
# get to work!
self.parse_sources(cache=True)
    @property
    def localhost(self):
        """The implicit localhost entry held by the underlying inventory data."""
        return self._inventory.localhost
    @property
    def groups(self):
        """The groups known to the underlying inventory data."""
        return self._inventory.groups
    @property
    def hosts(self):
        """The hosts known to the underlying inventory data."""
        return self._inventory.hosts
    def get_vars(self, *args, **kwargs):
        # Delegates variable lookup to the InventoryData object.
        # NOTE(review): args/kwargs are forwarded as two positional objects
        # (a tuple and a dict) rather than unpacked with */** -- confirm
        # that InventoryData.get_vars really expects that calling shape.
        return self._inventory.get_vars(args, kwargs)
    def add_host(self, host, group=None, port=None):
        """Add a host (optionally to a group, with a port) via the inventory data."""
        return self._inventory.add_host(host, group, port)
    def add_group(self, group):
        """Add a group via the underlying inventory data."""
        return self._inventory.add_group(group)
    def get_groups_dict(self):
        """Delegate to the inventory data's get_groups_dict()."""
        return self._inventory.get_groups_dict()
    def reconcile_inventory(self):
        """Clear cached lookups, then reconcile the underlying inventory."""
        self.clear_caches()
        return self._inventory.reconcile_inventory()
    def get_host(self, hostname):
        """Look up a host by name in the underlying inventory data."""
        return self._inventory.get_host(hostname)
def _setup_inventory_plugins(self):
''' sets up loaded inventory plugins for usage '''
display.vvvv('setting up inventory plugins')
for name in C.INVENTORY_ENABLED:
plugin = inventory_loader.get(name)
if plugin:
plugin.set_options()
self._inventory_plugins.append(plugin)
else:
display.warning('Failed to load inventory plugin, skipping %s' % name)
if not self._inventory_plugins:
raise AnsibleError("No inventory plugins available to generate inventory, make sure you have at least one whitelisted.")
def parse_sources(self, cache=False):
''' iterate over inventory sources and parse each one to populate it'''
self._setup_inventory_plugins()
parsed = False
# allow for multiple inventory parsing
for source in self._sources:
if source:
if ',' not in source:
source = unfrackpath(source, follow=False)
parse = self.parse_source(source, cache=cache)
if parse and not parsed:
parsed = True
if parsed:
# do post processing
self._inventory.reconcile_inventory()
else:
display.warning("No inventory was parsed, only implicit localhost is available")
self._inventory_plugins = []
def parse_source(self, source, cache=False):
''' Generate or update inventory for the source provided '''
parsed = False
display.debug(u'Examining possible inventory source: %s' % source)
b_source = to_bytes(source)
# process directories as a collection of inventories
if os.path.isdir(b_source):
display.debug(u'Searching for inventory files in directory: %s' % source)
for i in sorted(os.listdir(b_source)):
display.debug(u'Considering %s' % i)
# Skip hidden files and stuff we explicitly ignore
if IGNORED.search(i):
|
Azure/azure-sdk-for-python | sdk/commerce/azure-mgmt-commerce/azure/mgmt/commerce/aio/_usage_management_client.py | Python | mit | 2,922 | 0.002396 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
| # pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import UsageManagementClientConfiguration
from .operations import UsageAggregatesOperations
from .operations import RateCardOper | ations
from .. import models
class UsageManagementClient(object):
    """UsageManagementClient.

    :ivar usage_aggregates: UsageAggregatesOperations operations
    :vartype usage_aggregates: azure.mgmt.commerce.aio.operations.UsageAggregatesOperations
    :ivar rate_card: RateCardOperations operations
    :vartype rate_card: azure.mgmt.commerce.aio.operations.RateCardOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: It uniquely identifies Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public Azure management endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = UsageManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build the (de)serializers over every model class the package exports.
        client_models = {
            name: value for name, value in models.__dict__.items()
            if isinstance(value, type)
        }
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # Operation-group facades sharing the pipeline and serializers.
        self.usage_aggregates = UsageAggregatesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.rate_card = RateCardOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "UsageManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
|
pkyad/libreERP-old | Employee/models.py | Python | gpl-2.0 | 8,557 | 0.052121 | from django.db import models
from django import forms
from django.contrib.auth.models import User
from time import time
# Create your models here.
def getSignaturesPath(instance, filename):
    """Upload path for signature images: images/Sign/<timestamp>_<username>_<filename>."""
    stamp = str(time()).replace('.', '_')
    return 'images/Sign/{0}_{1}_{2}'.format(stamp, instance.user.username, filename)
def getDisplayPicturePath(instance, filename):
    """Upload path for display pictures: images/DP/<timestamp>_<username>_<filename>."""
    stamp = str(time()).replace('.', '_')
    return 'images/DP/{0}_{1}_{2}'.format(stamp, instance.user.username, filename)
def getIDPhotoPath(instance, filename):
    """Upload path for ID photos: images/ID/<timestamp>_<username>_<filename>."""
    stamp = str(time()).replace('.', '_')
    return 'images/ID/{0}_{1}_{2}'.format(stamp, instance.user.username, filename)
def getTNCandBondPath(instance, filename):
    """Upload path for T&C/bond documents: doc/TNCBond/<timestamp>_<username>_<filename>."""
    stamp = str(time()).replace('.', '_')
    return 'doc/TNCBond/{0}_{1}_{2}'.format(stamp, instance.user.username, filename)
def getResumePath(instance, filename):
    """Upload path for resumes: doc/Resume/<timestamp>_<username>_<filename>."""
    stamp = str(time()).replace('.', '_')
    return 'doc/Resume/{0}_{1}_{2}'.format(stamp, instance.user.username, filename)
def getCertificatesPath(instance, filename):
    """Upload path for certificates: doc/Cert/<timestamp>_<username>_<filename>."""
    stamp = str(time()).replace('.', '_')
    return 'doc/Cert/{0}_{1}_{2}'.format(stamp, instance.user.username, filename)
def getTranscriptsPath(instance, filename):
    """Upload path for transcripts: doc/Transcripts/<timestamp>_<username>_<filename>."""
    stamp = str(time()).replace('.', '_')
    return 'doc/Transcripts/{0}_{1}_{2}'.format(stamp, instance.user.username, filename)
def getOtherDocsPath(instance, filename):
    """Upload path for miscellaneous documents: doc/Others/<timestamp>_<username>_<filename>."""
    stamp = str(time()).replace('.', '_')
    return 'doc/Others/{0}_{1}_{2}'.format(stamp, instance.user.username, filename)
def getSocialCoverPictureUploadPath(instance, filename):
    """Upload path for social cover pictures: social/pictureUploads/<timestamp>_<username>_<filename>."""
    stamp = str(time()).replace('.', '_')
    return 'social/pictureUploads/{0}_{1}_{2}'.format(stamp, instance.user.username, filename)
class userProfile(models.Model):
    """Extended per-user profile: personal data, addresses, documents and
    social fields attached one-to-one to the auth User."""
    user = models.OneToOneField(User)
    PREFIX_CHOICES = (
        ('NA' , 'NA'),
        ('Mr' , 'Mr'),
        ('Mrs' , 'Mrs'),
        ('Smt' , 'Smt'),
        ('Shri' ,'Shri'),
    )
    GENDER_CHOICES = (
        ('M' , 'Male'),
        ('F' , 'Female'),
        ('O' , 'Other'),
    )
    # --- identity ---
    empID = models.PositiveIntegerField(unique = True , null = True)
    displayPicture = models.ImageField(upload_to = getDisplayPicturePath)
    dateOfBirth = models.DateField( null= True )
    # NOTE(review): 'anivarsary' looks like a typo for 'anniversary'; renaming
    # the field would require a schema migration.
    anivarsary = models.DateField( null= True )
    # --- permanent address (all optional) ---
    permanentAddressStreet = models.TextField(max_length = 100 , null= True , blank=True)
    permanentAddressCity = models.CharField(max_length = 15 , null= True , blank=True)
    permanentAddressPin = models.IntegerField(null= True , blank=True)
    permanentAddressState = models.CharField(max_length = 20 , null= True , blank=True)
    permanentAddressCountry = models.CharField(max_length = 20 , null= True , blank=True)
    # --- local address ---
    localAddressStreet = models.TextField(max_length = 100 , null= True )
    localAddressCity = models.CharField(max_length = 15 , null= True )
    localAddressPin = models.IntegerField(null= True )
    localAddressState = models.CharField(max_length = 20 , null= True )
    localAddressCountry = models.CharField(max_length = 20 , null= True )
    # --- contact details ---
    prefix = models.CharField(choices = PREFIX_CHOICES , default = 'NA' , max_length = 4)
    gender = models.CharField(choices = GENDER_CHOICES , default = 'M' , max_length = 6)
    email = models.EmailField(max_length = 50)
    email2 = models.EmailField(max_length = 50, blank = True)
    mobile = models.PositiveIntegerField( null = True)
    emergency = models.PositiveIntegerField(null = True)
    tele = models.PositiveIntegerField(null = True , blank = True)
    website = models.URLField(max_length = 100 , null = True , blank = True)
    # --- uploaded images and documents ---
    sign = models.ImageField(upload_to = getSignaturesPath , null = True)
    # NOTE(review): IDPhoto reuses getDisplayPicturePath although a dedicated
    # getIDPhotoPath helper exists -- confirm which path is intended.
    IDPhoto = models.ImageField(upload_to = getDisplayPicturePath , null = True)
    TNCandBond = models.FileField(upload_to = getTNCandBondPath , null = True)
    resume = models.FileField(upload_to = getResumePath , null = True)
    certificates = models.FileField(upload_to = getCertificatesPath , null = True)
    transcripts = models.FileField(upload_to = getTranscriptsPath , null = True)
    otherDocs = models.FileField(upload_to = getOtherDocsPath , null = True , blank = True)
    # --- education and family ---
    almaMater = models.CharField(max_length = 100 , null = True)
    pgUniversity = models.CharField(max_length = 100 , null = True , blank = True)
    docUniversity = models.CharField(max_length = 100 , null = True , blank = True)
    fathersName = models.CharField(max_length = 100 , null = True)
    mothersName = models.CharField(max_length = 100 , null = True)
    wifesName = models.CharField(max_length = 100 , null = True , blank = True)
    childCSV = models.CharField(max_length = 100 , null = True , blank = True)
    # --- free-form notes and social fields ---
    note1 = models.TextField(max_length = 500 , null = True , blank = True)
    note2 = models.TextField(max_length = 500 , null = True , blank = True)
    note3 = models.TextField(max_length = 500 , null = True , blank = True)
    aboutMe = models.TextField(max_length = 1000 , null = True)
    status = models.CharField(max_length = 100 , null = True) # social status
    coverPic = models.ImageField(upload_to = getSocialCoverPictureUploadPath , null = True , blank = True)
# Convenience accessor: user.profile lazily creates the userProfile row on
# first access (get_or_create returns an (obj, created) tuple; [0] is the obj).
User.profile = property(lambda u : userProfile.objects.get_or_create(user = u)[0])
# Originating subsystem of a notification record.
DOMAIN_CHOICES = (
    ('SYS' , 'System'),
    ('ADM' , 'Administration'),
    ('APP' , 'Application')
    )
class notification(models.Model):
    """A per-user notification with an optional link and read/hold state."""
    message = models.TextField(max_length = 300 , null=True)
    link = models.URLField(max_length = 100 , null = True)
    shortInfo = models.CharField(max_length = 30 , null = True)
    read = models.BooleanField(default = False)
    user = models.ForeignKey(User)
    # which subsystem produced the notification (see DOMAIN_CHOICES)
    domain = models.CharField(null = False , default = 'SYS' , choices = DOMAIN_CHOICES , max_length = 3)
    originator = models.CharField(null = True , max_length = 20)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    onHold = models.BooleanField(default = False)
def getChatMessageAttachment(instance, filename):
    """Upload path for chat attachments:
    chat/<timestamp>_<recipient>_<sender>_<filename>.

    Bug fix: the original format string had three '%s' placeholders but was
    given four values, raising TypeError at upload time; a fourth
    placeholder is added so every component is included.
    """
    return 'chat/%s_%s_%s_%s' % (str(time()).replace('.', '_'),
                                 instance.user.username,
                                 instance.originator.username, filename)
class chatMessage(models.Model):
    """A direct chat message from originator to user, with optional attachment."""
    message = models.CharField(max_length = 200 , null=True)
    attachment = models.FileField(upload_to = getChatMessageAttachment , null = True)
    originator = models.ForeignKey(User , related_name = "sentIMs" , null = True)
    created = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User)
    read = models.BooleanField(default = False)
def getMailMessageAttachment(instance, filename):
    """Upload path for mail attachments:
    mail/<timestamp>_<recipient>_<sender>_<filename>.

    Bug fix: four values were supplied to three '%s' placeholders, which
    raises TypeError; a fourth placeholder is added.
    NOTE(review): mailMessage.originator is a CharField, so
    instance.originator.username also looks wrong -- confirm the intended
    field before relying on this helper.
    """
    return 'mail/%s_%s_%s_%s' % (str(time()).replace('.', '_'),
                                 instance.user.username,
                                 instance.originator.username, filename)
class mailMessage(models.Model):
    """An internal mail message delivered to a user, with optional attachments."""
    message = models.CharField(max_length = 4000 , null = True)
    subject = models.CharField(max_length = 200 , null = True)
    attachments = models.FileField(upload_to = getMailMessageAttachment , null = True)
    # NOTE(review): stored as a plain string here, but getMailMessageAttachment
    # dereferences instance.originator.username -- these disagree.
    originator = models.CharField(null = True , max_length = 20)
    created = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User)
    read = models.BooleanField(default = False)
    CCd = models.CharField(max_length = 300 , null = True)
def getCalendarAttachment(instance, filename):
    """Upload path for calendar attachments:
    calendar/<timestamp>_<user>_<originator>_<filename>.

    Bug fix: four values were supplied to three '%s' placeholders, which
    raises TypeError; a fourth placeholder is added.
    NOTE(review): calenderItem.originator is a CharField, so
    instance.originator.username may also be wrong -- confirm intended field.
    """
    return 'calendar/%s_%s_%s_%s' % (str(time()).replace('.', '_'),
                                     instance.user.username,
                                     instance.originator.username, filename)
class calenderItem(models.Model):
TYPE_CHOICE =(
('NONE' , 'Not Available'),
('MEET' , 'Meeting'),
('REMI' , 'Reminder'),
('TODO' , 'ToDo'),
('EVEN' , 'EVENT'),
('DEAD' , 'Deadline'),
('OTHE' , 'Other'),
)
LEVEL_CHOICE = (
('NOR' , 'Normal'),
('CRI' , 'Critical'),
('OPT' , 'Optional'),
('MAN' , 'Mandatory'),
)
eventType = models.CharField(choices = TYPE_CHOICE , default = 'NONE' , max_length = 4)
originator = models.CharField(null = True , max_length = 20)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
user = models.ForeignKey(User)
text = models.CharField(max_length = 200 , null = True)
notification = models.ForeignKey(notification)
when = models.DateTimeField(null = True)
checked = models.BooleanField(default = False)
deleted = models.BooleanField(default = F |
rakeshmi/cinder | cinder/tests/unit/api/v2/test_types.py | Python | apache-2.0 | 8,430 | 0 | # Copyright 2011 OpenStack Foundation
# aLL Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from lxml import etree
from oslo_utils import timeutils
import six
import webob
from cinder.api.v2 import types
from cinder.api.views import types as views_types
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.volume import volume_types
def stub_volume_type(id):
    """Build a fake volume-type dict with five canned extra-spec entries."""
    specs = dict(("key%d" % index, "value%d" % index) for index in range(1, 6))
    return dict(
        id=id,
        name='vol_type_%s' % six.text_type(id),
        description='vol_type_desc_%s' % six.text_type(id),
        extra_specs=specs,
    )
def return_volume_types_get_all_types(context, search_opts=None):
    """Stub for volume_types.get_all_types returning three canned types."""
    return {'vol_type_%d' % index: stub_volume_type(index)
            for index in (1, 2, 3)}
def return_empty_volume_types_get_all_types(context, search_opts=None):
    """Stub for volume_types.get_all_types returning no types."""
    return {}
def return_volume_types_get_volume_type(context, id):
    """Stub for volume_types.get_volume_type; id "777" simulates a miss."""
    if id != "777":
        return stub_volume_type(id)
    raise exception.VolumeTypeNotFound(volume_type_id=id)
def return_volume_types_get_by_name(context, name):
    """Stub for lookup by name; "777" simulates a missing name.

    Extracts the numeric suffix from names shaped 'vol_type_<n>'.
    """
    if name == "777":
        raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
    type_number = int(name.split("_")[2])
    return stub_volume_type(type_number)
def return_volume_types_get_default():
    """Stub for get_default_volume_type returning canned type 1."""
    return stub_volume_type(1)
def return_volume_types_get_default_not_found():
    """Stub for get_default_volume_type when no default is configured."""
    return {}
class VolumeTypesApiTest(test.TestCase):
    """Tests for the v2 volume-types controller using stubbed volume_types."""
    def setUp(self):
        super(VolumeTypesApiTest, self).setUp()
        self.controller = types.VolumeTypesController()
    def test_volume_types_index(self):
        # Three canned types come back, each carrying the canned extra specs.
        self.stubs.Set(volume_types, 'get_all_types',
                       return_volume_types_get_all_types)
        req = fakes.HTTPRequest.blank('/v2/fake/types')
        res_dict = self.controller.index(req)
        self.assertEqual(3, len(res_dict['volume_types']))
        expected_names = ['vol_type_1', 'vol_type_2', 'vol_type_3']
        actual_names = map(lambda e: e['name'], res_dict['volume_types'])
        self.assertEqual(set(actual_names), set(expected_names))
        for entry in res_dict['volume_types']:
            self.assertEqual('value1', entry['extra_specs']['key1'])
    def test_volume_types_index_no_data(self):
        self.stubs.Set(volume_types, 'get_all_types',
                       return_empty_volume_types_get_all_types)
        req = fakes.HTTPRequest.blank('/v2/fake/types')
        res_dict = self.controller.index(req)
        self.assertEqual(0, len(res_dict['volume_types']))
    def test_volume_types_show(self):
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        type_id = str(uuid.uuid4())
        req = fakes.HTTPRequest.blank('/v2/fake/types/' + type_id)
        res_dict = self.controller.show(req, type_id)
        self.assertEqual(1, len(res_dict))
        self.assertEqual(type_id, res_dict['volume_type']['id'])
        type_name = 'vol_type_' + type_id
        self.assertEqual(type_name, res_dict['volume_type']['name'])
    def test_volume_types_show_not_found(self):
        # id "777" is the stub's sentinel for a missing type.
        self.stubs.Set(volume_types, 'get_volume_type',
                       return_volume_types_get_volume_type)
        req = fakes.HTTPRequest.blank('/v2/fake/types/777')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
                          req, '777')
    def test_get_default(self):
        self.stubs.Set(volume_types, 'get_default_volume_type',
                       return_volume_types_get_default)
        req = fakes.HTTPRequest.blank('/v2/fake/types/default')
        req.method = 'GET'
        res_dict = self.controller.show(req, 'default')
        self.assertEqual(1, len(res_dict))
        self.assertEqual('vol_type_1', res_dict['volume_type']['name'])
        self.assertEqual('vol_type_desc_1',
                         res_dict['volume_type']['description'])
    def test_get_default_not_found(self):
        self.stubs.Set(volume_types, 'get_default_volume_type',
                       return_volume_types_get_default_not_found)
        req = fakes.HTTPRequest.blank('/v2/fake/types/default')
        req.method = 'GET'
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, 'default')
    def test_view_builder_show(self):
        # The view builder should expose only name/description/specs/id.
        view_builder = views_types.ViewBuilder()
        now = timeutils.isotime()
        raw_volume_type = dict(
            name='new_type',
            description='new_type_desc',
            deleted=False,
            created_at=now,
            updated_at=now,
            extra_specs={},
            deleted_at=None,
            id=42,
        )
        request = fakes.HTTPRequest.blank("/v2")
        output = view_builder.show(request, raw_volume_type)
        self.assertIn('volume_type', output)
        expected_volume_type = dict(
            name='new_type',
            description='new_type_desc',
            extra_specs={},
            id=42,
        )
        self.assertDictMatch(output['volume_type'], expected_volume_type)
    def test_view_builder_list(self):
        view_builder = views_types.ViewBuilder()
        now = timeutils.isotime()
        raw_volume_types = []
        for i in range(0, 10):
            raw_volume_types.append(
                dict(
                    name='new_type',
                    description='new_type_desc',
                    deleted=False,
                    created_at=now,
                    updated_at=now,
                    extra_specs={},
                    deleted_at=None,
                    id=42 + i
                )
            )
        request = fakes.HTTPRequest.blank("/v2")
        output = view_builder.index(request, raw_volume_types)
        self.assertIn('volume_types', output)
        for i in range(0, 10):
            expected_volume_type = dict(
                name='new_type',
                description='new_type_desc',
                extra_specs={},
                id=42 + i
            )
            self.assertDictMatch(output['volume_types'][i],
                                 expected_volume_type)
class VolumeTypesSerializerTest(test.TestCase):
    def _verify_volume_type(self, vtype, tree):
        """Assert the XML element *tree* matches the volume-type dict *vtype*."""
        self.assertEqual('volume_type', tree.tag)
        self.assertEqual(vtype['name'], tree.get('name'))
        self.assertEqual(vtype['description'], tree.get('description'))
        self.assertEqual(str(vtype['id']), tree.get('id'))
        self.assertEqual(1, len(tree))
        extra_specs = tree[0]
        self.assertEqual('extra_specs', extra_specs.tag)
        # Every serialized spec must exist exactly once with the right value.
        seen = set(vtype['extra_specs'].keys())
        for child in extra_specs:
            self.assertIn(child.tag, seen)
            self.assertEqual(vtype['extra_specs'][child.tag], child.text)
            seen.remove(child.tag)
        self.assertEqual(len(seen), 0)
    def test_index_serializer(self):
        """Serializing a set of types yields one <volume_type> child each."""
        serializer = types.VolumeTypesTemplate()
        # Just getting some input data
        vtypes = return_volume_types_get_all_types(None)
        text = serializer.serialize({'volume_types': vtypes.values()})
        tree = etree.fromstring(text)
        self.assertEqual('volume_types', tree.tag)
        self.assertEqual(len(vtypes), len(tree))
        for child in tree:
            name = child.get('name')
            self.assertIn(name, vtypes)
            self._verify_volume_type(vtypes[name], child)
def test_voltype_ |
N402/NoahsArk | ark/goal/models.py | Python | mit | 6,393 | 0.003285 | from datetime import datetime
from flask.ext.babel import lazy_gettext as _
from sqlalchemy import func, select
from sqlalchemy.ext.hybrid import hybrid_property
from ark import settings
from ark.exts import db, cache
from ark.ranking.models import GoalRankingBan
class Goal(db.Model):
    """A user goal with likes, activity updates and a popularity score."""
    __tablename__ = 'goal'
    # Lifecycle states; values are translated for display.
    GOAL_STATES = {
        'doing': _('Doing'),
        'canceled': _('Canceled'),
        'finished': _('Finished'),
    }
    id = db.Column(db.Integer, primary_key=True)
    account_id = db.Column(db.Integer, db.ForeignKey('account.id'))
    title = db.Column(db.String(100), nullable=False)
    description = db.Column(db.String(300))
    image_file_id = db.Column(db.Integer, db.ForeignKey('goal_file.id'))
    created = db.Column(db.DateTime, default=datetime.utcnow)
    # timestamp of the last cancel/complete operation
    operate_at = db.Column(db.DateTime)
    score = db.Column(db.Integer, default=0)
    # NOTE(review): enum members come from dict key order -- fine on
    # Python 3.7+, but confirm ordering is irrelevant to the schema.
    state = db.Column(db.Enum(*(GOAL_STATES.keys())), default='doing')
    is_deleted = db.Column(db.Boolean, default=False)
    image = db.relationship('GoalFile', uselist=False)
    likes = db.relationship('GoalLikeLog', uselist=True, lazy='dynamic',
                            backref=db.backref('goal', uselist=False))
    activities = db.relationship('GoalActivity', uselist=True, lazy='dynamic',
                                 backref=db.backref('goal', uselist=False))
@hybrid_property
def last_activity(self):
last_activity = self.activities.limit(1).first()
if last_activity:
return last_activity
return None
@last_activity.expression
def last_activity(cls):
return (select([GoalActivity]).where(GoalActivity.goal_id==cls.id)
.order_by(GoalActivity.created.desc())
.limit(1).label('last_activity'))
@hybrid_property
def last_activity_interval(self):
if not self.last_activity:
return 0
delta = datetime.utcnow() - self.last_activity.created
return delta.days
@last_activity_in | terval.expression
def last_activity_interval(cls):
return (select([func.datediff(func | .now(), GoalActivity.created)])
.where(GoalActivity.goal_id==cls.id)
.order_by(GoalActivity.created.desc())
.limit(1).label('last_activity_interval'))
@hybrid_property
def like_count(self):
return self.likes.filter(GoalLikeLog.is_deleted==False).count()
@like_count.expression
def like_count(cls):
return (select([func.count(GoalLikeLog.id)])
.where(GoalLikeLog.goal_id==cls.id)
.where(GoalLikeLog.is_deleted==False).label('like_count'))
@hybrid_property
def activity_count(self):
return self.activities.filter(GoalActivity.is_deleted==False).count()
@activity_count.expression
def activity_count(cls):
return (select([func.count(GoalActivity.id)])
.where(GoalActivity.goal_id==cls.id)
.where(GoalActivity.is_deleted==False)
.label('activity_count'))
def cal_score(self):
return (self.like_count * settings.GOAL_LIKE_SOCRE +
self.activity_count * settings.GOAL_UPDATE_SCORE +
self.last_activity_interval * settings.GOAL_UPDATE_DAY)
@cache.memoize(3600)
def cache_score(self):
return self.score
@hybrid_property
def is_ban(self):
return (self.bans.filter(GoalRankingBan.is_deleted==False).count() > 0)
@is_ban.expression
def is_ban(cls):
return (select([func.count(GoalRankingBan.id) > 0])
.where(GoalRankingBan.goal_id==cls.id).label('is_ban'))
def cancel(self):
self.state = 'canceled'
self.operate_at = datetime.utcnow()
def complete(self):
self.state = 'finished'
self.operate_at = datetime.utcnow()
def display_state(self):
return self.GOAL_STATES[self.state]
def is_doing(self):
return self.state == 'doing'
def is_like_by(self, account):
count = (self.likes
.filter(GoalLikeLog.account_id==account.id)
.filter(GoalLikeLog.is_deleted==False).count())
return count > 0
def is_belong_to(self, account):
return self.author.id is account.id
class GoalActivity(db.Model):
    """A progress update posted on a goal, optionally with an image."""

    __tablename__ = 'goal_activity'

    id = db.Column(db.Integer, primary_key=True)
    goal_id = db.Column(db.Integer, db.ForeignKey('goal.id'))
    account_id = db.Column(db.Integer, db.ForeignKey('account.id'))
    image_file_id = db.Column(db.Integer, db.ForeignKey('goal_file.id'))
    activity = db.Column(db.String(300))
    created = db.Column(db.DateTime, default=datetime.utcnow)
    is_deleted = db.Column(db.Boolean, default=False)

    image = db.relationship('GoalFile', uselist=False)
    author = db.relationship(
        'Account', uselist=False,
        backref=db.backref('updates', uselist=True, lazy='dynamic',
                           order_by='desc(GoalActivity.created)'))

    def is_belong_to(self, account):
        """True when *account* wrote this activity."""
        # BUG FIX: was `is`, an identity comparison that only happens to
        # work for CPython's small-int cache; use value equality.
        return self.author.id == account.id

    def delete(self):
        """Soft-delete this activity and commit immediately."""
        self.is_deleted = True
        db.session.add(self)
        db.session.commit()
class GoalLikeLog(db.Model):
    """A 'like' given by an account to a goal; soft-deleted, never removed."""
    __tablename__ = 'goal_like_log'
    id = db.Column(db.Integer, primary_key=True)
    goal_id = db.Column(db.Integer, db.ForeignKey('goal.id'))
    account_id = db.Column(db.Integer, db.ForeignKey('account.id'))
    created = db.Column(db.DateTime, default=datetime.utcnow)
    # Un-liking sets this flag instead of deleting the row.
    is_deleted = db.Column(db.Boolean, default=False)
class GoalFile(db.Model):
    """An uploaded image; helpers build imageMogr2 thumbnail URLs."""
    __tablename__ = 'goal_file'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    file_url = db.Column(db.String(500))
    account_id = db.Column(db.Integer, db.ForeignKey('account.id'))
    created = db.Column(db.DateTime, default=datetime.utcnow)
    is_deleted = db.Column(db.Boolean, default=False)

    def get_thumbnail(self, width=0, height=0):
        """URL of a thumbnail forced to exactly width x height."""
        return '{0}?imageMogr2/thumbnail/{1}x{2}!'.format(
            self.file_url, width, height)

    def get_thumbnail_limit_width(self, width=0):
        """URL of a thumbnail scaled to the given width."""
        return '{0}?imageMogr2/thumbnail/{1}x'.format(self.file_url, width)

    def get_scale_long(self, width=0, height=0):
        """URL of a thumbnail scaled to fit within width x height."""
        return '{0}?imageMogr2/thumbnail/{1}x{2}'.format(
            self.file_url, width, height)
|
clarson469/reinforcementLearning | solutions/solution_util.py | Python | mit | 819 | 0.006105 | import numpy as np
def e_greedy(estimates, epsilon):
    """Epsilon-greedy arm selection for a batch of bandits.

    :param estimates: (num_bandits, num_arms) array of value estimates.
    :param epsilon: probability that a bandit explores (random arm).
    :return: array with one chosen arm index per bandit.
    """
    num_bandits, num_arms = estimates.shape
    # Decide independently per bandit whether to explore.
    # FIX: this line was corrupted ("np.random.rand | om"); restored.
    explore = np.zeros(num_bandits)
    explore[np.random.random(num_bandits) <= epsilon] = 1
    # Greedy choice everywhere, then overwrite the explorers with
    # uniformly random arms.
    arm = np.argmax(estimates, axis=1)
    arm[explore == 1] = np.random.randint(0, num_arms, np.count_nonzero(explore))
    return arm
def softmax(estimates, temperature):
    """Softmax (Boltzmann) arm selection for a batch of bandits.

    :param estimates: (num_bandits, num_arms) array of value estimates.
    :param temperature: spread of the distribution; lower is greedier.
    :return: array with one sampled arm index per bandit.
    """
    # Work on (num_arms, num_bandits) so each column is one bandit's
    # distribution; subtract the global max for numerical stability.
    temp_est = estimates.T / temperature
    exponents = np.exp(temp_est - np.max(temp_est))
    dist = exponents / np.sum(exponents, axis=0)
    # First arm whose cumulative probability exceeds a uniform draw.
    # FIX: this line was corrupted ("return | ("); restored.
    # NOTE(review): a fresh uniform is drawn per (arm, bandit) cell rather
    # than one per bandit -- preserved as-is, but verify this sampling
    # scheme is intended.
    return (np.random.random(temp_est.shape) < dist.cumsum(axis=0)).argmax(axis=0)
def pref_softmax(preferences):
    """Sample one arm per bandit from the softmax of raw preferences."""
    transposed = preferences.T
    # Shift by the global max before exponentiating for numerical stability.
    weights = np.exp(transposed - np.max(transposed))
    probabilities = weights / np.sum(weights, axis=0)
    cumulative = probabilities.cumsum(axis=0)
    draws = np.random.random(transposed.shape)
    return (draws < cumulative).argmax(axis=0)
|
vicnet/weboob | weboob/applications/qbooblyrics/main_window.py | Python | lgpl-3.0 | 11,785 | 0.002376 | # -*- coding: utf-8 -*-
# Copyright(C) 2016 Julien Veyssier
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import os
from PyQt5.QtCore import pyqtSlot as Slot, Qt
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QApplication, QFrame, QShortcut
from weboob.capabilities.lyrics import CapLyrics
from weboob.tools.application.qt5 import QtMainWindow, QtDo
from weboob.tools.application.qt5.backendcfg import BackendCfg
from weboob.tools.application.qt5.models import BackendListModel
from weboob.tools.application.qt5.search_history import HistoryCompleter
from weboob.applications.qbooblyrics.ui.main_window_ui import Ui_MainWindow
from weboob.applications.qbooblyrics.ui.result_ui import Ui_Result
from .minisonglyrics import MiniSonglyrics
from .songlyrics import Songlyrics
MAX_TAB_TEXT_LENGTH=30
class Result(QFrame):
    """One tab of search results: a list page, a detail page, and a
    back-navigation history of the actions that produced them."""
    def __init__(self, weboob, app, parent=None):
        super(Result, self).__init__(parent)
        self.ui = Ui_Result()
        self.ui.setupUi(self)
        self.parent = parent
        self.weboob = weboob
        self.app = app
        # MiniSonglyrics widgets currently shown in the list page.
        self.minis = []
        self.current_info_widget = None
        # action history is composed by the last action and the action list
        # An action is a function, a list of arguments and a description string
        self.action_history = {'last_action': None, 'action_list': []}
        self.ui.backButton.clicked.connect(self.doBack)
        self.ui.backButton.setShortcut(QKeySequence('Alt+Left'))
        self.ui.backButton.hide()
    def doAction(self, description, fun, args):
        ''' Call fun with args as arguments
        and save it in the action history
        '''
        self.ui.currentActionLabel.setText(description)
        if self.action_history['last_action'] is not None:
            self.action_history['action_list'].append(self.action_history['last_action'])
            self.ui.backButton.setToolTip('%s (Alt+Left)'%self.action_history['last_action']['description'])
            self.ui.backButton.show()
        self.action_history['last_action'] = {'function': fun, 'args': args, 'description': description}
        # manage tab text: truncate long descriptions, keep full text as tooltip
        mytabindex = self.parent.ui.resultsTab.indexOf(self)
        tabtxt = description
        if len(tabtxt) > MAX_TAB_TEXT_LENGTH:
            tabtxt = '%s...'%tabtxt[:MAX_TAB_TEXT_LENGTH]
        self.parent.ui.resultsTab.setTabText(mytabindex, tabtxt)
        self.parent.ui.resultsTab.setTabToolTip(mytabindex, description)
        return fun(*args)
    @Slot()
    def doBack(self):
        ''' Go back in action history
        Basically call previous function and update history
        '''
        if len(self.action_history['action_list']) > 0:
            todo = self.action_history['action_list'].pop()
            self.ui.currentActionLabel.setText(todo['description'])
            self.action_history['last_action'] = todo
            if len(self.action_history['action_list']) == 0:
                self.ui.backButton.hide()
            else:
                self.ui.backButton.setToolTip(self.action_history['action_list'][-1]['description'])
            # manage tab text (same truncation as doAction)
            mytabindex = self.parent.ui.resultsTab.indexOf(self)
            tabtxt = todo['description']
            if len(tabtxt) > MAX_TAB_TEXT_LENGTH:
                tabtxt = '%s...'%tabtxt[:MAX_TAB_TEXT_LENGTH]
            self.parent.ui.resultsTab.setTabText(mytabindex, tabtxt)
            self.parent.ui.resultsTab.setTabToolTip(mytabindex, todo['description'])
            return todo['function'](*todo['args'])
    def processFinished(self):
        # Called when the async QtDo process ends: re-enable input,
        # restore the cursor and drop the process handle.
        self.parent.ui.searchEdit.setEnabled(True)
        QApplication.restoreOverrideCursor()
        self.process = None
        self.parent.ui.stopButton.hide()
    @Slot()
    def stopProcess(self):
        # Abort the running backend query, if any.
        if self.process is not None:
            self.process.stop()
    def searchSonglyrics(self,pattern):
        if not pattern:
            return
        self.doAction(u'Search lyrics "%s"' % pattern, self.searchSonglyricsAction, [pattern])
    def searchSonglyricsAction(self, pattern):
        """Clear the list page and start an async iter_lyrics query."""
        self.ui.stackedWidget.setCurrentWidget(self.ui.list_page)
        for mini in self.minis:
            self.ui.list_content.layout().removeWidget(mini)
            mini.hide()
            mini.deleteLater()
        self.minis = []
        self.parent.ui.searchEdit.setEnabled(False)
        QApplication.setOverrideCursor(Qt.WaitCursor)
        backend_name = self.parent.ui.backendEdit.itemData(self.parent.ui.backendEdit.currentIndex())
        self.process = QtDo(self.weboob, self.addSonglyrics, fb=self.processFinished)
        # NOTE(review): ('title') is just the string 'title', not a tuple --
        # probably meant ('title',); confirm against _do_complete's signature.
        self.process.do(self.app._do_complete, self.parent.getCount(), ('title'), 'iter_lyrics',
                        self.parent.ui.typeCombo.currentText(), pattern, backends=backend_name, caps=CapLyrics)
        self.parent.ui.stopButton.show()
    def addSonglyrics(self, songlyrics):
        # Append one result row (inserted before the trailing stretch item).
        minisonglyrics = MiniSonglyrics(self.weboob, self.weboob[songlyrics.backend], songlyrics, self)
        self.ui.list_content.layout().insertWidget(self.ui.list_content.layout().count()-1,minisonglyrics)
        self.minis.append(minisonglyrics)
    def displaySonglyrics(self, songlyrics, backend):
        """Switch to the detail page and show the full lyrics widget."""
        self.ui.stackedWidget.setCurrentWidget(self.ui.info_page)
        if self.current_info_widget is not None:
            self.ui.info_content.layout().removeWidget(self.current_info_widget)
            self.current_info_widget.hide()
            self.current_info_widget.deleteLater()
        wsonglyrics = Songlyrics(songlyrics, backend, self)
        self.ui.info_content.layout().addWidget(wsonglyrics)
        self.current_info_widget = wsonglyrics
        QApplication.restoreOverrideCursor()
    def searchId(self, id):
        """Look up lyrics by id, optionally suffixed with '@backend'."""
        QApplication.setOverrideCursor(Qt.WaitCursor)
        if '@' in id:
            backend_name = id.split('@')[1]
            id = id.split('@')[0]
        else:
            backend_name = None
        for backend in self.weboob.iter_backends():
            if (backend_name and backend.name == backend_name) or not backend_name:
                songlyrics = backend.get_lyrics(id)
                if songlyrics:
                    self.doAction('Lyrics of "%s" (%s)' % (songlyrics.title, songlyrics.artist), self.displaySonglyrics, [songlyrics, backend])
        QApplication.restoreOverrideCursor()
class MainWindow(QtMainWindow):
def __init__(self, config, weboob, app, parent=None):
super(MainWindow, self).__init__(parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.config = config
self.weboob = weboob
self.app = app
self.minis = []
self.current_info_widget = None
# search history is a list of patterns which have been searched
history_path = os.path.join(self.weboob.workdir, 'qbooblyrics_history')
qc = HistoryCompleter(history_path, self)
qc.load()
qc.setCaseSensitivity(Qt.CaseInsensitive)
self.ui.searchEdit.setCompleter(qc)
self.ui.typeCombo.addItem('song')
self.ui.typeCombo.addItem('artist')
self.ui.searchEdit.returnPressed.connect(self.search)
self.ui.idEdit. | returnPressed.connect(self.searchId)
| count = self.config.get('settings', 'maxresultsnumber')
self.ui.countSpin.setValue(int(count))
showT = self.config.get('settings', 'showthumbnails')
self.ui.showTCheck.setChecked(showT == '1')
self.ui.stopButton.hide()
self.ui.actionBackends.triggered.co |
barbour-em/osf.io | website/profile/views.py | Python | apache-2.0 | 22,557 | 0.001286 | # -*- coding: utf-8 -*-
import logging
import operator
import httplib
import httplib as http
import os
from dateutil.parser import parse as parse_date
from flask import request
from modularodm.exceptions import ValidationError, NoResultsFound
from modularodm import Q
from framework import sentry
from framework.auth import utils as auth_utils
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from framework.auth.exceptions import ChangePasswordError
from framework.auth.views import send_confirm_email
from framework.exceptions import HTTPError, PermissionsError
from framework.flask import redirect # VOL-aware redirect
from framework.status import push_status_message
from website import mails
from website import mailchimp_utils
from website import settings
from website.models import User
from website.models import ApiKey
from website.profile import utils as profile_utils
from website.util import web_url_for, paths
from website.util.sanitize import escape_html
from website.util.sanitize import strip_html
from website.views import _render_nodes
logger = logging.getLogger(__name__)
def get_public_projects(uid=None, user=None):
    """Render the given user's public, non-deleted top-level projects."""
    target = user or User.load(uid)
    criteria = (
        Q('category', 'eq', 'project') &
        Q('is_public', 'eq', True) &
        Q('is_registration', 'eq', False) &
        Q('is_deleted', 'eq', False)
    )
    return _render_nodes(list(target.node__contributed.find(criteria)))
def get_public_components(uid=None, user=None):
    """Render the given user's public, non-deleted components."""
    target = user or User.load(uid)
    # TODO: This should use User.visible_contributor_to?
    criteria = (
        Q('category', 'ne', 'project') &
        Q('is_public', 'eq', True) &
        Q('is_registration', 'eq', False) &
        Q('is_deleted', 'eq', False)
    )
    components = list(target.node__contributed.find(criteria))
    return _render_nodes(components, show_path=True)
@must_be_logged_in
def current_user_gravatar(size=None, **kwargs):
    """Return the gravatar URL of the logged-in user."""
    auth = kwargs['auth']
    return get_gravatar(auth.user._id, size=size)
def get_gravatar(uid, size=None):
    """Return a dict with the gravatar URL for the user with id *uid*."""
    user = User.load(uid)
    return {'gravatar_url': profile_utils.get_gravatar(user, size=size)}
def date_or_none(date):
    """Parse *date* into a datetime; log and return None on any failure."""
    try:
        parsed = parse_date(date)
    except Exception as error:
        # Deliberately broad: any parse failure is logged and swallowed.
        logger.exception(error)
        return None
    return parsed
def validate_user(data, user):
    """Ensure the request payload's "id" matches the logged-in user.

    Raises BAD_REQUEST when no id is supplied, FORBIDDEN on mismatch.
    """
    if 'id' not in data:
        # The request must always carry the user id.
        raise HTTPError(httplib.BAD_REQUEST, data={'message_long': '"id" is required'})
    if data['id'] != user._id:
        raise HTTPError(httplib.FORBIDDEN)
@must_be_logged_in
def resend_confirmation(auth):
    """Resend the confirmation email for one of the user's unconfirmed
    addresses.

    Expects JSON ``{'id': ..., 'email': {'address', 'primary', 'confirmed'}}``;
    raises BAD_REQUEST for malformed payloads or already-confirmed emails.
    """
    user = auth.user
    data = request.get_json()
    validate_user(data, user)
    try:
        primary = data['email']['primary']
        confirmed = data['email']['confirmed']
        address = data['email']['address'].strip().lower()
    except KeyError:
        raise HTTPError(httplib.BAD_REQUEST)
    if primary or confirmed:
        # BUG FIX: corrected the "Cannnot" typo in this user-facing message.
        raise HTTPError(httplib.BAD_REQUEST, data={'message_long': 'Cannot resend confirmation for confirmed emails'})
    # Re-adding refreshes the confirmation token before resending.
    user.add_unconfirmed_email(address)
    # TODO: This setting is now named incorrectly.
    if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
        send_confirm_email(user, email=address)
    user.save()
    return _profile_view(user)
@must_be_logged_in
def update_user(auth):
    """Update the logged-in user's profile.

    Handles email removals/additions, a primary-email (username) change,
    and locale/timezone updates, in that order, then saves once.
    """
    # trust the decorator to handle auth
    user = auth.user
    data = request.get_json()
    validate_user(data, user)
    # TODO: Expand this to support other user attributes
    ##########
    # Emails #
    ##########
    if 'emails' in data:
        emails_list = [x['address'].strip().lower() for x in data['emails']]
        # The current username must always remain in the submitted list.
        if user.username not in emails_list:
            raise HTTPError(httplib.FORBIDDEN)
        # removals: anything currently on the account but not resubmitted
        removed_emails = [
            each
            for each in user.emails + user.unconfirmed_emails
            if each not in emails_list
        ]
        if user.username in removed_emails:
            raise HTTPError(httplib.FORBIDDEN)
        for address in removed_emails:
            if address in user.emails:
                try:
                    user.remove_email(address)
                except PermissionsError as e:
                    raise HTTPError(httplib.FORBIDDEN, e.message)
            # Safe even when the address was only in user.emails.
            user.remove_unconfirmed_email(address)
        # additions: submitted addresses the account does not know yet
        added_emails = [
            each['address'].strip().lower()
            for each in data['emails']
            if each['address'].strip().lower() not in user.emails
            and each['address'].strip().lower() not in user.unconfirmed_emails
        ]
        for address in added_emails:
            try:
                user.add_unconfirmed_email(address)
            except (ValidationError, ValueError):
                # Invalid/duplicate addresses are silently skipped.
                continue
            # TODO: This setting is now named incorrectly.
            if settings.CONFIRM_REGISTRATIONS_BY_EMAIL:
                send_confirm_email(user, email=address)
        ############
        # Username #
        ############
        # get the first email that is set to primary and has an address
        # NOTE(review): next() has no default here -- if no submitted email
        # is primary+confirmed this raises StopIteration, and `username`
        # below would be unbound if primary_email were falsy; confirm the
        # client always sends exactly one confirmed primary email.
        primary_email = next(
            (
                each for each in data['emails']
                # email is primary
                if each.get('primary') and each.get('confirmed')
                # an address is specified (can't trust those sneaky users!)
                and each.get('address')
            )
        )
        if primary_email:
            primary_email_address = primary_email['address'].strip().lower()
            if primary_email_address not in user.emails:
                raise HTTPError(httplib.FORBIDDEN)
            username = primary_email_address
        # make sure the new username has already been confirmed
        if username and username in user.emails and username != user.username:
            mails.send_mail(user.username,
                            mails.PRIMARY_EMAIL_CHANGED,
                            user=user,
                            new_address=username)
            user.username = username
    ###################
    # Timezone/Locale #
    ###################
    if 'locale' in data:
        if data['locale']:
            # Normalize e.g. "en-US" to "en_US".
            locale = data['locale'].replace('-', '_')
            user.locale = locale
    # TODO: Refactor to something like:
    #   user.timezone = data.get('timezone', user.timezone)
    if 'timezone' in data:
        if data['timezone']:
            user.timezone = data['timezone']
    user.save()
    return _profile_view(user)
def _profile_view(profile, is_profile=False):
    """Serialize *profile* for the profile page template.

    :param profile: User record to display; 410 when disabled, 404 when falsy.
    :param is_profile: True when the viewer is looking at their own page.
    """
    # TODO: Fix circular import
    from website.addons.badges.util import get_sorted_user_badges
    if profile and profile.is_disabled:
        raise HTTPError(http.GONE)
    if 'badges' in settings.ADDONS_REQUESTED:
        # BUG FIX: a stray trailing comma previously wrapped this call's
        # result in a one-element tuple instead of passing it through.
        badge_assertions = get_sorted_user_badges(profile)
        badges = _get_user_created_badges(profile)
    else:
        # NOTE: While badges are unused, 'assertions' and 'badges' can be
        # empty lists.
        badge_assertions = []
        badges = []
    if profile:
        profile_user_data = profile_utils.serialize_user(profile, full=True)
        return {
            'profile': profile_user_data,
            'assertions': badge_assertions,
            'badges': badges,
            'user': {
                'is_profile': is_profile,
                'can_edit': None,  # necessary for rendering nodes
                'permissions': [],  # necessary for rendering nodes
            },
        }
    raise HTTPError(http.NOT_FOUND)
def _get_user_created_badges(user):
addon = user.get_addon('badges')
if addon:
return [badge for badge in addon.badge__creator if not badge.is_system_badge]
return []
@must_be_logged_in
def profile_view(auth):
    """Render the logged-in user's own profile page (is_profile=True)."""
    return _profile_view(auth.user, True)
@collect_auth
def profile_view_id(uid, auth):
user = User.load(uid)
is_profile |
LAL/SLAM | src/slam/interface.py | Python | apache-2.0 | 32,758 | 0.004152 | """Helper to execute actions on the database independantly from the interface
and output format."""
import logging, datetime, sys, re
from slam import generator, models
from slam.log import DbLogHandler
# set-up logging to the database
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
DBLOGHANDLER = DbLogHandler()
DBLOGHANDLER.setLevel(logging.INFO)
LOGGER.addHandler(DBLOGHANDLER)
STDOUTHANDLER = logging.StreamHandler()
STDOUTHANDLER.setLevel(logging.INFO)
LOGGER.addHandler(STDOUTHANDLER)
class InexistantObjectError(Exception):
"""Exception raised when the given object name was not found in the
database."""
pass
class DuplicateObjectError(Exception):
"""Exception raised when trying to create an object that already exists."""
#def _get_message(self):
# return self._message
#def _set_message(self, message):
# self._message = message
#message = property(_get_message, _set_message)
#def __init__(self, message):
# Call the base class constructor with the parameters it needs
#Exception.__init__(self, message)
pass
class MissingParameterError(Exception):
"""Exception raised when a parameter is missing."""
pass
class ConfigurationFormatError(Exception):
"""Exception raised when the given configuration format is not
supported."""
pass
class PropertyFormatError(Exception):
"""Exception raised if the property format is invalid."""
pass
def isValidHostname(hostname):
disallowed = re.compile("[^a-zA-Z\d\-\.]")
#return all(map(lambda x: len(x) and not disallowed.search(x), hostname.split("."))) //pour chaque x dans split array(),appliquer les functions len(x) and not disallowed.search(x)
return len(hostname) and not disallowed.search(hostname)
def get_host(host_name=None):
"""Retrieve a host object from the database."""
host = None
if host_name:
if models.Host.objects.filter(name=host_name):
host = models.Host.objects.get(name=host_name)
elif models.Alias.objects.filter(name=host_name):
host = models.Alias.objects.get(name=host_name).host
else:
raise InexistantObjectError("Could not find host named: "
+ str(host_name))
return host
def get_pool(pool_name=None, category=None):
"""Retrieve a pool object from the database."""
if pool_name:
if not models.Pool.objects.filter(name=pool_name).count():
raise InexistantObjectError("Could not find pool named: "
+ str(pool_name))
return models.Pool.objects.get(name=pool_name)
elif category:
for poolobj in models.Pool.objects.exclude(category=""):
if category in poolobj.category.split(","):
return poolobj
raise InexistantObjectError("No pool in category: " + category)
else:
return None
def create_pool(pool_name=None, definition=None, category=None):
"""Try to retrieve the given *pool_name* from the database or a create a
new one with the given *definition* otherwise."""
if models.Pool.objects.filter(name=pool_name):
raise DuplicateObjectError("Pool named \""
+ pool_name + "\" already exists.")
if not pool_name:
raise MissingParameterError("Missing a name for the pool to create.")
if not definition:
raise MissingParameterError("Missing pool definition.")
if category is None:
category = ""
else:
category = ",".join(category)
pool = models.Pool.create(name=pool_name, definition=definition,
category=category)
LOGGER.info("Created pool: " + str(pool))
pool.save()
return pool
def create_generator(name, type_, outputfile, default=False, header=None,
footer=None, checkfile=None, timeout=None, domain=None, pools=None):
"""Create a new generator object."""
if name and models.Config.objects.filter(name=name):
raise DuplicateObjectError("Generator \"" + name
+ "\" already exists.")
if not name:
raise MissingParameterError(
"You must provide a name for the new generator.")
if not type_:
raise MissingParameterError(
"You must provide a type for the new generator.")
if not outputfile:
raise MissingParameterError(
"You must provide an output file for the new generator.")
genobj = None
if type_ == "bind":
genobj = generator.BindConfig.create(name=name, default=default,
outputfile=outputfile, header=header, footer=footer,
checkfile=checkfile, update=True, timeout=timeout)
elif type_ == "revbind":
genobj = generator.RevBindConfig.create(name=name, default=default,
outputfile=outputfile, header=header, footer=footer,
checkfile=checkfile, update=True, timeout=timeout)
elif type_ == "dhcp":
genobj = generator.DhcpdConfig.create(name=name, default=default,
outputfile=outputfile, header=header, footer=footer,
checkfile=checkfile, update=True)
elif type_ == "quattor":
genobj = generator.QuattorConfig.create(name=name, default=default,
outputfile=outputfile, header=header, checkfile=checkfile,
footer=footer, update=True)
elif type_ == "laldns":
genobj = generator.LalDnsConfig.create(name=name, default=default,
outputfile=outputfile, header=header, checkfile=checkfile,
footer=footer, update=True)
else:
raise MissingParameterError("Wrong configuration format: " + type_)
genobj.save()
LOGGER.info("Created new generator: " + str(genobj))
if pools:
for pool in pools:
pool = ge | t_pool(pool)
pool.generator.add(genobj)
genobj.save()
return genobj
def get_generator(name):
"""Get the correct configuration generator object."""
genobj = None
if name:
if not models.Config.objects.filter(name=name):
raise InexistantObjectError("Could not find generator: "
+ name)
confobj = models.Config.objects. | get(name=name)
if confobj.conftype == "bind":
genobj = generator.BindConfig.objects.get(name=name)
elif confobj.conftype == "rbind":
genobj = generator.RevBindConfig.objects.get(name=name)
elif confobj.conftype == "dhcp":
genobj = generator.DhcpdConfig.objects.get(name=name)
elif confobj.conftype == "quatt":
genobj = generator.QuattorConfig.objects.get(name=name)
elif confobj.conftype == "laldns":
genobj = generator.LalDnsConfig.objects.get(name=name)
else:
raise InexistantObjectError("Could not find generator: "
+ name)
return genobj
def get_default_generators(conf_type=None):
"""Get every generators marked as default, eventually filtered by
configuration type."""
gens = generator.Config.objects.filter(default=True)
if conf_type:
gens = gens.filter(conftype=conf_type)
res = []
for gen in gens:
tmp = get_generator(gen.name)
if tmp:
res.append(tmp)
return res
def modify_generator(name, default=False, outputfile=None, header=None,
footer=None, checkfile=None, timeout=None, domain=None, pools=None):
"""Modify an existing generator."""
gen = get_generator(name)
if gen is None:
raise InexistantObjectError("Could not find generator: " + name)
logmsg = ""
if default:
gen.default = not gen.default
if gen.default:
logmsg += ", set as default"
else:
logmsg += ", removed default"
if outputfile:
gen.outputfile = outputfile
logmsg += ", new output file (" + str(outputfile) + ")"
if header:
gen.headerfile = header
logmsg += ", new header file (" + str(header) + ")"
if footer:
gen.footerfile = footer
logmsg += ", new footer file (" + str(footer) + ")"
if checkfile:
gen.checkfile = ", ".join(checkfile)
logmsg += ", new check files (" + st |
OhmNomNom/Michelangelo | TempLogger/prog.py | Python | gpl-3.0 | 534 | 0.013109 | import serial,time
# Poll a 3D-printer-style serial device for its temperature (M105) every
# 0.1 s for three minutes and log "<elapsed seconds> <temperature>" pairs.
# FIX: restored two corrupted lines (serial.Serial(2, ...) and the M04
# write/flush pair) and closed both the log file and the serial port.
ser = serial.Serial(2, baudrate=9600)
print(ser.name)
try:
    with open("Output.txt", "w") as log_file:
        # M04: device-specific command to start the session/heater.
        ser.write(b"M04\n")
        ser.flush()
        ser.readline().decode()
        # FIX: time.clock() was removed in Python 3.8; measure elapsed
        # time explicitly with perf_counter instead.
        start = time.perf_counter()
        elapsed = 0.0
        while elapsed <= 180:
            ser.write(b"M105\n")
            ser.flush()
            resp = ser.readline().decode()
            elapsed = time.perf_counter() - start
            # Temperature follows the first 'T' in the response,
            # e.g. "ok T:213.5 ..." -> "213.5".
            resp = resp[resp.find('T') + 1:].split()[0]
            print(str(elapsed) + " " + resp)
            log_file.write(str(elapsed) + " " + resp + "\n")
            log_file.flush()
            time.sleep(0.1)
        # M03: device-specific command to end the session.
        ser.write(b"M03\n")
        ser.flush()
finally:
    ser.close()
input()
edespino/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/walrepl/oom/__init__.py | Python | apache-2.0 | 4,065 | 0.003936 | """
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under t | he License.
"""
import os
import subprocess
import socket
from time import sleep
import tinctest
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.config import GPDBConfig
from mpp.lib.PSQL import PSQL
from mpp.gpdb.tests.storage.walrepl.lib.pg_util import GpUtility
from mpp.gpdb.tests.storage.walrepl.gpinitstandby import GpinitStandby
from | mpp.gpdb.tests.storage.walrepl.lib.verify import StandbyVerify
from mpp.gpdb.tests.storage.walrepl.lib.standby import Standby
# Shared cluster configuration, read once at import time.
config = GPDBConfig()
class OOMClass(object):
    '''Drives the standby-master out-of-memory (OOM) test case.

    malloc failures are injected on the standby via oom_malloc.so, loaded
    through wrapper.sh when a sentinel file named "oom_malloc" exists in
    the standby data directory.
    '''
    # Port and directory name used for the test standby master.
    standby_port = '5433'
    standby_dirname = 'newstandby'
    def __init__(self):
        self.gpinit = GpinitStandby()
        self.mdd = os.environ.get('MASTER_DATA_DIRECTORY')
        self.config = GPDBConfig()
        self.pgutil = GpUtility()
        self.verify = StandbyVerify()
        self.host = socket.gethostname()
        # The standby data directory lives next to the master's.
        self.standby_loc = os.path.join(os.path.split(self.mdd)[0],
                                        self.standby_dirname)
        self.standby = Standby(self.standby_loc, self.standby_port)
    def create_standby(self):
        '''Wipe any previous standby directory and init a fresh standby.'''
        self.pgutil.clean_dir(self.host,self.standby_loc)
        self.gpinit.run(option = '-P %s -s %s -F pg_system:%s' % (self.standby_port, self.host, self.standby_loc))
    def setup_oom(self):
        '''Build the malloc shim and distribute it to all segment hosts.'''
        # Build it before testing.
        thisdir = os.path.dirname(__file__)
        builddir = os.path.join(thisdir, 'lib')
        subprocess.check_call(['make', '-C', builddir, 'install'])
        #Copy oom_malloc.so and wrapper.sh to all the segment nodes
        for host in config.get_hosts() :
            if host.strip() == self.host :
                continue
            cmd = "gpssh -h %s -e 'mkdir -p %s'; scp %s/oom_malloc.so %s:%s/; scp %s/wrapper.sh %s:%s/" % (host.strip(), builddir, builddir, host.strip(), builddir, builddir, host.strip(), builddir)
            self.pgutil.run(cmd)
    def touch_malloc(self):
        '''Create the sentinel that makes the shim start failing mallocs.'''
        # Touch file oom_malloc in standby directory
        cmd = 'touch %s/oom_malloc' % self.standby_loc
        self.pgutil.run(cmd)
    def startdb(self):
        '''Start the cluster through wrapper.sh; False if standby failed.'''
        (rc, result) = self.pgutil.run('gpstart -a --wrapper %s' % (local_path('lib/wrapper.sh')))
        if rc != 0 and 'Could not start standby master' in result :
            return False
        return True
    def restartdb(self):
        '''Restart without OOM injection; True when replication resumes.'''
        # Remove file oom_malloc from standby
        cmd = 'rm %s/oom_malloc' % self.standby_loc
        self.pgutil.run(cmd)
        (rc, result) = self.pgutil.run('gpstop -ar')
        if rc == 0 and (self.verify.check_pg_stat_replication()):
            return True
        return False
    def psql_and_oom(self):
        '''Inject OOM, run SQL, and expect standby processes to die.'''
        #Touch oom_malloc in standby_dir and issue PSQL : Check if processes are gone
        self.touch_malloc()
        PSQL.run_sql_command('Drop table if exists wal_oomt1;Create table wal_oomt1(a1 int, a2 text) with(appendonly=true);')
        # NOTE(review): fixed 2s sleep is a potential race on slow hosts.
        sleep(2)
        if not (self.verify.check_standby_processes()):
            return True
        return False
    def start_standby(self):
        '''Clear OOM injection, restart standby, expect processes back.'''
        # Remove oom_malloc and start standby : Check if all processes are back
        cmd = 'rm %s/oom_malloc' % self.standby_loc
        self.pgutil.run(cmd)
        res = self.standby.start()
        sleep(2)
        if (self.verify.check_standby_processes()) :
            return True
        return False
|
lyubomir1993/AlohaServer | api/migrations/0002_auto_20170328_1936.py | Python | apache-2.0 | 448 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-28 19:36
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='user',
),
migrations.DeleteMod | el(
name | ='Profile',
),
]
|
fnl/libfnl | src/fnl/text/test/test_extract.py | Python | agpl-3.0 | 7,798 | 0.000513 | from html.parser import HTMLParseError
from unittest import main, TestCase
from fnl.text.extract import HtmlExtractor, GREEK_LOWER, GREEK_UPPER
class ExtractHtmlTests(TestCase):
    def setUp(self):
        # Fresh extractor per test; tests call self.ex.reset() to reuse it.
        self.ex = HtmlExtractor()
    def testBasicExtraction(self):
        # <wbr> elements vanish and the surrounding text joins seamlessly;
        # each enclosing element is recorded as a (namespace, tag, offsets)
        # key spanning the extracted text.
        self.ex.namespace = 'test'
        self.ex.feed("""
        <html>
        <body>
        <p>
        <span>Hel<wbr>lo World<wbr>!</span>
        </p>
        </body>
        </html>
        """)
        self.ex.close()
        self.assertEqual('Hello World!', self.ex.string)
        self.assertDictEqual({
            ('test', 'html', (0, 12)): {},
            ('test', 'body', (0, 12)): {},
            ('test', 'p', (0, 12)): {},
            ('test', 'span', (0, 12)): {},
        }, self.ex.tags)
def testTagAttributes(self):
self.ex.feed('<span key=value>data</span>')
self.ex.close()
self.assertEqual('data', self.ex.string)
tag = (self.ex.namespace, 'span', (0, 4))
self.assertEqual({tag: {'key': 'value'}}, self.ex.tags)
    def testIncompleteHtml(self):
        # Malformed markup only raises lazily, when .string is accessed.
        # NOTE(review): html.parser.HTMLParseError was removed in Python
        # 3.5, so this module only imports on older Pythons.
        self.ex.feed("broken <tag <p>")
        self.ex.close()
        self.assertRaises(HTMLParseError, lambda: self.ex.string)
        self.ex.reset()
        self.ex.feed("<p>hiho")
        self.ex.close()
        self.assertRaises(HTMLParseError, lambda: self.ex.string)
    def testCharacterRef(self):
        # Decimal and hexadecimal (either case) character references decode
        # to the referenced character; invalid ones yield the replacement.
        self.ex.feed("&#{};".format(ord("a")))
        self.ex.close()
        self.assertEqual('a', self.ex.string)
        self.ex.reset()
        self.ex.feed("&#x{:x};".format(ord("a")))
        self.ex.close()
        self.assertEqual('a', self.ex.string)
        self.ex.reset()
        # Uppercasing the whole reference ("&#X5A;") must still decode.
        self.ex.feed("&#x{:x};".format(ord("Z")).upper())
        self.ex.close()
        self.assertEqual('Z', self.ex.string)
        self.ex.reset()
        # NOTE(review): this literal looks mis-encoded in transit; the
        # original presumably fed an out-of-range numeric reference.
        self.ex.feed("�")
        self.ex.close()
        self.assertEqual(HtmlExtractor.REPLACEMENT, self.ex.string)
def testIgnoredTags(self):
self.ex.feed("<script key=value />")
self.ex.close()
self.assertEqual('', self.ex.string)
self.assertEqual({}, self.ex.tags)
self.ex.reset()
self.ex.feed('<script>ignore</script>')
self.ex.close()
self.assertEqual('', self.ex.string)
self.assertEqual({}, self.ex.tags)
def testEntityReferences(self):
self.ex.feed("<")
self.ex.close()
self.assertEqual('<', self.ex.string)
self.ex.reset()
self.ex.feed("<".upper())
self.ex.close()
self.assertEqual('<', self.ex.string)
self.ex.reset()
self.ex.feed("&junk;")
self.ex.close()
self.assertEqual(HtmlExtractor.REPLACEMENT, self.ex.string)
def testComment(self):
self.ex.feed("<!-- comment -->")
self.ex.close()
self.assertEqual('', self.ex.string)
def testDeclaration(self):
self.ex.feed("<!declaration bla bla>sentinel")
self.ex.close()
self.assertEqual('sentinel', self.ex.string)
def testProcessInstruction(self):
self.ex.feed("<?proc bla bla>sentinel")
self.ex.close()
self.assertEqual('sentinel', self.ex.string)
def testImageAndAreaTags(self):
for t in ('img', 'area'):
self.ex.feed("<{}>".format(t))
self.ex.close()
self.assertEqual(HtmlExtractor.OBJECT_REPLACEMENT, self.ex.string)
self.assertEqual({(self.ex.namespace, t, (0, 1)): {}},
self.ex.tags)
self.ex.reset()
self.ex.feed("<{} title='a' alt='b'>".format(t))
self.ex.close()
self.assertEqual('b', self.ex.string)
self.assertEqual({(self.ex.namespace, t, (0, 1)): {'title': 'a'}},
self.ex.tags)
self.ex.reset()
self.ex.feed("<{} title='a'>".format(t))
self.ex.close()
self.assertEqual('a', self.ex.string)
self.assertEqual({(self.ex.namespace, t, (0, 1)): {}},
self.ex.tags)
self.ex.reset()
self.ex.feed("<{} key='val'>".format(t))
self.ex.close()
self.assertEqual(HtmlExtractor.OBJECT_REPLACEMENT, self.ex.string)
self.assertEqual({(self.ex.namespace, t, (0, 1)): {'key': 'val'}},
self.ex.tags)
self.ex.reset()
def testIgnoreUnknownElement(self):
self.ex.feed('<tag>data</tag>')
self.ex.close()
self.assertEqual('', self.ex.string)
self.assertEqual({}, self.ex.tags)
def testImageGreekCharacerAlt(self):
for n in ('alpha', 'AlPhA', 'ALPHA'):
self.ex.reset()
self.ex.feed('<img alt="{}">'.format(n))
self.ex.close()
self.assertEqual(GREEK_LOWER[n.lower()], self.ex.string)
self.ex.reset()
self.ex.feed('<img alt="Alpha">')
self.ex.close()
self.assertEqual(GREEK_UPPER['Alpha'], self.ex.string)
def testBrHr(self):
self.ex.feed("<br>a")
self.ex.close()
self.assertEqual("\na", self.ex.string)
self.assertEqual({(self.ex.namespace, 'br', (0, 1)): {}}, self.ex.tags)
self.ex.reset()
self.ex.feed("<hr>a")
self.ex.close()
self.assertEqual("\n\na", self.ex.string)
self.assertEqual({(self.ex.namespace, 'hr', (0, 2)): {}}, self.ex.tags)
def testMinorContent(self):
self.ex.feed("""
<html a=1>
<head b=2>
<title c=3>title</title>
</head>
<body d=4>
<h1 e=5>heading</h1>
<div f=6>
<ol g=7>
<li h=8>1</li>
<li i=9>2</li>
</ol>
</div>
<p j=0>footer</p>
</body>
</html>
""")
self.ex.close()
self.assertEqual("title\n\nheading\n\n1\n2\n\nfooter",
self.ex.string)
self.assertDictEqual({
(self.ex.namespace, 'html', (0, 27)): {'a': '1'},
(self.ex.namespace, 'head', (0, 7)): {'b': '2'},
(self.ex.namespace, 'title', (0, 7)): {'c': '3'},
(self.ex.namespace, 'body', (7, 27)): {'d': '4'},
(self.ex.namespace, 'h1', (7, 16)): {'e': '5'},
(self.ex.namespace, 'div', (16, 21)): {'f': '6'},
(self.ex.namespace, 'ol', (16, 21)): {'g': '7'},
(self.ex.namespace, 'li', (16, 18)): {'h': '8'},
(self.ex.namespace, 'li', (18, 20)): {'i': '9'},
(self.ex.namespace, 'p', (21, 27)): {'j': '0'},
}, self.ex.tags)
def testDropEmptyTag(self):
self.ex.feed("a <span style=fun> \n </span> b")
self.ex.close()
self.assertEqual('a b', self.ex.string)
self.assertEqual({}, self.ex.tags)
def testAddTag(self):
self.ex.feed("<i a=1 accesskey=x><i b=2 style=y>data</i></i>")
self.ex.close()
self.assertEqual({
(self.ex.namespace, 'em', (0, 4)): {'a': '1', 'b': '2'}
}, self.ex.tags)
def testNormalWhitespace(self):
self.ex.feed(" \n<span> a b \n c </span> \n \n")
self.ex.close()
self.assertEqual("a b \u00a0c", self.ex.string)
self.assertEqual({(self.ex.namespace, 'span', (0, 6)): {}},
self.ex.tags)
self.ex.reset()
self.ex.feed("a\nb c\td e\ff g\rh")
self.ex.close()
self.assertEqual("a b c d e f g h", self.ex.string)
self.ex.reset()
self.ex.feed("\u00a0a\u00a0\u00a0b\u00a0")
self.ex.close()
self.assertEqual("a b", self.ex.string)
self.ex.reset()
self.ex.feed("a\n")
self.ex.feed("b")
self.ex.close()
self.assertEqual("a b", self.ex.string)
if __name__ == '__main__':
main() |
coolralf/KaggleTraining | HELP.py | Python | mit | 18 | 0.055556 | print("hello!!!!" | ) | |
mayukh18/BlindChat | modules/gifs.py | Python | mit | 646 | 0.012384 | _male_start_hi = ['https://media.giphy.co | m/media/dzaUX7CAG0Ihi/giphy-downsized.gif',
'https://media.giphy.com/media/oJiCqvIqPZE3u/giphy.gif']
_female_start_hi = ['https://media.giphy.com/media/a1QLZUUtCcgyA/giphy-downsized.gif',
'https://media.giphy.com/media/EPJZhOrStSpz2/giphy-downsized.gif']
import random
def get_start_hi(gender):
if gender == "male":
#return random.choice(_male_start_hi)
return _male_start_hi[1]
| elif gender == "female":
#return random.choice(_female_start_hi)
return _female_start_hi[1]
else:
return random.choice(_male_start_hi) |
davande/hackernews-top | graphing.py | Python | apache-2.0 | 2,759 | 0.001812 | '''
Graphing
Author: Rylan Santinon
'''
import pygal
from csv_io import CsvIo
from urlparse import urlparse
import os
class Graphing(object):
'''Graphs and diagrams based on retrieved data'''
def __init__(self, directory):
self.directory = directory
self.csvio = CsvIo()
self.make_directory()
def make_directory(self):
'''Make the output directory if one does not exist'''
if not os.path.exists(self.directory):
os.makedirs(self.directory)
def output_png(self, chart, filename):
'''Output the chart to a png file at directory/filename'''
chart.render_to_png(os.path.join(self.directory, filename))
def karma_by_created(self, outpng):
FACTOR = 1.0/1000000000
users = self.csvio.get_all_users_full()
user_list = []
for k in users.keys():
user_list.append(users[k])
karmas = []
| createds = []
c = 0
| for u in user_list:
c = c + 1
if int(u[1]) > 250 and int(u[1]) < 110000:
if c % 15 != 0:
continue
karmas.append(int(u[1]))
createds.append(int(u[2])/FACTOR)
xychart = pygal.XY(stroke=False, x_title='Created time (seconds past epoch) x 10^-9')
xychart.title = 'Karma vs Created time'
xychart.add('Karma', zip(createds, karmas))
self.output_png(xychart, outpng)
def domain_frequency(self, topn, outpng):
'''Make a png frequency graph for top-n domains'''
stories = self.csvio.get_all_stories()
count_map = {}
for k in stories.keys():
count_map_key = self.canonical(stories[k][-1])
count = count_map.get(count_map_key, 0)
count_map[count_map_key] = count + 1
count_list = []
for k in count_map.keys():
if k == '':
continue
count_list.append([count_map[k], k])
sorted_list = sorted(count_list)
top = sorted_list[-topn:]
top.reverse()
count_axis = [l[0] for l in top]
name_axis = [l[1] for l in top]
bar_chart = pygal.Bar()
bar_chart.x_labels = name_axis
bar_chart.title = "Frequency of top " + str(topn) + " domains"
bar_chart.add('Domains', count_axis)
self.output_png(bar_chart, outpng)
def canonical(self, url):
'''Canonical representation of url's domain'''
loc = urlparse(url).netloc
if 'www.' in loc:
return loc.split('.')[1]
else:
return loc
if __name__ == '__main__':
G = Graphing('diagrams')
G.domain_frequency(10, 'frequency_bar.png')
G.karma_by_created('karma_created.png')
|
labordoc/labordoc-next | modules/bibformat/lib/bibformat_utils_unit_tests.py | Python | gpl-2.0 | 6,733 | 0.008911 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat - Unit Test Suite"""
from invenio.testutils import make_test_suite, run_test_suite, InvenioTestCase
from invenio.bibformat_utils import words_start_with_patterns, \
cut_out_snippet_core_creation
class WordsStartsWithPatternTest(InvenioTestCase):
"""Test for words start with pattern functionality"""
def test_word_starts_with_single_pattern(self):
"""bibformat - word starts with single pattern"""
self.assertEqual((False, 0), words_start_with_patterns(['thi'], ['this']))
self.assertEqual((True, 0), words_start_with_patterns(['this'], ['this']))
self.assertEqual((True, 0), words_start_with_patterns(['This'], ['this']))
self.assertEqual((True, 0), words_start_with_patterns(['this'], ['tHis']))
self.assertEqual((True, 0), words_start_with_patterns(['This'], ['tHis']))
self.assertEqual((True, 0), words_start_with_patterns(['Thiss'], ['tHis']))
def test_word_starts_with_multi_pattern(self):
"""bibformat - word starts with multi pattern"""
self.assertEqual((False, 0), words_start_with_patterns(['thi'], ['this', 'is', 'a']))
self.assertEqual((False, 0), words_start_with_patterns(['i'], ['this', 'is', 'a']))
self.assertEqual((True, 0), words_start_with_patterns(['this'], ['this', 'is', 'a']))
self.assertEqual((True, 0), words_start_with_patterns(['is'], ['this', 'is', 'a']))
def test_words_start_with_single_pattern(self):
"""bibformat - words start with single pattern"""
self.assertEqual((True, 0), words_start_with_patterns(['this', 'is'], ['thi']))
self.assertEqual((False, 0), words_start_with_patterns(['thi', 'this'], ['this']))
def test_words_start_with_multi_pattern(self):
"""bibformat - words start with multi pattern"""
# Only the first word is considered
self.assertEqual((True, 0), words_start_with_patterns(['this', 'is'], ['this', 'it']))
self.assertEqual((True, 0), words_start_with_patterns(['this', 'is'], ['it', 'thi']))
self.assertEqual((False, 0), words_start_with_patterns(['this', 'is'], ['it', 'if']))
self.assertEqual((False, 0), words_start_with_patterns(['this', 'is'], ['is', 'if']))
def test_words_start_with_phrase(self):
"""bibformat - words start with phrase"""
self.assertEqual((True, 2), words_start_with_patterns(['this', 'is', 'a', 'test'], ['this is a']))
self.assertEqual((False, 0), words_start_with_patterns(['this', 'is', 'a', 'test'], ['no I do not]']))
self.assertEqual((True, 2), words_start_with_patterns(['this', 'is', 'a', 'test'], ['no I do not]', 'this is a']))
self.assertEqual((False,0), words_start_with_patterns(['this', 'is'], ['no I do not', 'this is a']))
class SnippetCutOutCoreCreation(InvenioTestCase):
"""Test for snippet cut out core creation"""
_words = dict()
_words[0] = ['CERN', 'LIBRARIES,', 'GENEVA', 'SCAN-0005061', 'Development', 'of', 'Photon', 'Beam', 'Diagnostics',
'for','VUV', 'Radiation', 'from', 'a', 'SASE', 'FEL', 'R.', 'Treusch', '1,', 'T.', 'Lokajczyk,', 'W.',
'Xu', '2,','U.', 'Jastrow,', 'U.', 'Hahn,', 'Abstract', 'L.', 'Bittner', 'and', 'J.', 'Feldhaus',
'HASYLAB', 'at', 'DESY,', 'Notkcstr.', '85,', 'D\xe2\x80\x94226`U3', 'Hamburg,', 'Germany', 'For',
'the', 'proof-of-principle', 'experiment', 'of', 'self-amplified', 'spontaneous', 'emission', '[SASE)',
'at', 'short', 'wavelengths', 'on', 'the', 'VUV', 'FEL', 'at', 'DESY', 'a', 'multi-facetted', 'photon',
'beam', 'diagnostics', 'experiment', 'has', 'been', 'developed', 'employing', 'new', 'detection',
'concepts', 'to', 'measure', 'all', 'SASE', 'specific', 'properties', 'on', 'a', 'single', 'pulse',
'basis.', 'The', 'present', 'setup', 'includes', 'instrumentation', 'for', 'the', 'measurement', 'of',
'the', 'energy', 'and', 'the', 'angular', 'and', 'spectral', 'distribution', 'of', 'individual', 'photon',
'pulses.', 'Different', 'types', 'of', 'photon', 'detectors', 'such', 'as', 'PtSi-photodiodes', 'and']
def test_term_cut_out(self):
"""bibformat - | term snippet cut out core creation"""
self.assertEqual(('This', 0, 0), cut_out_snippet_core_crea | tion(['This', 'is', 'a', 'test'], ['This'], 50))
self.assertEqual(('This is a test', 0, 3), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This' ,'test'], 50))
self.assertEqual(('is', 1, 1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['is'], 50))
self.assertEqual(('is a new', 1, 3), cut_out_snippet_core_creation(['This', 'is', 'a', 'new', 'test'], ['is', 'new'], 50))
self.assertEqual(('', -1, -1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['new'], 50))
self.assertEqual(('of', 5, 5), cut_out_snippet_core_creation(self._words[0], ['of'], 100))
def test_phrase_cut_out(self):
"""bibformat - phrase snippet cut out core creation"""
self.assertEqual(('This is', 0, 1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This is'], 50))
self.assertEqual(('This is a', 0, 2), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This is a'], 50))
self.assertEqual(('', -1, -1), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['This not'], 50))
self.assertEqual(('is a', 1, 2), cut_out_snippet_core_creation(['This', 'is', 'a', 'test'], ['is a'], 50))
self.assertEqual(('of the', 92, 93), cut_out_snippet_core_creation(self._words[0], ['of the'], 100))
TEST_SUITE = make_test_suite(WordsStartsWithPatternTest,
SnippetCutOutCoreCreation,
)
if __name__ == '__main__':
run_test_suite(TEST_SUITE)
|
zl352773277/django-sae | django_sae/contrib/tasks/tests/views.py | Python | apache-2.0 | 276 | 0.003623 | # coding=ut | f-8
from django_sae.contrib.tasks.cron import OperationView
from django_sae.contrib.tasks.operations import TaskOperationMixin
class OperationViewMock(OperationView):
| def get_operation(self, request):
return [TaskOperationMixin() for _ in range(0, 3)] |
odoousers2014/odoo-addons-supplier_price | stock_scrap_resupply/stock_scrap_resupply.py | Python | agpl-3.0 | 1,818 | 0.003302 | # -*- coding: utf8 -*-
#
# Copyright (C) 2015 NDP Systèmes (<http://www.ndp-sy | stemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from openerp import fields, models, api
class StockMove(models.Model):
_inherit = 'stock.move'
@api.multi
def action_scrap(self, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False):
"""Move the scrap/damaged product into scrap location.
Overridden here to recreate a procurement if we are in a chained move."""
res = super(StockMove, self).action_scrap(quantity, location_id, restrict_lot_id=restrict_lot_id,
restrict_partner_id=restrict_partner_id)
for move in self:
if move.state not in ['done', 'cancel'] and move.procure_method == 'make_to_order':
proc_vals = self._prepare_procurement_from_move(move)
proc_vals.update({
'product_qty': quantity,
'product_uos_qty': quantity * move.product_uos_qty / move.product_uom_qty,
})
self.env['procurement.order'].create(proc_vals)
return res
|
supermurat/hamsi-manager | SpecialTools/__init__.py | Python | gpl-3.0 | 16,028 | 0.002808 | # This file is part of HamsiManager.
#
# Copyright (c) 2010 - 2015 Murat Demir <mopened@gmail.com>
#
# Hamsi Manager is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Hamsi Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HamsiManager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from Core import Universals as uni
from Core.MyObjects import *
from Core import ReportBug
from SpecialTools import SpecialActions
from SpecialTools import SearchAndReplace
from SpecialTools import Fill
from SpecialTools import Clear
from SpecialTools import CharacterState
from SpecialTools import CharacterEncoding
from SpecialTools import QuickFill
class SpecialTools(MWidget):
def __init__(self, _parent):
MWidget.__init__(self, _parent)
self.tbAddToBefore = MToolButton(self)
self.btChange = MToolButton(self)
self.tbAddToAfter = MToolButton(self)
self.isShowAdvancedSelections = uni.getBoolValue("isShowAdvancedSelections")
self.tabwTabs = MTabWidget(self)
self.specialActions = SpecialActions.SpecialActions(self)
self.searchAndReplace = SearchAndReplace.SearchAndReplace(self)
self.fill = Fill.Fill(self)
self.clear = Clear.Clear(self)
self.characterState = CharacterState.CharacterState(self)
self.characterEncoding = CharacterEncoding.CharacterEncoding(self)
self.quickFill = QuickFill.QuickFill(self)
self.pbtnAdvancedSelections = MPushButton("Simple")
self.pbtnApply = MPushButton(translate("SpecialTools", "Apply"))
self.pbtnApply.setIcon(MIcon("Images:apply.png"))
self.pbtnApply.setObjectName("pbtnApply")
self.pbtnApply.setMinimumHeight(35)
self.tbAddToBefore.setToolTip(translate("SpecialTools", "Add In Front"))
self.btChange.setToolTip(translate("SpecialTools", "Change"))
self.tbAddToAfter.setToolTip(translate("SpecialTools", "Append"))
self.tbAddToBefore.setIcon(MIcon("Images:addToBefore.png"))
self.btChange.setIcon(MIcon("Images:change.png"))
self.tbAddToAfter.setIcon(MIcon("Images:addToAfter.png"))
self.tbAddToBefore.setAutoRaise(True)
self.btChange.setAutoRaise(True)
self.tbAddToAfter.setAutoRaise(True)
self.tbAddToBefore.setCheckable(True)
self.btChange.setCheckable(True)
self.tbAddToAfter.setCheckable(True)
self.btChange.setChecked(True)
MObject.connect(self.tbAddToBefore, SIGNAL("clicked()"), self.changeTypeChanged)
MObject.connect(self.btChange, SIGNAL("clicked()"), self.changeTypeChanged)
MObject.connect(self.tbAddToAfter, SIGNAL("clicked()"), self.changeTypeChanged)
self.tabwTabs.addTab(self.specialActions, translate("SpecialTools", "Special Actions"))
self.tabwTabs.addTab(self.searchAndReplace, translate("SpecialTools", "Search - Replace"))
self.tabwTabs.addTab(self.fill, translate("SpecialTools", "Fill"))
self.tabwTabs.addTab(self.clear, translate("SpecialTools", "Clear"))
self.tabwTabs.addTab(self.characterState, translate("SpecialTools", "Character State"))
self.tabwTabs.addTab(self.characterEncoding, translate("SpecialTools", "Character Encoding"))
self.tabwTabs.addTab(self.quickFill, translate("SpecialTools", "Quick Fill"))
HBox0 = MHBoxLayout()
HBox0.addWidget(self.tbAddToBefore)
HBox0.addWidget(self.btChange)
HBox0.addWidget(self.tbAddToAfter)
lblX = MLabel(translate("SpecialTools", "X : "))
lblY = MLabel(translate("SpecialTools", "Y : "))
self.cbInformationSection = MComboBox()
self.cbInformationSection.addItems([translate("SpecialTools", "All"),
translate("SpecialTools", "Before X"),
translate("SpecialTools", "After X"),
translate("SpecialTools", "From Last, Before X"),
translate("SpecialTools", "From Last After X"),
translate("SpecialTools", "Between X And Y"),
translate("SpecialTools", "Not Between X And Y")])
self.cbInformationSectionX = MSpinBox()
self.cbInformationSectionX.setRange(1, 100)
self.cbInformationSectionX.setValue(3)
self.cbInformationSectionY = MSpinBox()
self.cbInformationSectionY.setRange(1, 100)
self.cbInformationSectionY.setValue(5)
self.pnlAdvancedSelections = MWidget()
VBox = MVBoxLayout()
self.pnlAdvancedSelections.setLayout(VBox)
VBox1 = MVBoxLayout()
VBox1.addWidget(self.pbtnAdvancedSelections)
VBox.addWidget(self.cbInformationSection)
HBoxs1 = MHBoxLayout()
HBoxs1.addWidget(lblX)
HBoxs1.addWidget(self.cbInformationSectionX)
HBoxs1.addWidget(lblY)
HBoxs1.addWidget(self.cbInformationSectionY)
VBox.addLayout(HBoxs1)
VBox.addLayout(HBox0)
VBox1.addWidget(self.pnlAdvancedSelections)
VBox1.addWidget(self.pbtnApply)
HBox = MHBoxLayout()
HBox.addWidget(self.tabwTabs)
HBox.addLayout(VBox1)
self.setLayout(HBox)
_parent.dckSpecialTools = MDockWidget(translate("SpecialTools", "Special Tools"))
_parent.dckSpecialTools.setObjectName("Special Tools")
_parent.dckSpecialTools.setWidget(self)
_parent.dckSpecialTools.setAllowedAreas(Mt.AllDockWidgetAreas)
_parent.dckSpecialTools.setFeatures(MDockWidget.AllDockWidgetFeatures)
_parent.addDockWidget(Mt.BottomDockWidgetArea, _parent.dckSpecialTools)
self.cbInformationSectionX.setEnabled(False)
self.cbInformationSectionY.setEnabled(False)
self.cbInformationSection.setFixedWidth(175)
self.tabwTabs.setCurrentIndex(int(uni.MySettings["activeTabNoOfSpecialTools"]))
self.tabChanged(int(uni.MySettings["activeTabNoOfSpecialTools"]))
MObject.connect(self.pbtnApply, SIGNAL("clicked()"), self.apply)
MObject.connect(self.pbtnAdvancedSelections, SIGNAL("clicked()"), self.showOrHideAdvancedSelections)
MObject.connect(self.tabwTabs, SIGNAL("currentChanged(int)"), self.tabChanged)
MObject.connect(self.cbInformationSection, SIGNAL("currentIndexChanged(int)"), self.InformationSectionChanged)
self.refreshForColumns()
self.reFillCompleters()
def InformationSectionChanged(self):
if self.cbInfor | mationSection.currentIndex() == 0:
self.cbInformationSectionX.setEnabled(False)
else:
self.cbInformationSectionX.setEnabled(True)
if self.cbInformationSection.currentIndex() > 4:
self.cbInformationSectionY.setEnabled(True)
else:
self.cbInformationSectionY.setEnabled(False)
def refreshForColumns(self):
self.searchA | ndReplace.columns.clear()
self.fill.columns.clear()
self.clear.columns.clear()
self.characterState.columns.clear()
self.characterEncoding.columns.clear()
try:
for btn in self.specialActions.pbtnAddObjects:
btn.setVisible(False)
btn.deleteLater()
except: pass
try:
for lbl in self.quickFill.lblColumns:
lbl.setVisible(False)
lbl.deleteLater()
for le in self.quickFill.leColumns:
le.setVisible(False)
le.deleteLater()
except: pass
self.specialActions.pbtnAddObjects = []
self.quickFill.lblColumns = []
se |
tectronics/chimerascan | chimerascan/deprecated/nominate_spanning_reads_v04.py | Python | gpl-3.0 | 12,171 | 0.004519 | '''
Created on Jan 30, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
import pysam
from chimerascan.lib import config
from chimerascan.lib.base import LibraryTypes
from chimerascan.lib.sam import parse_pe_reads
from chimerascan.lib.chimera import Chimera, OrientationTags, ORIENTATION_TAG_NAME, get_orientation
from chimerascan.lib.batch_sort import batch_sort
from chimerascan.lib.seq import DNA_reverse_complement
def to_fastq(qname, readnum, seq, qual, is_reverse=False):
if is_reverse:
seq = DNA_reverse_complement(seq)
qual = qual[::-1]
return "@%s/%d\n%s\n+\n%s" % (qname, readnum+1, seq, qual)
def nominate_encomp_spanning_reads(chimera_file, output_fastq_file):
"""
find all encompassing reads that should to be remapped to see if they
span the breakpoint junction
"""
fqfh = open(output_fastq_file, "w")
remap_qnames = set( | )
for c in Chimera.parse(open(chimera_file)):
# find breakpoint coords of chimera
end5p = c.tx_end_5p
start3p = c.tx_start_3p
for r5p,r3p in c.encomp_frags:
# if 5' read overlaps breakpoint then it should be remapped
if r5p.clipstart < end5p < r5p.clipend:
key5p = (r5p.qname, r5p.readnum)
if | key5p not in remap_qnames:
remap_qnames.add((r5p.qname, r5p.readnum))
print >>fqfh, to_fastq(r5p.qname, r5p.readnum,
r5p.seq, "I" * len(r5p.seq),
is_reverse=r5p.is_reverse)
# if 3' read overlaps breakpoint then it should be remapped
if r3p.clipstart < start3p < r3p.clipend:
key3p = (r3p.qname, r3p.readnum)
if key3p not in remap_qnames:
remap_qnames.add((r3p.qname, r3p.readnum))
print >>fqfh, to_fastq(r3p.qname, r3p.readnum,
r3p.seq, "I" * len(r3p.seq),
is_reverse=r3p.is_reverse)
fqfh.close()
return config.JOB_SUCCESS
def parse_chimeras_by_gene(chimera_file, orientation):
clist = []
prev_tx_name = None
for c in Chimera.parse(open(chimera_file)):
tx_name = c.tx_name_5p if (orientation == OrientationTags.FIVEPRIME) else c.tx_name_3p
if prev_tx_name != tx_name:
if len(clist) > 0:
yield prev_tx_name, clist
clist = []
prev_tx_name = tx_name
clist.append(c)
if len(clist) > 0:
yield prev_tx_name, clist
def parse_reads_by_rname(bamfh, orientation):
"""
reads must be sorted and include an orientation tag
"""
reads = []
prev_rname = None
for r in bamfh:
o = r.opt(ORIENTATION_TAG_NAME)
if o != orientation:
continue
if prev_rname != r.tid:
if len(reads) > 0:
yield reads
reads = []
prev_rname = r.tid
reads.append(r)
if len(reads) > 0:
yield reads
def parse_sync_chimera_with_bam(chimera_file, bam_file, orientation):
# setup iterators
chimera_iter = parse_chimeras_by_gene(chimera_file, orientation)
# get first item from each iterator
try:
tx_name, clist = chimera_iter.next()
chimera_tx_name = tx_name
except StopIteration:
return
bamfh = pysam.Samfile(bam_file, "rb")
try:
for reads in parse_reads_by_rname(bamfh, orientation):
read_tx_name = bamfh.references[reads[0].tid]
if read_tx_name < chimera_tx_name:
continue
while read_tx_name > chimera_tx_name:
tx_name, clist = chimera_iter.next()
chimera_tx_name = tx_name
if read_tx_name == chimera_tx_name:
yield clist, reads,
except StopIteration:
pass
bamfh.close()
def extract_single_mapped_reads(chimera_file,
unmapped_bam_file,
single_mapped_bam_file,
unmapped_fastq_file,
library_type,
tmp_dir):
# find all reads that need to be remapped to see if they span the
# breakpoint junction
fqfh = open(unmapped_fastq_file, "w")
# annotate mapped reads with sequence/quality of unmapped mate
bamfh = pysam.Samfile(unmapped_bam_file, "rb")
unsorted_single_mapped_bam_file = os.path.join(tmp_dir, "unsorted_single_mapped_reads.bam")
singlemap_bamfh = pysam.Samfile(unsorted_single_mapped_bam_file, "wb", template=bamfh)
for pe_reads in parse_pe_reads(bamfh):
# find which of the original reads was unmapped
r1_unmapped = any(r.is_unmapped for r in pe_reads[0])
r2_unmapped = any(r.is_unmapped for r in pe_reads[1])
# if both reads unmapped, then remap to breakpoints
if r1_unmapped and r2_unmapped:
for readnum in (0,1):
print >>fqfh, to_fastq(pe_reads[readnum][0].qname, readnum,
pe_reads[readnum][0].seq,
pe_reads[readnum][0].qual)
else:
# annotate the mapped reads with the seq/qual of the
# unmapped reads
mapped_readnum = 0 if r2_unmapped else 1
unmapped_readnum = 1 if r2_unmapped else 0
unmapped_seq = pe_reads[unmapped_readnum][0].seq
unmapped_qual = pe_reads[unmapped_readnum][0].qual
for r in pe_reads[mapped_readnum]:
orientation = get_orientation(r, library_type)
# TODO: may need to REVERSE read here to get original
r.tags = r.tags + [("R2", unmapped_seq),
("Q2", unmapped_qual),
(ORIENTATION_TAG_NAME, orientation)]
singlemap_bamfh.write(r)
singlemap_bamfh.close()
fqfh.close()
# sort/index the annotated single-mapper unmapped reads by reference/position
logging.debug("Sorting single-mapped mates by reference")
single_mapped_bam_prefix = os.path.splitext(single_mapped_bam_file)[0]
pysam.sort("-m", str(int(1e9)), unsorted_single_mapped_bam_file, single_mapped_bam_prefix)
pysam.index(single_mapped_bam_file)
# remove unsorted file
if os.path.exists(unsorted_single_mapped_bam_file):
os.remove(unsorted_single_mapped_bam_file)
return config.JOB_SUCCESS
def nominate_single_mapped_spanning_reads(chimera_file,
single_mapped_bam_file,
single_mapped_fastq_file,
tmp_dir):
# find sequences that could cross a breakpoint
tmp_seqs_to_remap = os.path.join(tmp_dir, "tmp_singlemap_seqs.txt")
f = open(tmp_seqs_to_remap, "w")
# search for matches to 5' chimeras
logging.debug("Matching single-mapped frags to 5' chimeras")
for clist, reads in parse_sync_chimera_with_bam(chimera_file,
single_mapped_bam_file,
OrientationTags.FIVEPRIME):
# TODO: test more specifically that read has a chance to cross breakpoint
for r in reads:
# r |
titusjan/argos | argos/qt/treeitems.py | Python | gpl-3.0 | 11,395 | 0.004563 | import logging
from argos.external import six
from argos.utils.cls import check_class, check_is_a_string
logger = logging.getLogger(__name__)
class BaseTreeItem(object):
""" Base class for storing item data in a tree form. Each tree item represents a row
in the BaseTreeModel (QAbstractItemModel).
The tree items have no notion of which field is stored in which column. This is implemented
in BaseTreeModel._itemValueForColumn
"""
def _ | _init__(self, nodeName):
""" Constructor
:param nodeName: short name describing this node. Is used to construct the nodePath.
Currently we don't check for | uniqueness in the children but this may change.
The nodeName may not contain slashes (/).
"""
check_class(nodeName, six.string_types, allow_none=False)
assert nodeName, "nodeName may not be empty"
assert '/' not in nodeName, "nodeName may not contain slashes"
self._nodeName = str(nodeName)
self._parentItem = None
self._model = None
self._childItems = [] # the fetched children
self._nodePath = self._constructNodePath()
def finalize(self):
""" Can be used to cleanup resources. Should be called explicitly.
Finalizes its children before closing itself
"""
for child in self.childItems:
child.finalize()
def __str__(self):
return "<{}: {}>".format(type(self).__name__, self.nodePath)
def __repr__(self):
return ("<{}: {!r}, children:[{}]>".
format(type(self).__name__, self.nodePath,
', '.join([repr(child) for child in self.childItems])))
@property
def model(self):
""" Returns the ConfigTreeModel this item belongs to.
If the model is None (not set), it will use and cache the parent's model.
Therefore make sure that an ancestor node has a reference to the model! Typically by
setting the model property of the invisible root item in the model constructor.
"""
if self._model is None and self.parentItem is not None:
self._model = self.parentItem.model
return self._model
@model.setter
def model(self, value):
""" Sets ConfigTreeModel this item belongs to.
"""
self._model = value
@property
def decoration(self):
""" An optional decoration (e.g. icon).
The default implementation returns None (no decoration).
"""
return None
@property
def font(self):
""" Returns a font for displaying this item's text in the tree.
The default implementation returns None (i.e. uses default font).
"""
return None
@property
def backgroundBrush(self):
""" Returns a brush for drawing the background role in the tree.
The default implementation returns None (i.e. uses default brush).
"""
return None
@property
def foregroundBrush(self):
""" Returns a brush for drawing the foreground role in the tree.
The default implementation returns None (i.e. uses default brush).
"""
return None
@property
def sizeHint(self):
    """ Size hint for displaying this item in the tree.

    Should return a QSize object or None. This base implementation
    returns None, i.e. no hint is given.
    """
    return None
@property
def nodeName(self):
    """ The node name. Is used to construct the nodePath. """
    return self._nodeName

@nodeName.setter
def nodeName(self, nodeName):
    """ Sets the node name and updates the node path of this item and all
    of its descendants.

    Enforces the same invariants as the constructor: the name must be a
    non-empty string without slashes (slashes separate nodePath parts).
    """
    assert nodeName, "nodeName may not be empty"
    assert '/' not in nodeName, "nodeName may not contain slashes"
    self._nodeName = nodeName
    # Renaming this node changes its path and the path of every descendant.
    self._recursiveSetNodePath(self._constructNodePath())
def _constructNodePath(self):
    """ Build this node's path by prepending the parent's path to its name.

    The invisible root node (the one without a parent) contributes an
    empty string, so it never appears in the resulting path.
    """
    parent = self.parentItem
    if parent is None:
        return ''
    return "{}/{}".format(parent.nodePath, self.nodeName)
@property
def nodePath(self):
    """ Slash-separated sequence of node names from the root down to this node. """
    return self._nodePath
def _recursiveSetNodePath(self, nodePath):
    """ Store the given nodePath on this item and propagate it to all
    descendants, appending each child's own nodeName.
    """
    self._nodePath = nodePath
    for child in self.childItems:
        child._recursiveSetNodePath("{}/{}".format(nodePath, child.nodeName))
@property
def parentItem(self):
    """ The parent item. """
    return self._parentItem

@parentItem.setter
def parentItem(self, parentItem):
    """ Sets the parent item.

    Re-parenting changes the path of this item and of all its descendants,
    so the node paths are recomputed.
    """
    self._parentItem = parentItem
    self._recursiveSetNodePath(self._constructNodePath())
@property
def childItems(self):
    """ List of the child items fetched so far. """
    return self._childItems
def hasChildren(self):
    """ Return True when the item has at least one (fetched) child.

    Items with children can be expanded in the tree.
    """
    return bool(self.childItems)
def nChildren(self):  # TODO: numChildren
    """ Return how many children have been fetched for this item. """
    return len(self.childItems)
def child(self, row):
    """ Return the child item at position *row*. """
    return self.childItems[row]
def childByNodeName(self, nodeName):
    """ Return the first direct child whose nodeName matches.

    Raises IndexError when no direct child has that name.
    """
    assert '/' not in nodeName, "nodeName can not contain slashes"
    for candidate in self.childItems:
        if candidate.nodeName != nodeName:
            continue
        return candidate
    raise IndexError("No child item found having nodeName: {}".format(nodeName))
def findByNodePath(self, nodePath):
    """ Search (starting at self) for the descendant having this nodePath.

    The path is relative: it may not start with a slash and may not be
    empty. Consecutive slashes are ignored (they descend one level without
    consuming a name). Raises IndexError when a path part has no match.
    """
    check_is_a_string(nodePath)
    assert not nodePath.startswith('/'), "nodePath may not start with a slash"
    if not nodePath:
        raise IndexError("Item not found: {!r}".format(nodePath))

    # Walk the path parts iteratively instead of recursing.
    item = self
    for part in nodePath.split('/'):
        if part == '':
            # Two consecutive slashes. Just go one level deeper.
            continue
        item = item.childByNodeName(part)
    return item
def childNumber(self):
    """ Return this node's index (row) within its parent's list of children.

    A parentless (root) item is reported as row 0.
    This is O(n) in time.  # TODO: store row number in the items?
    """
    parent = self.parentItem
    if parent is None:
        return 0
    return parent.childItems.index(self)
def insertChild(self, childItem, position=None):
""" Inserts a child item to the current item.
The childItem must not yet have a parent (it will be set by this function).
IMPORTANT: this does not let the model know that items have been added.
Use BaseTreeModel.insertItem instead.
param childItem: a BaseTreeItem that will be added
param position: integer position before which the item will be added.
If position is None (default) the item will be appended at the end.
Returns childItem so that calls may be chained.
"""
if position is None:
position = self.nChildren()
assert childItem.parentItem is None, "childItem already has a parent: {}".format(childItem)
assert childItem._ |
OpenNetworkingFoundation/ONFOpenTransport | RI/flask_server/tapi_server/models/tapi_photonic_media_application_identifier.py | Python | apache-2.0 | 3,599 | 0.001945 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_photonic_media_application_identifier_type import TapiPhotonicMediaApplicationIdentifierType # noqa: F401,E501
from tapi_server import util
class TapiPhotonicMediaApplicationIdentifier(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, application_identifier_type=None, application_code=None):  # noqa: E501
        """TapiPhotonicMediaApplicationIdentifier - a model defined in OpenAPI

        :param application_identifier_type: The application_identifier_type of this TapiPhotonicMediaApplicationIdentifier.  # noqa: E501
        :type application_identifier_type: TapiPhotonicMediaApplicationIdentifierType
        :param application_code: The application_code of this TapiPhotonicMediaApplicationIdentifier.  # noqa: E501
        :type application_code: str
        """
        # Attribute name -> OpenAPI type; consumed by the (de)serialization
        # helpers in tapi_server.util.
        self.openapi_types = {
            'application_identifier_type': TapiPhotonicMediaApplicationIdentifierType,
            'application_code': str
        }

        # Attribute name -> JSON key used on the wire (kebab-case).
        self.attribute_map = {
            'application_identifier_type': 'application-identifier-type',
            'application_code': 'application-code'
        }

        self._application_identifier_type = application_identifier_type
        self._application_code = application_code

    @classmethod
    def from_dict(cls, dikt) -> 'TapiPhotonicMediaApplicationIdentifier':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The tapi.photonic.media.ApplicationIdentifier of this TapiPhotonicMediaApplicationIdentifier.  # noqa: E501
        :rtype: TapiPhotonicMediaApplicationIdentifier
        """
        return util.deserialize_model(dikt, cls)

    @property
    def application_identifier_type(self):
        """Gets the application_identifier_type of this TapiPhotonicMediaApplicationIdentifier.

        :return: The application_identifier_type of this TapiPhotonicMediaApplicationIdentifier.
        :rtype: TapiPhotonicMediaApplicationIdentifierType
        """
        return self._application_identifier_type

    @application_identifier_type.setter
    def application_identifier_type(self, application_identifier_type):
        """Sets the application_identifier_type of this TapiPhotonicMediaApplicationIdentifier.

        :param application_identifier_type: The application_identifier_type of this TapiPhotonicMediaApplicationIdentifier.
        :type application_identifier_type: TapiPhotonicMediaApplicationIdentifierType
        """
        self._application_identifier_type = application_identifier_type

    @property
    def application_code(self):
        """Gets the application_code of this TapiPhotonicMediaApplicationIdentifier.

        none  # noqa: E501

        :return: The application_code of this TapiPhotonicMediaApplicationIdentifier.
        :rtype: str
        """
        return self._application_code

    @application_code.setter
    def application_code(self, application_code):
        """Sets the application_code of this TapiPhotonicMediaApplicationIdentifier.

        none  # noqa: E501

        :param application_code: The application_code of this TapiPhotonicMediaApplicationIdentifier.
        :type application_code: str
        """
        self._application_code = application_code
|
wkschwartz/django | tests/i18n/contenttypes/tests.py | Python | bsd-3-clause | 752 | 0.001333 | import os
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, override_settings
from django.utils import translation
@override_settings(
    USE_I18N=True,
    LOCALE_PATHS=[
        os.path.join(os.path.dirname(__file__), 'locale'),
    ],
    LANGUAGE_CODE='en',
    LANGUAGES=[
        ('en', 'English'),
        ('fr', 'French'),
    ],
)
class ContentTypeTests(TestCase):
    def test_verbose_name(self):
        """str(ContentType) uses the model's verbose name in the active language."""
        company_type = ContentType.objects.get(app_label='i18n', model='company')
        with translation.override('en'):
            self.assertEqual(str(company_type), 'i18n | Company')
        with translation.override('fr'):
            self.assertEqual(str(company_type), 'i18n | Société')
|
dkoudlo/py-manage-server | modules/file/replace.py | Python | apache-2.0 | 1,593 | 0.00565 | import modules.options_helper as opt_helper
from modules.file.file_helper import File
import sys
def main(options):
# available config keys
options_registry = ["path","find","replace_with"]
# verify config option provided match registry
opt_helper.check_opt | ions(options, options_ | registry)
path = options.get("path", False)
find = options.get("find", False)
replace_with = options.get("replace_with", False)
# see if all required fields are present
if path and find and replace_with:
f = File(path)
is_find_in_file = f.is_in_file(find)
filetype = f.get_ftype()
# only supporting files right now, no links, directories
if filetype == "file" and is_find_in_file:
# check if the change was applied already to avoid replacing duplicate lines if any
if f.is_in_file(replace_with) and is_find_in_file:
print "Will not replace. Looks like following is already in file " + path + ": " + replace_with
else:
print "Replacing content in file: " + path
f.replace_in_file(find, replace_with)
else:
if filetype != "file":
print "Can't run this playbook because provided 'path' is not a file, it's a " + filetype
# TODO: raise exception
sys.exit()
if not is_find_in_file:
print "Didn't find " + find + " in the file " + path + ". Nothing to replace."
if __name__ == '__main__':
    # NOTE(review): `options` is not defined at module scope, so running this
    # module directly raises NameError. Presumably the playbook runner imports
    # `main` and supplies options itself -- confirm before relying on direct
    # execution.
    main(options)
|
coblo/isccbench | iscc_bench/readers/harvard.py | Python | bsd-2-clause | 2,886 | 0.001386 | # -*- coding: utf-8 -*-
"""Read data from 'Harvard Library Open Metadata'.
Records: ~12 Million
Size: 12.8 GigaByte (Unpacked)
Info: http://library.harvard.edu/open-metadata
Data: https://s3.amazonaws.com/hlom/harvard.tar.gz
Instructions:
Download datafile and run `tar xvf harvard.tar.gz` to extract marc21 files.
After moving the .mrc files to the /data/harvard folder you should be able
to run this script and see log output of parsed data.
"""
import os
import logging
import isbnlib
from pymarc import MARCReader
from iscc_bench import DATA_DIR, MetaData
log = logging.getLogger(__name__)
HARVARD_DATA = os.path.join(DATA_DIR, "harvard")
def harvard(path=HARVARD_DATA):
    """Yield cleaned harvard records that have complete metadata.

    Records missing isbn/title/author or carrying an invalid ISBN are
    skipped; the rest get a basic cleanup (ISBN-13 normalization, title
    trimming).

    :param str path: path to directory with harvard .mrc files
    :return: Generator[:class:`MetaData`] (filtered for records that have ISBNs)
    """
    for meta in marc21_dir_reader(path):
        complete = all((meta.isbn, meta.title, meta.author))
        if not complete or isbnlib.notisbn(meta.isbn):
            continue
        # Basic cleanup
        try:
            cleaned = MetaData(
                isbnlib.to_isbn13(meta.isbn),
                meta.title.strip("/").strip().split(" : ")[0],
                meta.author,
            )
        except Exception:
            log.exception("Error parsing data")
            continue
        log.debug(cleaned)
        yield cleaned
def marc21_dir_reader(path=HARVARD_DATA):
    """Iterate over every harvard marc21 file in a directory and yield the
    MetaData objects parsed from those files.

    :param str path: path to directory with harvard .mrc files
    :return: Generator[:class:`MetaData`]
    """
    for file_name in os.listdir(path):
        file_path = os.path.join(path, file_name)
        log.info("Reading harvard marc21 file: {}".format(file_name))
        for record in marc21_file_reader(file_path):
            yield record
def marc21_file_reader(file_path):
    """Yield MetaData records parsed from a single harvard marc21 file.

    Records that trigger a UnicodeDecodeError are logged and skipped.

    :param str file_path: path to harvard marc21 file
    :return: Generator[:class:`MetaData`]
    """
    with open(file_path, "rb") as marc_file:
        reader = MARCReader(marc_file, utf8_handling="ignore")
        while True:
            try:
                record = next(reader)
                yield MetaData(record.isbn(), record.title(), record.author())
            except UnicodeDecodeError as err:
                log.error(err)
            except StopIteration:
                break
if __name__ == "__main__":
"""Demo usage."""
# logging.basicConfig(level=logging.DEBUG)
for entry in harvard():
# Do something with entry (MetaData object)
print(entry)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.