text stringlengths 8 6.05M |
|---|
from django import template
from apps.characters.models import Character
register = template.Library()
@register.filter
def is_alive(user):
    """Template filter: True if the user has at least one living character.

    Uses QuerySet.exists() so the check runs as a cheap EXISTS query instead
    of evaluating (fetching) the whole queryset just for truthiness.
    """
    return user.character_set.filter(alive=True).exists()
@register.filter
def humanize_time(secs):
    """Format a duration in seconds as e.g. '1d 2h 3m 4s '.

    Zero-valued units are omitted entirely; each emitted unit carries a
    trailing space, and a zero-second duration yields an empty string
    (both quirks preserved from the original implementation).
    """
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    pieces = []
    for amount, unit in ((days, "d"), (hours, "h"), (minutes, "m"), (seconds, "s")):
        if amount > 0:
            pieces.append("%d%s " % (amount, unit))
    return "".join(pieces)
@register.filter
def check_timer(timers, field):
    """Template filter: delegate to timers.check_timer(field).

    `timers` is expected to expose a check_timer() method — presumably an
    object tracking named cooldowns; verify against the templates that use it.
    """
    return timers.check_timer(field)
|
import unittest
import basecrm
from basecrm.test.testutils import BaseTestCase
from decimal import *
from basecrm.coercion import Coercion
class TestCoercion(BaseTestCase):
    """Unit tests for basecrm.coercion.Coercion numeric conversion helpers."""

    def test_to_decimal(self):
        # int, numeric string, float and float-string inputs must all coerce
        # to the equivalent Decimal value.
        self.assertEqual(Coercion.to_decimal(0), Decimal(0))
        self.assertEqual(Coercion.to_decimal("0"), Decimal(0))
        self.assertEqual(Coercion.to_decimal(1.11), Decimal("1.11"))
        self.assertEqual(Coercion.to_decimal("1.11"), Decimal("1.11"))
|
from datetime import timedelta
from django.conf import settings
from rest_framework.settings import APISettings, api_settings
# Per-project overrides read from Django settings; None falls back to DEFAULTS.
# NOTE(review): the key is singular ('AUTH_TOKEN_SETTING') — confirm it matches
# the project's documented settings name.
USER_SETTINGS = getattr(settings, 'AUTH_TOKEN_SETTING', None)

# Default configuration for the auth-token app.
DEFAULTS = {
    'HASH_ALGORITHM': 'HS256',
    # Tokens are signed with the project's SECRET_KEY unless overridden.
    'JWT_SECRET_KEY': settings.SECRET_KEY,
    'AUTH_TOKEN_CHARACTER_LENGTH': 64,
    # Token lifetime: 3000 seconds (50 minutes).
    'TOKEN_EXPIRY': timedelta(seconds=3000),
    'USER_SERIALIZER': 'auth_token.serializers.UserSerializer',
    'TOKEN_LIMIT_PER_USER': None,
    'AUTO_REFRESH': False,
    'MIN_REFRESH_INTERVAL': 60,
    'AUTH_HEADER_PREFIX': 'Token',
    'EXPIRY_DATETIME_FORMAT': api_settings.DATETIME_FORMAT,
}

# Settings whose values are dotted import paths; APISettings resolves them
# to the actual objects on access.
IMPORT_STRINGS = {
    'USER_SERIALIZER',
}

auth_token_settings = APISettings(user_settings=USER_SETTINGS, defaults=DEFAULTS, import_strings=IMPORT_STRINGS)
|
from django.urls import path
from . import views
# Route table for the public site pages; every route name mirrors its path.
urlpatterns = [
    path('', views.index, name='index'),
    path('contact', views.contact, name='contact'),
    path('about', views.about, name='about'),
    path('pricing', views.pricing, name='pricing'),
    path('service', views.service, name='service'),
    path('blog', views.blog, name='blog'),
    path('blog_detail', views.blog_detail, name='blog_detail'),
    path('appointment', views.appointment, name='appointment'),
]
from .lnetwork_plugin import LNetworkPlugin
from .models import *
from .schedule_record_lan_traffic import ScheduleRecordLANTraffic
from . import lnetwork_api
|
# coding:utf-8
from __future__ import absolute_import, unicode_literals
from sanic.blueprints import Blueprint
from . import spider, other
__author__ = "golden"
__date__ = '2018/6/25'

# Blueprint wiring the spider/project management API views.
bp = Blueprint(__name__, '/api/')
# BUGFIX: the route pattern used [A-z], which also matches the ASCII
# characters between 'Z' and 'a' ('[', '\', ']', '^', '_', '`'); use an
# explicit [A-Za-z] class for project names.
bp.add_route(spider.SpidersApi.as_view(), '<project:[A-Za-z]+>/spiders/')
bp.add_route(spider.ProjectsApi.as_view(), 'projects/')
bp.add_route(other.MenusApi.as_view(), 'menus/')
|
# This programs calculates the minimum fixed monthly payment needed in order to
# pay off a credit card balance within 12 months.
def calculatePayment(balance, annualInterestRate, numOfMonths = 12):
    '''(number, float, int) => float Raises AssertionError
    Returns the minimum fixed monthly payment needed in order to pay off a
    credit card balance within 12 months
    >>> calculatePayment(320000, 0.2, 12)
    29157.09
    >>> calculatePayment(999999, 0.18, 12)
    90325.03
    '''
    # Check if parameters are of the correct type.
    # NOTE: assert-based validation disappears under `python -O`; callers
    # should not rely on it in production.
    assert type(balance) == int or type(balance) == float, "balance should be a int or float"
    assert type(annualInterestRate) == float, "annualInterestRate should be of type float"
    assert type(numOfMonths) == int, "numOfMonths should be of type int"

    # Helper functions
    def calculateMinimumMonthlyPayment(currentBalance, monthlyPaymentRate):
        '''(number, float) => float
        Returns the minimum monthly payment required by the credit card company
        >>> calculateMinimumMonthlyPayment(5000, 0.02)
        100.0
        '''
        return round(currentBalance * monthlyPaymentRate, 2)

    def calculateUnpaidBalance(currentBalance, minimumPayment):
        '''(number, float) => float
        Returns the unpaid balance after subtracting minimumPayment from
        currentBalance
        >>> calculateUnpaidBalance(5000, 100.0)
        4900.00
        '''
        return round(currentBalance - minimumPayment, 2)

    def calculateInterest(unpaidBalance, annualInterestRate):
        '''(float, float) => float
        Returns the interest on unpaidBalance with an annualInterestRate
        >>> calculateInterest(4900.00, 0.18)
        73.50
        '''
        # Monthly interest = annual rate / 12, applied to the unpaid balance.
        return round(annualInterestRate / 12.0 * unpaidBalance ,2)

    def calculateNewBalancePlusInterest(unpaidBalance, interestOnBalance):
        '''(float, float) => float
        Returns the sum of unpaidBalance and interestOnBalance
        >>> calculateNewBalancePlusInterest(4900.00, 73.50)
        4973.50
        '''
        return round(unpaidBalance + interestOnBalance, 2)

    # Tests (nested, run manually by un-commenting the calls below)
    def TestCalculateMinimumMonthlyPayment():
        assert calculateMinimumMonthlyPayment(5000, 0.02) == 100.0,\
            "Minimum payment should be 100.0"
        assert calculateMinimumMonthlyPayment(4973.50, 0.02) == 99.47,\
            "Minimum payment should be 99.47"
        assert calculateMinimumMonthlyPayment(4947.14, 0.02) == 98.94,\
            "Minimum payment should be 98.94"
        print("minimumMonthlyPayment() - All tests pass!")

    def TestCalculateUnpaidBalance():
        assert calculateUnpaidBalance(5000, 100.0) == 4900.00,\
            "Unpaid balance should be 4900.00"
        assert calculateUnpaidBalance(4973.50, 99.47) == 4874.03,\
            "Unpaid balance should be 4874.03"
        assert calculateUnpaidBalance(4947.14, 98.94) == 4848.20,\
            "Unpaid balance should be 4848.20"
        print("calculateUnpaidBalance() - Alll tests pass!")

    def TestCalculateInterest():
        assert calculateInterest(4900.00, 0.18) == 73.50,\
            "Interest should be 73.50"
        assert calculateInterest(4874.03, 0.18) == 73.11,\
            "Interest should be 73.11"
        assert calculateInterest(4848.20, 0.18) == 72.72,\
            "Interest should be 72.72"
        print("calculateInterest() - All tests pass!")

    def TestCalculateNewBalancePlusInterest():
        assert calculateNewBalancePlusInterest(4900.00, 73.50) == 4973.50,\
            "New balance should be 4973.50"
        assert calculateNewBalancePlusInterest(4874.03, 73.11) == 4947.14,\
            "New balance should be 4947.14"
        assert calculateNewBalancePlusInterest(4848.20, 72.72) == 4920.92,\
            "New balance should be 4920.92"
        print("calculateNewBalancePlusInterest() - All tests pass!")

    # Test the helper functions
    #TestCalculateMinimumMonthlyPayment()
    #TestCalculateUnpaidBalance()
    #TestCalculateInterest()
    #TestCalculateNewBalancePlusInterest()

    currentBalance = balance
    monthlyInterestRate = annualInterestRate / 12.0
    # Bisection bounds: balance/12 ignores interest entirely (lower bound);
    # paying off the fully compounded year-end balance in 12 equal parts is
    # always sufficient (upper bound).
    monthlyPaymentLowerBound = balance / 12.0
    monthlyPaymentUpperBound = (balance * (1 + monthlyInterestRate)**12) / 12.0
    # Initial guess for bi-section search
    #minPayment = (monthlyPaymentLowerBound + monthlyPaymentUpperBound) / 2.0
    # Bisect until the simulated end-of-period balance is within 5 cents of 0.
    while abs(currentBalance) >= 0.05:
        # Next guess for the bi-section search
        minPayment = (monthlyPaymentLowerBound + monthlyPaymentUpperBound) / 2.0
        # Reset variables for the next iteration
        currentBalance = balance
        unpaidBalance = 0
        interestOnBalance = 0
        # Simulate numOfMonths of payments at the guessed amount; note the
        # helper divides the annual rate by 12 internally.
        for i in range(numOfMonths):
            unpaidBalance = calculateUnpaidBalance(currentBalance, minPayment)
            interestOnBalance = calculateInterest(unpaidBalance, annualInterestRate)
            currentBalance = calculateNewBalancePlusInterest(unpaidBalance, interestOnBalance)
        # Adjust the lower and upper bounds: overshot (negative balance)
        # means the guess was too high, otherwise too low.
        if currentBalance < 0:
            monthlyPaymentUpperBound = minPayment
        else:
            monthlyPaymentLowerBound = minPayment
    return round(minPayment, 2)
# Main program
# NOTE: runs at import time; expected output is 29157.09 per the doctest above.
#print("Lowest Payment:", calculatePayment(32000, 0.2, 12))
print("Lowest Payment:", calculatePayment(320000, 0.2, 12))
#print("Lowest Payment:", calculatePayment(999999, 0.18))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-08-06 17:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add two boolean display flags (both defaulting to True) to classbadge:
    show_info_before_completion and show_progress."""

    dependencies = [
        ('course', '0020_badge_criteria'),
    ]
    operations = [
        migrations.AddField(
            model_name='classbadge',
            name='show_info_before_completion',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='classbadge',
            name='show_progress',
            field=models.BooleanField(default=True),
        ),
    ]
|
'''earth.py: get the clouds + earth image
May 2014 -- Mendez
http://flatplanet.sourceforge.net/maps/night.html
http://flatplanet.sourceforge.net/maps/natural.html
http://wiki.birth-online.de/know-how/software/linux/xplanet
http://www.fourmilab.ch/fourmilog/archives/Monthly/2005/2005-05.html
http://mathematica.stackexchange.com/questions/3326/composition-how-to-make-a-day-and-night-world-map
'''
# Cloud-layer texture (2048px JPEG) served through a Coral CDN mirror of
# xplanet's cloud map; the timestamped path suggests a dated snapshot that
# may no longer exist — verify before relying on it.
URL = 'http://xplanet.sourceforge.net.nyud.net:8080/clouds/tmp/201405310046.765843/clouds_2048.jpg'
class Earth(object):
    """Determine earth imaging.

    Currently a stub: no state, and update()/display() are unimplemented
    placeholders (presumably meant to fetch/render imagery such as URL above).
    """

    def __init__(self):
        # No state yet.
        pass

    def update(self):
        # Stub — not implemented.
        pass

    def display(self):
        # Stub — not implemented.
        pass
def plot():
    """Draw a world map with coastlines, meridians/parallels and filled
    continents, then shade the night side for the current UTC time and show it.

    Uses a Kavrayskiy VII projection centred on the Pacific (lon_0=180).
    """
    import numpy as np
    from mpl_toolkits.basemap import Basemap
    import matplotlib.pyplot as plt
    from datetime import datetime
    # miller projection alternative:
    # m = Basemap(projection='mill', lon_0=180)
    # Renamed local from `map` to `m`: `map` shadows the builtin.
    m = Basemap(projection='kav7', lon_0=180)
    # plot coastlines, draw label meridians and parallels.
    m.drawcoastlines()
    m.drawparallels(np.arange(-90, 90, 30), labels=[1, 0, 0, 0])
    m.drawmeridians(np.arange(m.lonmin, m.lonmax + 30, 60), labels=[0, 0, 0, 1])
    # fill continents 'coral', color wet areas 'aqua'
    m.drawmapboundary(fill_color='aqua')
    m.fillcontinents(color='coral', lake_color='aqua')
    # Shade the night areas (alpha-transparent) for the current UTC time;
    # the returned contour set was never used, so it is not kept.
    date = datetime.utcnow()
    m.nightshade(date)
    plt.title('Day/Night Map for %s (UTC)' % date.strftime("%d %b %Y %H:%M:%S"))
    plt.show()
if __name__ == '__main__':
    # e = Earth()
    # Render the day/night map when run as a script.
    plot()
|
import unittest
from katas.beta.how_much_hex_is_the_fish import fisHex
class FisHexTestCase(unittest.TestCase):
    """Tests for the 'how much hex is the fish' kata solution fisHex.

    NOTE(review): the expected values are consistent with fisHex XOR-folding
    the hex-digit letters (a-f) found in each name — confirm against the kata
    description.
    """

    def test_equal_1(self):
        self.assertEqual(fisHex('redlionfish'), 12)

    def test_equal_2(self):
        self.assertEqual(fisHex('pufferfish'), 1)

    def test_equal_3(self):
        self.assertEqual(fisHex('puffers'), 14)

    def test_equal_4(self):
        self.assertEqual(fisHex('balloonfish'), 14)

    def test_equal_5(self):
        self.assertEqual(fisHex('blowfish'), 4)

    def test_equal_6(self):
        self.assertEqual(fisHex('bubblefish'), 10)

    def test_equal_7(self):
        self.assertEqual(fisHex('globefish'), 10)

    def test_equal_8(self):
        self.assertEqual(fisHex('swellfish'), 1)

    def test_equal_9(self):
        self.assertEqual(fisHex('toadfish'), 8)

    def test_equal_10(self):
        self.assertEqual(fisHex('toadies'), 9)

    def test_equal_11(self):
        self.assertEqual(fisHex('honey toads'), 9)

    def test_equal_12(self):
        self.assertEqual(fisHex('sugar toads'), 13)

    def test_equal_13(self):
        self.assertEqual(fisHex('sea squab'), 5)
|
from rest_framework import serializers
from rest_framework.exceptions import APIException
from clasificador.models import ClassifierModel
from gerente.datatxt_helpers import Datatxt
import simplejson as json
from pruebas.models import BaseTestResult
class DataTXTErrors(APIException):
    """Raised when a dataTXT API call fails; reported to clients as HTTP 504."""
    status_code = 504
    default_detail = 'DataTXT error.'
class ClassifierModelSerializer(serializers.BaseSerializer):
@staticmethod
def update_on_datatxt(model_id, model):
dt = Datatxt()
req = dt.update_model(model_id, model)
if req.status_code != 200:
print req.content
raise DataTXTErrors()
@staticmethod
def create_on_datatxt(model):
dt = Datatxt()
req = dt.create_model(model)
if req.status_code == 200:
res = req.json()
return res.get('id')
raise DataTXTErrors()
def to_internal_value(self, data):
model = data.get('data')
name = data.get('name')
return {
'json_model': model,
'name': name,
}
def to_representation(self, instance):
tests = BaseTestResult.objects.filter(model_version=instance)\
.order_by('-created')
last_test = {}
len_tests = len(tests)
if len_tests:
i = 0
while tests[i].macro_f1 is None and i <= len_tests:
i += 1
if i < len_tests:
test = tests[i]
last_test = {
'f1': '{0:.2f}'.format(test.macro_f1),
'precision': '{0:.2f}'.format(test.macro_precision),
'recall': '{0:.2f}'.format(test.macro_recall),
}
return {
'id': instance.datatxt_id,
'name': instance.name,
'data': instance.json_model,
'testing_task': instance.testing_task_id,
'last_test': last_test,
}
def create(self, validated_data):
datatxt_id = self.create_on_datatxt(validated_data.get('json_model'))
validated_data['datatxt_id'] = datatxt_id
return ClassifierModel.objects.create(**validated_data)
def update(self, instance, validated_data, init_dt=False):
new_data = validated_data.get('json_model')
if init_dt:
datatxt_id = self.create_on_datatxt(new_data)
instance.datatxt_id = datatxt_id
else:
self.update_on_datatxt(
instance.datatxt_id, new_data
)
instance.json_model = json.loads(new_data)
if validated_data.get('name') is not None:
instance.name = validated_data.get('name')
instance.save()
return instance
|
# Generated by Django 2.2 on 2019-10-19 17:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an 'activo' boolean flag (default True) to the carro model."""

    dependencies = [
        ('app', '0008_pedidos_finalizado'),
    ]
    operations = [
        migrations.AddField(
            model_name='carro',
            name='activo',
            field=models.BooleanField(default=True),
        ),
    ]
|
"""
Localiza el número mas pequeño de una serie de números introducidos por el usuario
"""
# Read exactly 10 numbers from the user, re-prompting until each entry is
# made only of digits, then report the smallest one.
numeros_usuario = []
comprobacion = ""
while len(numeros_usuario) < 10:
    while not comprobacion.isdigit():
        comprobacion = input("Dime un número: ")
    # BUGFIX: store ints, not the raw strings — comparing strings ranks
    # "9" above "10" lexicographically, giving a wrong minimum.
    numeros_usuario.append(int(comprobacion))
    comprobacion = ""
# min() replaces the manual linear scan.
numero_pequeño = min(numeros_usuario)
print("El número más pequeño es: {}".format(numero_pequeño))
|
import io
import sys
import unittest
from logic.parking import Parking
class TestParking(unittest.TestCase):
    """Tests for logic.parking.Parking.

    Every Parking command prints its result, so each test redirects stdout
    into a StringIO buffer and asserts on the exact console output.
    """

    def setUp(self):
        # A fresh (uninitialized) lot, plus a 2-slot lot filled to capacity.
        self.parking_obj = Parking()
        self.parking_obj2 = Parking()
        self.parking_obj2.create_parking_lot(2)
        # NOTE(review): both cars share one registration number — fine for
        # capacity tests, but lookups by registration can only ever find the
        # first match.
        self.parking_obj2.park("MH14GN5463", "blue")
        self.parking_obj2.park("MH14GN5463", "blue")
        self.capturedOutput = io.StringIO()  # capture stdout
        sys.stdout = self.capturedOutput

    # NOTE(review): stdout is restored inside each test rather than here, so
    # a test failing before its restore line would leak the redirect.
    def tearDown(self): pass

    def test_create_parking_lot(self):
        self.parking_obj.create_parking_lot(5)
        sys.stdout = sys.__stdout__
        self.capturedOutput.getvalue()
        self.assertEqual(self.capturedOutput.getvalue(), 'Created a parking lot with 5 slots\n')

    def test_create_parking_lot_already_created(self):
        self.parking_obj2.create_parking_lot(2)
        sys.stdout = sys.__stdout__
        self.capturedOutput.getvalue()
        self.assertEqual(self.capturedOutput.getvalue(),
                         'Parking Lot already created\n')

    def test_create_parking_lot_incorrect_int(self):
        self.parking_obj.create_parking_lot(-10)
        sys.stdout = sys.__stdout__
        self.capturedOutput.getvalue()
        self.assertEqual(self.capturedOutput.getvalue(),
                         'Number of slots provided is incorrect.\n')

    def test_create_parking_lot_incorrect_return(self):
        # create_parking_lot reports via stdout and returns None.
        self.assertEqual(self.parking_obj.create_parking_lot(2), None)

    def test_get_nearest_available_slot(self):
        # Free slot 2 in the full lot; it becomes the nearest available one.
        self.parking_obj2.leave(2)
        free_lot_obj = self.parking_obj2.get_nearest_available_slot()
        self.assertEqual(free_lot_obj.slot_no, 2)

    def test_park(self):
        self.parking_obj.create_parking_lot(3)
        self.parking_obj.park("MH14GN5463", "blue")
        sys.stdout = sys.__stdout__
        self.capturedOutput.getvalue()
        self.assertEqual(self.capturedOutput.getvalue(),
                         'Created a parking lot with 3 slots\nAllocated slot number: 1\n')

    def test_park_lot_full(self):
        # parking_obj2 already holds 2 cars in 2 slots.
        self.parking_obj2.park("test", 'blue')
        sys.stdout = sys.__stdout__
        self.assertEqual(self.capturedOutput.getvalue(),
                         'Sorry, parking lot is full.\n')

    def test_leave(self):
        self.parking_obj2.leave(2)
        sys.stdout = sys.__stdout__
        self.assertEqual(self.capturedOutput.getvalue(),
                         'Slot number 2 is free\n')

    def test_leave_not_exists_slot(self):
        self.parking_obj2.leave(4)
        sys.stdout = sys.__stdout__
        self.assertEqual(self.capturedOutput.getvalue(),
                         'Sorry, slot number does not exist in the parking lot.\n')

    def test_leave_not_exists_car(self):
        # Leaving the same slot twice: second call finds no car there.
        self.parking_obj2.leave(2)
        self.parking_obj2.leave(2)
        sys.stdout = sys.__stdout__
        self.assertEqual(self.capturedOutput.getvalue(),
                         'Slot number 2 is free\nNo car is present at slot number 2\n')

    def test_status(self):
        self.parking_obj2.status()
        sys.stdout = sys.__stdout__
        self.assertEqual(self.capturedOutput.getvalue(),
                         'SlotNo \t RegistrationNo \t Colour\n1\tMH14GN5463\tblue\n2\tMH14GN5463\tblue\n')

    def test__pre_checks(self):
        # _pre_checks on an uninitialized lot reports the missing lot.
        self.parking_obj._pre_checks()
        sys.stdout = sys.__stdout__
        self.assertEqual(self.capturedOutput.getvalue(), 'Parking Lot not created\n')

    def test_registration_numbers_for_cars_with_colour(self):
        self.parking_obj2.registration_numbers_for_cars_with_colour('blue')
        sys.stdout = sys.__stdout__
        # NOTE(review): the expected string looks truncated ('...546') and has
        # no separator between the two plates — confirm it really matches the
        # implementation's output format.
        self.assertEqual(self.capturedOutput.getvalue(), 'MH14GN5463MH14GN546\n')

    def test_registration_numbers_for_cars_with_colour_not_found(self):
        self.parking_obj2.registration_numbers_for_cars_with_colour('blue1')
        sys.stdout = sys.__stdout__
        self.assertEqual(self.capturedOutput.getvalue(), 'Not found\n')

    def test_slot_numbers_for_cars_with_colour(self):
        self.parking_obj2.slot_numbers_for_cars_with_colour('blue')
        sys.stdout = sys.__stdout__
        self.assertEqual(self.capturedOutput.getvalue(), '1 2\n')

    def test_slot_number_for_registration_number(self):
        self.parking_obj2.slot_number_for_registration_number('MH14GN5463')
        sys.stdout = sys.__stdout__
        self.assertEqual(self.capturedOutput.getvalue(), '1\n')
# Run the suite when executed directly.
if __name__ == '__main__':
    unittest.main()
# 백화점 고객의 구매 데이터
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, RobustScaler,MinMaxScaler
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from imblearn.under_sampling import *
import warnings
warnings.filterwarnings("ignore")
# Department-store customer purchase data: load, clean, encode and split for
# gender classification.
x_train = pd.read_csv('C:/Users/hyelim/Desktop/X_train.csv',encoding='cp949')
x_test = pd.read_csv('C:/Users/hyelim/Desktop/X_test.csv',encoding='cp949')
y_train = pd.read_csv('C:/Users/hyelim/Desktop/y_train.csv',encoding='cp949')
df = pd.merge(x_train,y_train, how='inner',on='cust_id')
# Gender label: 0 = female, 1 = male.
sns.countplot('gender',data=y_train)
df.info()
df.isnull().sum()
# Binarize the refund-amount column ('환불금액'): NaN means no refund, any
# non-zero amount becomes 1.
df['환불금액'] = df['환불금액'].fillna(0)
df.loc[df['환불금액'] != 0, '환불금액'] = 1
x_test['환불금액'] = x_test['환불금액'].fillna(0)
x_test.loc[x_test['환불금액'] != 0, '환불금액'] = 1
fig, ax = plt.subplots(figsize=(10,5))
sns.countplot(x='환불금액',data = df, hue = df['gender'],palette='GnBu',ax=ax)
plt.show()
# Label-encode main product ('주구매상품') and main branch ('주구매지점') by
# order of first appearance.
t_list = df['주구매상품'].unique().tolist()
df['주구매상품'] = df['주구매상품'].map(lambda x : t_list.index(x))
t_list = df['주구매지점'].unique().tolist()
df['주구매지점'] = df['주구매지점'].map(lambda x : t_list.index(x))
# NOTE(review): x_test is encoded with its OWN category order, so the same
# category can map to different codes in train vs test — confirm intended.
t_list = x_test['주구매상품'].unique().tolist()
x_test['주구매상품'] = x_test['주구매상품'].map(lambda x : t_list.index(x))
t_list = x_test['주구매지점'].unique().tolist()
x_test['주구매지점'] = x_test['주구매지점'].map(lambda x : t_list.index(x))
# Robust-scale all features.
# NOTE(review): cust_id is scaled and kept as a feature — confirm intended.
x = df.drop(['gender'],axis=1)
y = df['gender']
x = RobustScaler().fit_transform(x)
features = ['cust_id', '총구매액', '최대구매액', '환불금액', '주구매상품', '주구매지점', '내점일수', '내점당구매건수','주말방문비율', '구매주기']
x = pd.DataFrame(x, columns=features)
y = pd.DataFrame(y,columns=['gender'])
df_train = pd.concat([x,y],axis=1)
df_train.head()
X=df_train.drop(['gender'],axis=1)
y=df_train['gender']
# 80/20 train/validation split.
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.20,random_state=10,shuffle=True)
print(X_train.shape)
print(X_test.shape)
# Modeling helper: fit a classifier and report metrics on the held-out split.
def modeling(model,X_train,X_test,y_train,y_test):
    """Train `model` on the training split, predict the test split and print
    its evaluation metrics via metrics()."""
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    metrics(y_test, predictions)
# Report accuracy, precision, recall, f1-score and AUROC for one prediction run.
def metrics(y_test,pred):
    """Print classification metrics (accuracy/precision/recall, f1, AUC) for
    the given true labels and hard predictions."""
    acc = accuracy_score(y_test, pred)
    prec = precision_score(y_test, pred)
    rec = recall_score(y_test, pred)
    f1_val = f1_score(y_test, pred)
    auroc = roc_auc_score(y_test, pred, average='macro')
    print('정확도 : {0:.2f}, 정밀도 : {1:.2f}, 재현율 : {2:.2f}'.format(acc, prec, rec))
    print('f1-score : {0:.2f}, auc : {1:.3f}'.format(f1_val, auroc))
# Random forest baseline.
rfc = RandomForestClassifier(n_estimators=500,random_state=30, max_depth=10,n_jobs=-1)
modeling(rfc,X_train,X_test,y_train,y_test)
# LightGBM baseline.
lgb = LGBMClassifier(n_estimators=1000,num_leaves=64,n_jobs=-1,boost_from_average=False,random_state=10)
modeling(lgb,X_train,X_test,y_train,y_test)
# BUGFIX: removed `y_pred = xgb.predict(x_test)` — `xgb` was never
# instantiated (only the XGBClassifier class is imported), so the line raised
# NameError, and its result was unused anyway.
# Submit the random forest's positive-class probabilities for the test set.
r = rfc.predict_proba(x_test)[:,1]
ids = x_test['cust_id']
output = pd.DataFrame({'cust_id' : ids, 'gender': r})
output.to_csv('C:/Users/hyelim/Desktop/test_predictions.csv', index = False)
output.head()
def to_n_bits(input, input_bits = 8, output_bits = 5):
    """
    Regroup the bits of `input` (integers of `input_bits` bits each) into
    integers of `output_bits` bits, zero-padding the final partial group,
    and return them as a bytes object.
    """
    accumulator = 0
    pending = 0
    groups = []
    for value in input:
        accumulator = (accumulator << input_bits) + value
        pending += input_bits
        # Emit complete output groups from the high end of the accumulator.
        while pending >= output_bits:
            pending -= output_bits
            group = accumulator >> pending
            groups.append(group)
            accumulator -= group << pending
    # Left-pad any leftover bits up to a full output group.
    if pending and output_bits > pending:
        groups.append(accumulator << (output_bits - pending))
    return bytes(groups)
def _convertbits(data, frombits, tobits, pad=True):
"""General power-of-2 base conversion."""
acc = 0
bits = 0
ret = bytearray()
maxv = (1 << tobits) - 1
max_acc = (1 << (frombits + tobits - 1)) - 1
for value in data:
acc = ((acc << frombits) | value) & max_acc
bits += frombits
while bits >= tobits:
bits -= tobits
ret.append((acc >> bits) & maxv)
if pad and bits:
ret.append((acc << (tobits - bits)) & maxv)
return ret |
import z
import zen
import dask_help
import csv
import util
from sortedcontainers import SortedSet
def lowSale():
    """Scan ITOT constituents for stocks with any close below $10, rank them
    by data[0]*data[1] from getLiveData, persist the results, and hand the
    bottom and top twelfth of the ranking to zen.whatAboutThese().

    Side effects: overrides z/dask_help module-level knobs, persists via
    z.setp(), and prints progress.
    """
    # Restrict the universe and point the dask helpers at the history dirs.
    z.getStocks.devoverride = "ITOT"
    dask_help.convertToDask.directory = "history"
    dask_help.createRollingData.dir = "historyCalculated"
    savedlow = dict()
    sorts = SortedSet()
    for astock in z.getStocks():
        path = z.getPath("{}/{}.csv".format("historical", astock))
        # NOTE(review): file handle is never closed, and getLiveData is
        # re-fetched for EVERY sub-$10 row of the same stock — confirm
        # whether a single fetch per stock was intended.
        for row in csv.DictReader(open(path)):
            closep = float(row['Close'])
            if closep < 10.0:
                # NOTE(review): getLiveData's return shape is opaque here;
                # data[0]*data[1] assumes at least two numeric fields.
                data = util.getLiveData(astock, andkey='sharesOutstanding')
                savedlow[astock] = data
                if float(data[0]) > 0.0:
                    sorts.add((data[0]*data[1], astock))
    count = int(len(sorts)/12)
    print("count : {}".format( count ))
    z.setp(savedlow,"savedlow")
    z.setp(sorts,"savedlowsorts")
    print("sorts: {}".format( sorts))
    # Bottom and top twelfth of the ranking.
    zen.whatAboutThese(sorts[:count])
    zen.whatAboutThese(sorts[-1*count:])
#lowSale()
def sortedEtfPrice():
    """Rank the IVV|IUSG stock list by current price (offline data) and show
    the 16 priciest followed by the 16 cheapest via zen.whatAboutThese()."""
    z.online.online = False
    stocks = z.getStocks("IVV|IUSG")
    z.getStocks.devoverride = "ITOT"
    ranked = SortedSet()
    for ticker in stocks:
        current = zen.getPrice(ticker)
        if current:
            ranked.add((current, ticker))
    # Top 16 first, then bottom 16.
    zen.whatAboutThese(ranked[-16:])
    zen.whatAboutThese(ranked[:16])
def sortedDropPrice():
    """Recompute the drop ranking for ITOT constituents and show the 15
    largest entries via zen.whatAboutThese()."""
    z.getStocks.devoverride = "ITOT"
    dask_help.convertToDask.directory = "history"
    dask_help.createRollingData.dir = "historyCalculated"
    zen.setDropRanking()
    ranking_key = "{}latestdrop".format(z.getStocks.devoverride)
    latest_ranking = z.getp(ranking_key)
    zen.whatAboutThese(latest_ranking[-15:])
#sortedDropPrice()
#sortedEtfPrice()
import os
import pandas
def marketCapSort():
    """Dump the persisted market-cap ranking to analysis/mc.csv, largest
    first, as rows of (ticker, market cap, ETF qualifications)."""
    outname = "ITOT_total_mcsorted"
    outd = z.getp(outname)
    path = z.getPath("analysis/mc.csv")
    with open(path, "w") as f:
        # outd is stored ascending; iterate reversed for largest-first.
        for item in reversed(outd):
            astock = item[1]
            etfc = util.getEtfQualifications(astock)
            f.write("{},{},{}\n".format(astock, item[0], etfc))
    # cols = ["mc", "etfs"]
    # df = pandas.DataFrame.from_dict(dicti, orient = 'index', columns=cols)
    # df.to_csv(path)
    ## print(outs[-5:])
    # print(outs[:5])
#marketCapSort()
# for astock in z.getStocks("ITOT"):
# mc = mcsets[astock]
# path = z.getPath("{}/{}.csv".format("historical", astock))
# for row in csv.DictReader(open(path)):
|
# coding: utf-8
"""
To use this backend,
1. create /var/opt/mallet/{tool,data}
2. download mallet to /var/opt/mallet/tool
3. put dictionary file to /var/opt/mallet/data
"""
def retrain(data):
    """Retrain the mallet topic model on `data` and commit a new model dir.

    `data` is an iterable of documents, each a sequence of numeric character
    codes; each document is decoded with chr() and written one per line.

    Directory layout:
    /var/opt/mallet/
        |- tool/
        |- data/
            |- dictionary
            |- 11397283704/
                |- text
                |- data
                |- model
            |- 11397283928/
    """
    MALLET_BIN = '/var/opt/mallet/tool/bin/mallet'
    MALLET_DATA_DIR = '/var/opt/mallet/data'
    import subprocess  # NOTE(review): unused here; commands go via utils.execute_cmd
    from os.path import abspath, dirname, join
    import sys
    # Temporarily extend sys.path so the sibling 'modules' package resolves.
    sys.path.insert(0, join(dirname(dirname(abspath(__file__))), 'modules'))
    import utils
    sys.path.pop(0)
    # Continue training from the most recent model dir, if one exists.
    latest_dir = utils.get_latest_dir(MALLET_DATA_DIR)
    new_dir = utils.create_child_dir(MALLET_DATA_DIR)
    text_path = '{}/text'.format(new_dir)
    data_path = '{}/data'.format(new_dir)
    model_path = '{}/model'.format(new_dir)
    dict_path = '{}/dictionary'.format(MALLET_DATA_DIR)
    # Write one decoded document per line into the new training dir.
    with open('{}/text'.format(new_dir), 'w') as f:
        for chars in data:
            f.write(''.join(map(lambda d: chr(int(d)), chars)) + '\n')
    # Copy the shared dictionary into the new dir and use the local copy.
    utils.execute_cmd("cp {} {}".format(dict_path, new_dir))
    dict_path = '{}/dictionary'.format(new_dir)
    # Import the raw text into mallet's binary format.
    utils.execute_cmd((
        "{bin} import-file --input {input} --output {output} --token-regex '[\p{{L}}\p{{P}}]+' "
        "--keep-sequence --remove-stopwords --use-pipe-from {dictionary} "
    ).format(
        bin=MALLET_BIN,
        input=text_path,
        output=data_path,
        dictionary=dict_path))
    # Train 10 topics, seeding from the previous model when available.
    utils.execute_cmd((
        "{bin} train-topics --input {input} --num-topics 10 --output-model {model} "
        "--num-iterations 1000 --show-topics-interval 1000000 {base_model}"
    ).format(
        bin=MALLET_BIN,
        input=data_path,
        model=model_path,
        base_model=('' if latest_dir is None else
                    '--input-model {}/model'.format(latest_dir))))
    import time
    # NOTE(review): fixed 10s sleep before committing — presumably waiting
    # for output files to land; confirm whether execute_cmd is asynchronous.
    time.sleep(10)
    utils.commit_dir(new_dir)
def fetch():
    """Placeholder — fetching is not implemented for this backend."""
    pass
def main(rpc_service,
         continuum_host='localhost',
         continuum_port=7001,
         redis_host='localhost',
         redis_port=6379,
         backend_name='mallet',
         backend_version='1.0',
         backend_module='mallet_entries',
         app_name='mallet-app',
         policy_name='NaiveBestEffortPolicy',
         input_type='doubles',
         params=None,
         **kwargs):
    """Start the RPC backend service with mallet-specific defaults.

    `params` receives default hyper-parameters (alpha=24.0, beta=12000.0)
    for any key the caller did not supply; extra **kwargs are ignored.
    """
    # BUGFIX: the old mutable default (params={}) was shared across calls, so
    # one call's injected alpha/beta leaked into every later default call;
    # use None as the sentinel and build a fresh dict instead.
    if params is None:
        params = {}
    params.setdefault('alpha', 24.0)
    params.setdefault('beta', 12000.0)
    rpc_service.start(continuum_host, continuum_port, backend_name, backend_version, redis_host,
                      redis_port, backend_module, app_name, policy_name, input_type, params)
|
from django.contrib import admin
from .models import *
# Register your models here.
def created_by(obj):
    """Admin list-column callable: the username of the object's creator."""
    return str(obj.created_by.username)
@admin.register(NewsText)
class NewsTextAdmin(admin.ModelAdmin):
    """Admin list view for NewsText: title, creation time and creator."""
    list_display = ('title', 'created_at', created_by)
@admin.register(Happening)
class HappeningAdmin(admin.ModelAdmin):
    """Admin list view for Happening: title, creation time and creator."""
    list_display = ('title', 'created_at', created_by)

# Column header shown in the admin for the created_by callable above.
created_by.short_description = "Created By"
|
from panda3d.core import LineSegs, Vec4, Point3, TextNode, Vec3
# Widget that shows which direction the camera is looking in the 3D viewport.
class ViewportGizmo:
    """Widget that shows which direction the camera is looking in the 3D
    viewport.

    Draws up to three colored axis lines with billboarded text labels,
    parented to the viewport's bottom-left 2D corner node. The axis index
    doubles as the RGBA channel index, giving X=red, Y=green, Z=blue.
    """

    def __init__(self, vp):
        self.vp = vp
        # Only build the axes the viewport asks for (0=X, 1=Y, 2=Z).
        axes = self.vp.getGizmoAxes()
        self.np = self.vp.a2dBottomLeft.attachNewNode("viewAxisWidget")
        self.np.setScale(0.14)
        self.np.setPos(0.19, 0, 0.19)
        if 0 in axes:
            # X line
            self.xNp = self.makeGizmoAxis(0, "X", 1.2)
        if 1 in axes:
            # Y line
            self.yNp = self.makeGizmoAxis(1, "Y")
        if 2 in axes:
            # Z line
            self.zNp = self.makeGizmoAxis(2, "Z")

    def cleanup(self):
        """Detach and release every node this widget created."""
        self.vp = None
        # xNp/yNp/zNp only exist if the corresponding axis was requested.
        if hasattr(self, 'xNp'):
            self.xNp.removeNode()
            self.xNp = None
        if hasattr(self, 'yNp'):
            self.yNp.removeNode()
            self.yNp = None
        if hasattr(self, 'zNp'):
            self.zNp.removeNode()
            self.zNp = None
        self.np.removeNode()
        self.np = None

    def makeGizmoAxis(self, axis, text, textOffset = 1.1):
        """Build one axis line plus its billboarded text label.

        axis       -- 0/1/2 selects X/Y/Z (also the color channel set to 1).
        text       -- label drawn at the end of the line.
        textOffset -- label distance along the line from the origin.
        Returns the NodePath holding the line (the label is parented to it).
        """
        color = Vec4(0, 0, 0, 1)
        color[axis] = 1
        pos = Vec3(0, 1, 0)
        # Merged the two duplicate `if axis == 1` checks: the Y axis flips
        # both the line direction and the label offset.
        if axis == 1:
            pos[1] = -pos[1]
            textOffset = -textOffset
        direction = Vec3(0)
        direction[axis] = 1
        segs = LineSegs()
        segs.setColor(color)
        segs.moveTo(Point3(0))
        segs.drawTo(pos)
        np = self.np.attachNewNode(segs.create())
        np.lookAt(direction)
        tn = TextNode('gizmoAxis%iText' % axis)
        tn.setTextColor(color)
        tn.setAlign(TextNode.ACenter)
        tn.setText(text)
        tnnp = np.attachNewNode(tn.generate())
        tnnp.setY(textOffset)
        tnnp.setBillboardPointEye()
        tnnp.setScale(0.5)
        return np
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import NaoCreator.SpeechToText.nao_listen as Nl
from NaoCreator.setting import *
def test_naolisten():
    """Manual smoke test: make the Nao robot announce a prompt, then repeat
    back whatever nao_listen() transcribed (spoken phrases are French)."""
    Setting.naoSpeech.say("Test d'une reponse courte !")
    Setting.naoSpeech.say("Test de se que tu ma dit {}".format(Nl.nao_listen()))
if __name__ == '__main__':
    # Run the speech round-trip test when executed directly on the robot.
    test_naolisten()
|
from marshmallow import Schema, fields
from bitcoin_acks.data_schemas.project_schema import ProjectCardsSchema
# NOTE(review): camelCase field names appear to mirror GitHub's GraphQL v4
# response shape for pull requests — confirm against the queries that feed
# these schemas.

class AuthorSchema(Schema):
    """An actor: avatar, login and profile URL."""
    avatarUrl = fields.Url()
    login = fields.Str()
    url = fields.Url()

class CommentSchema(Schema):
    """A single comment; author is None for deleted accounts."""
    author = fields.Nested(AuthorSchema, allow_none=True)
    bodyText = fields.Str()
    id = fields.Str()
    publishedAt = fields.DateTime()
    url = fields.Url()

class CommentsSchema(Schema):
    """Paginated comment connection: nodes plus total count."""
    nodes = fields.Nested(CommentSchema, many=True)
    totalCount = fields.Int()

class StatusContextSchema(Schema):
    """One CI status context (description only)."""
    description = fields.Str()

class StatusSchema(Schema):
    """Combined commit status: overall state plus individual contexts."""
    contexts = fields.Nested(StatusContextSchema, many=True)
    state = fields.Str()

class CommitSchema(Schema):
    """A commit: object id, push date and optional combined status."""
    oid = fields.Str()
    pushedDate = fields.Str(allow_none=True)
    status = fields.Nested(StatusSchema, allow_none=True)

class PullRequestCommitSchema(Schema):
    """Wrapper node linking a pull request to one commit."""
    commit = fields.Nested(CommitSchema)

class PullRequestCommitsSchema(Schema):
    """Paginated commit connection for a pull request."""
    nodes = fields.Nested(PullRequestCommitSchema, many=True)
    totalCount = fields.Int()

class LabelSchema(Schema):
    """An issue/PR label: color, id and name."""
    color = fields.Str()
    id = fields.Str()
    name = fields.Str()

class LabelsSchema(Schema):
    """Paginated label connection."""
    nodes = fields.Nested(LabelSchema, many=True)
    totalCount = fields.Int()

class TimelineItemSchema(Schema):
    """A timeline event; '__typename' is exposed as `typename`."""
    typename = fields.Str(data_key='__typename')
    createdAt = fields.Str()
    projectColumnName = fields.Str()

class TimelineItemsSchema(Schema):
    """Collection of timeline events."""
    nodes = fields.Nested(TimelineItemSchema, many=True)
class PullRequestSchema(Schema):
    """Top-level pull request record: metadata, diff stats, and nested
    comments, reviews, commits, labels, project cards and timeline items.

    NOTE(review): `reviews` reuses CommentsSchema — presumably the review
    payload has the same node shape as comments; confirm against the query.
    """
    additions = fields.Int()
    author = fields.Nested(AuthorSchema, allow_none=True)
    bodyText = fields.Str()
    closedAt = fields.DateTime(allow_none=True)
    comments = fields.Nested(CommentsSchema)
    reviews = fields.Nested(CommentsSchema)
    headRefOid = fields.Str()
    commits = fields.Nested(PullRequestCommitsSchema)
    createdAt = fields.DateTime()
    deletions = fields.Int()
    id = fields.Str()
    labels = fields.Nested(LabelsSchema)
    mergeable = fields.Str()
    mergedAt = fields.DateTime(allow_none=True)
    number = fields.Int()
    projectCards = fields.Nested(ProjectCardsSchema)
    state = fields.Str()
    title = fields.Str()
    updatedAt = fields.DateTime()
    timelineItems = fields.Nested(TimelineItemsSchema)
|
from marshmallow_sqlalchemy import ModelSchema, ModelSchemaOpts
from models import *
from marshmallow import fields, Schema
from db import db
from marshmallow_util import AppModelConverter
from marshmallow_sqlalchemy import field_for
class BaseOpts(ModelSchemaOpts):
    """Schema options that default sqla_session to the app's db.session so
    individual Meta classes don't have to declare it."""

    def __init__(self, meta, *args, **kwargs):
        if not hasattr(meta, 'sqla_session'):
            meta.sqla_session = db.session
        super(BaseOpts, self).__init__(meta, *args, **kwargs)
class AppModelSchema(ModelSchema):
    """Project base schema: a ModelSchema wired to BaseOpts so every subclass
    gets the default db session."""
    OPTIONS_CLASS = BaseOpts
class ProfileSchema(AppModelSchema):
    """Serializer for Profile rows."""
    class Meta:
        model = Profile
        include_fk = True
        ordered = True

class TagCategoriesSchema(AppModelSchema):
    """TagCategories with their related task tags nested."""
    task_tag = fields.Nested('TaskTagSchema', many=True)
    class Meta:
        model = TagCategories
        include_fk = True
        ordered = True

class PlanetSequenceSchema(AppModelSchema):
    """PlanetSequence with parent/planet/profile/task/color nested one level."""
    parent = fields.Nested('PlanetSequenceSchema')
    planet = fields.Nested('PlanetSchema')
    profile = fields.Nested('ProfileSchema')
    task = fields.Nested('TaskSchema')
    planet_color = fields.Nested('PlanetColorSchema')
    class Meta:
        model = PlanetSequence
        include_fk = True
        ordered = True

class PlanetSequenceSchemaWithChildren(ModelSchema):
    """PlanetSequence variant that also nests its children.

    NOTE(review): extends ModelSchema directly, so it skips BaseOpts and the
    default sqla_session — confirm this is intentional. Also note `parent`
    nests PlanetSchema here, unlike PlanetSequenceSchema above.
    """
    children = fields.Nested('PlanetSequenceSchema', many=True)
    parent = fields.Nested('PlanetSchema')
    planet = fields.Nested('PlanetSchema')
    profile = fields.Nested('ProfileSchema')
    task = fields.Nested('TaskSchema')
    planet_color = fields.Nested('PlanetColorSchema')
    class Meta:
        model = PlanetSequence
        include_fk = True
        ordered = True
class PlanetSchema(AppModelSchema):
class Meta:
model = Planet
include_fk = True
ordered = True
class TaskSchema2(AppModelSchema):
class Meta:
model = Task
include_fk = True
ordered = True
class TaskTagSchema(AppModelSchema):
class Meta:
model = TaskTag
include_fk = True
ordered = True
class PriorityLevelSchema(AppModelSchema):
class Meta:
model = PriorityLevel
include_fk = True
ordered = True
class PlanetBreadcrumbSchema(ModelSchema):
    """Breadcrumb view of a ``PlanetSequence``: recurses upwards through
    ``parent`` with all direct relations embedded.

    NOTE(review): derives from plain ``ModelSchema`` instead of
    ``AppModelSchema`` (so no default session) — confirm intentional.
    """

    parent = fields.Nested('PlanetBreadcrumbSchema')
    planet = fields.Nested('PlanetSchema')
    profile = fields.Nested('ProfileSchema')
    task = fields.Nested('TaskSchema')
    planet_color = fields.Nested('PlanetColorSchema')

    class Meta:
        ordered = True
        include_fk = True
        model = PlanetSequence
class TestSchema(Schema):
    """Plain (non-model) schema carrying free-form type/name/body strings."""

    # fields.Str is marshmallow's alias for fields.String.
    type = fields.Str()
    name = fields.Str()
    body = fields.Str()
class PlanetBreadcrumbSchemas(Schema):
    """Breadcrumb tree walked downwards via ``children``.

    NOTE(review): this extends plain ``Schema``, which ignores
    ``Meta.model``/``include_fk`` — only the explicitly declared nested
    fields are serialized. Confirm that is the intent (vs. a ModelSchema).
    """

    children = fields.Nested('PlanetBreadcrumbSchemas', many=True)
    planet = fields.Nested('PlanetSchema')
    profile = fields.Nested('ProfileSchema')
    task = fields.Nested('TaskSchema')
    planet_color = fields.Nested('PlanetColorSchema')

    class Meta:
        ordered = True
        include_fk = True
        model = PlanetSequence
class TaskSchema(AppModelSchema):
    """``Task`` with its planet images and task tags embedded."""

    planet_image = fields.Nested('PlanetImageSchema', many=True)
    task_tag = fields.Nested('TaskTagSchema', many=True)

    class Meta:
        ordered = True
        include_fk = True
        model = Task
class PlanetColorSchema(AppModelSchema):
    """Auto-generated schema for ``PlanetColor`` (FK columns included)."""

    class Meta:
        ordered = True
        include_fk = True
        model = PlanetColor
class PlanetImageSchema(AppModelSchema):
    """Auto-generated schema for ``PlanetImage`` (FK columns included)."""

    class Meta:
        ordered = True
        include_fk = True
        model = PlanetImage
|
""" Script to get the energy and hittime distribution of the prompt signal of preselected events of atmospheric
NC neutrino background that are simulated with JUNO detector simulation.
1. read only the preselected events (preselection done with script preselection_detsim_user.py and saved in folder
/home/astro/blum/juno/atmoNC/data_NC/output_preselection/preselection_detsim/ in files evtID_preselected_{}.txt)
2. Calculate hittime distribution (with time-of-flight correction and PMT time resolution) for each event:
Procedure to get the hittime distribution with vertex reconstruction and time smearing of PMTs (same procedure
like in script hittime_distribution_positron.py):
2.1. calculate time of flight:
2.1.1 for every photon, that hits a PMT (20inch and 3inch), take the PMT position (via PMT ID from file
PMT_position.root) and calculate the time-of-flight distance with the reconstructed position from
file evtID_preselected_{}.txt
2.1.2 with the time-of-flight distance, calculate the time-of-flight of this photon from production to
PMT by considering an effective speed of light in the LS.
2.2. consider TTS of PMTs:
2.2.1 for every photon, that hits a PMT (20inch and 3inch), take the time resolution (sigma) of the PMT
(via PMT ID either from file PmtData.root for the 20inch PMTs or set TTS = 5 ns for 3inch PMTs.)
2.2.2 the TTS of the PMT is FWHM. Therefore calculate sigma from TTS (FWHM = 2*sqrt(2*ln(2)) * sigma).
2.2.3 smear hittime of detsim with gaussian of sigma (time resolution) around the value of detsim hittime
to get the smeared hittime
2.3. for every photon, calculate the 'real' hittime (= smeared hittime - time_of_flight) and store it in
array
2.4. Do points 2.1 to 2.3 for every photon. Then you get the correct hittime of this event. Build histogram
with correct hittimes.
3. Take the prompt signal of the corrected hittime histogram and do a cut on the prompt signal:
use function conversion_npe_to_evis() and convert the number of pe of the prompt signal to visible energy in
MeV and do a cut on the visible energy (10 MeV to 100 MeV).
Only analyze events further that pass prompt energy cut.
4. Analyze delayed signal:
5. Save the hittime histogram of the prompt signal to txt file and png file for further analysis with script
pulse_shape_analysis.py
6. Save number of pe of the prompt signal and number of pe of delayed signal of each event in txt file.
"""
import datetime
import ROOT
import sys
import NC_background_functions
import numpy as np
from matplotlib import pyplot as plt
# ---------------------------------------------------------------------------
# Configuration section: timestamps, file paths, analysis windows and cuts.
# NOTE(review): all paths below are site-specific absolute paths — adjust
# per machine before running.
# ---------------------------------------------------------------------------
# get the date and time, when the script was run:
date = datetime.datetime.now()
now = date.strftime("%Y-%m-%d %H:%M")
""" set the number of the first file and number of the last file that should be read: """
start_number = 0
stop_number = 999
# number of entries in the input files:
Number_entries_input = 100
# set the path of the input root files:
input_path_root = "/local/scratch1/pipc51/astro/blum/detsim_output_data/"
# set the path of evtID_preselected_{}.txt files:
input_path_preselect = "/home/astro/blum/juno/atmoNC/data_NC/output_preselection/preselection_detsim/"
# set the path of the output, where the txt file with the number of pe of each preselected events is saved:
output_path = "/home/astro/blum/juno/atmoNC/data_NC/output_detsim/"
""" define time window and bin width: """
# set time window of whole signal in ns:
min_time = -50
max_time = 1000000
# set time in ns, where the prompt signal should be 0:
time_limit_prompt = 500
# Set bin-width of hittime histogram in ns:
binwidth = 5.0
""" parameters for prompt energy cut: """
# minimal visible energy of prompt signal in MeV:
min_energy = 10.0
# maximal visible energy of prompt signal in MeV:
max_energy = 100.0
# preallocate number of events that are rejected by prompt energy cut:
number_rejected_prompt_cut = 0
# preallocate number of events where nPE of prompt signal is below min_energy:
number_rejected_prompt_cut_min = 0
# preallocate number of events where nPE of prompt signal is above max_energy:
number_rejected_prompt_cut_max = 0
""" thresholds and cuts for delayed signal: """
# Set threshold of number of PE per bin for possible delayed signal (bin-width = 5 ns):
threshold1_del = 50
# set threshold2 of number of PEs per bin (signal peak is summed as long as nPE is above threshold2):
threshold2_del = 0
# min and max number of PE for delayed energy cut (from check_delayed_energy.py):
min_PE_delayed = 2805.53
max_PE_delayed = 3731.04
# preallocate number of events that are rejected by delayed energy cut:
number_rejected_delayed_energy_cut = 0
# preallocate array, where npe of delayed signal, that wouldn't pass the delayed energy cut are saved:
number_pe_delayed_rejected_array = np.array([])
""" load position of the PMTs and corresponding PMT ID from file PMT_position.root: """
file_PMT_position = "/home/astro/blum/juno/atmoNC/PMT_information/PMT_position.root"
# array with PMT ID and corresponding x, y, z position in mm:
pmtID_pos_file, x_pos_pmt, y_pos_pmt, z_pos_pmt = NC_background_functions.get_pmt_position(file_PMT_position)
""" load 'time resolution' in ns of the 20 inch PMTs and corresponding PMT ID from file PmtData.root: """
file_PMT_time = "/home/astro/blum/juno/atmoNC/PMT_information/PmtData.root"
# array with PMT ID and corresponding sigma in ns:
pmtID_time_file, sigma_time_20inch = NC_background_functions.get_20inchpmt_tts(file_PMT_time)
# set TTS (FWHM) of the 3inch PMTs in ns:
tts_3inch = 5.0
# calculate time resolution (sigma) for the 3inch PMTs in ns:
# (FWHM = 2*sqrt(2*ln(2)) * sigma)
sigma_time_3inch = tts_3inch / (2 * np.sqrt(2 * np.log(2)))
# set effective speed of light in the liquid scintillator in mm/ns (see page 7 of c_effective_JUNO-doc-3144-v2.pdf in
# folder /home/astro/blum/PhD/paper/Pulse_Shape_Discrimination/). Effective refraction index in LS n_eff = 1.54.
# c/n_eff = 299792458 m / 1.54 s ~ 194670427 m/s = 194670427 * 10**(-6) mm/ns ~ 194.67 mm/ns:
c_effective = 194.67
# Main analysis loop: for every detsim file, read its preselected events,
# build the time-of-flight-corrected / TTS-smeared hittime distribution,
# apply the prompt energy cut, analyze the delayed signal, and save results.
# loop over the files:
for index in range(start_number, stop_number+1, 1):
    # read evtID_preselected_{}.txt file:
    evtID_pre_arr, x_reco_arr, y_reco_arr, z_reco_arr = np.loadtxt(input_path_preselect + "evtID_preselected_{0:d}.txt"
                                                                   .format(index), unpack=True)
    # load user_atmoNC_index.root file:
    rfile = ROOT.TFile(input_path_root + "user_atmoNC_{0:d}.root".format(index))
    print("... read {0}...".format(rfile))
    # get the "evt"-TTree from the TFile:
    rtree_evt = rfile.Get("evt")
    # get the number of events in the 'evt' Tree:
    number_events_evt = rtree_evt.GetEntries()
    # check number of events:
    if number_events_evt != Number_entries_input:
        sys.exit("ERROR: number of events in root file ({0:d}) != {1:d}"
                 .format(number_events_evt, Number_entries_input))
    # preallocate array, where total nPE of prompt signal per event for one file is saved:
    number_pe_total = np.array([])
    # preallocate array, where total nPE of delayed signal per event for one file is saved:
    number_pe_total_del = np.array([])
    # loop over the length of evtID_pre_arr and read only the preselected events from the root file:
    for index1 in range(len(evtID_pre_arr)):
        # get evtID of preselected event:
        event_id = int(evtID_pre_arr[index1])
        # get event of 'evt'-tree:
        rtree_evt.GetEntry(event_id)
        # get evtID of the tree and compare with evtID of evtID_pre_arr:
        evt_id = int(rtree_evt.GetBranch('evtID').GetLeaf('evtID').GetValue())
        if evt_id != event_id:
            sys.exit("ERROR: evtID of tree ({0:d}) != evtID of evtID_preselected.txt ({1:d})".format(evt_id, event_id))
        # print("\nanalyze event {0:d}".format(evt_id))
        """ calculate the real hittime distribution (time of flight correction with reconstructed position and time
        smearing with TTS for each hit): """
        # get number of photons of this event:
        n_photons = int(rtree_evt.GetBranch('nPhotons').GetLeaf('nPhotons').GetValue())
        # preallocate empty array to build default hittime-histogram:
        hittime_array = []
        # loop over every photon in the event:
        for index2 in range(n_photons):
            # get nPE for this photon:
            n_pe = int(rtree_evt.GetBranch('nPE').GetLeaf('nPE').GetValue(index2))
            # check, if photon produces only 1 PE:
            if n_pe != 1:
                print("{1:d} PE for 1 photon in event {0:d} in file user_atmoNC_{2:d}.root"
                      .format(event_id, n_pe, index))
            # get the pmtID of the hit PMT:
            pmtID = int(rtree_evt.GetBranch('pmtID').GetLeaf('pmtID').GetValue(index2))
            # get hittime of this photon:
            hit_time = float(rtree_evt.GetBranch('hitTime').GetLeaf('hitTime').GetValue(index2))
            # get position of the PMT with specific pmtID (pmtID is ascending number from 0 to 17738 (17739 large PMTs)
            # and from 300000 to 336571 (36572 small PMTs)).
            # For large PMTs -> For 20inch PMTs, the pmtID is equal to index of x,y,z_pos_pmt array.
            # For small PMTs -> For 3inch PMTs, the pmtID - (300000 - 17739) is equal to index of x,y,z_pos_pmt array.
            # check if PMT is 20 inch or 3inch (pmtID < 50000 means 20inch PMT):
            if pmtID < 50000:
                # 20inch PMT:
                # get PMT position in mm from arrays:
                x_pmt = x_pos_pmt[pmtID]
                y_pmt = y_pos_pmt[pmtID]
                z_pmt = z_pos_pmt[pmtID]
            else:
                # 3inch PMT:
                # calculate index of pos_pmt array that correspond to pmtID of 3inch PMTs (for example:
                # first small PMT: 300000-282261 = 17739, last small PMT: 336571-282261 = 54310)
                index_3inch = pmtID - 282261
                # get PMT position in mm from arrays:
                x_pmt = x_pos_pmt[index_3inch]
                y_pmt = y_pos_pmt[index_3inch]
                z_pmt = z_pos_pmt[index_3inch]
            # calculate distance between reconstructed position of event and position of PMT (in mm):
            distance_tof = np.sqrt((x_reco_arr[index1] - x_pmt)**2 + (y_reco_arr[index1] - y_pmt)**2 +
                                   (z_reco_arr[index1] - z_pmt)**2)
            # calculate time of flight in ns:
            time_of_flight = distance_tof / c_effective
            """ time resolution of PMT: """
            # get time resolution of PMT with specific pmtID (pmtID is ascending number from 0 to 17738 (17739 large
            # PMTs)) -> For 20inch PMTs, the pmtID is equal to index of sigma_time_20inch array.
            # check if PMT is 20 inch or 3inch (pmtID < 50000 means 20inch PMT):
            if pmtID < 50000:
                # 20inch PMT:
                # get time resolution (sigma) of PMT in ns from array:
                sigma_pmt = sigma_time_20inch[pmtID]
            else:
                # 3inch PMT:
                sigma_pmt = sigma_time_3inch
            # consider time resolution of PMT by generating normal distributed random number with mu = hit_time and
            # sigma = sigma_pmt (only the hit_time at the PMT must be smeared, not the time-of-flight):
            hittime_tts = np.random.normal(hit_time, sigma_pmt)
            """ calculate the 'real' hittime of the photon in ns: """
            hittime_real = hittime_tts - time_of_flight
            if hittime_real < min_time:
                print("------")
                print(hittime_real)
                print(pmtID)
                print(sigma_pmt)
            # append hittime to array:
            hittime_array.append(hittime_real)
        """ analyze prompt signal: """
        # build histogram, where hittimes are saved:
        # set bin-edges of hittime histogram in ns:
        bins_hittime = np.arange(min_time, max_time + 2 * binwidth, binwidth)
        # build hittime histogram:
        npe_per_hittime, bin_edges_hittime = np.histogram(hittime_array, bins_hittime)
        # get index of bins_hittime corresponding to min_time (should be index = 0):
        index_min_hittime_prompt = 0
        # Where does prompt signal end?
        # get index of bins_hittime corresponding to time_limit_prompt
        index_time_limit_prompt = int((time_limit_prompt + np.abs(min_time)) / binwidth)
        # check if npe_per_hittime (and the following two bins) are 0 for this index:
        if (npe_per_hittime[index_time_limit_prompt] == npe_per_hittime[index_time_limit_prompt+1]
                == npe_per_hittime[index_time_limit_prompt+2] == 0):
            # prompt signal already 0:
            index_max_hittime_prompt = index_time_limit_prompt
        else:
            # prompt signal not yet 0.
            # Fix: initialize fallback so index_max_hittime_prompt is always defined for this event, even if the
            # scan below never finds three consecutive empty bins (previously this raised NameError on the first
            # event or silently reused the stale value of the previous event):
            index_max_hittime_prompt = index_time_limit_prompt + 200
            # loop over npe_per_hittime from index_time_limit_prompt until npe_per_hittime (and the following two bins)
            # are 0:
            for index3 in range(index_time_limit_prompt, index_time_limit_prompt+200):
                if npe_per_hittime[index3] == npe_per_hittime[index3+1] == npe_per_hittime[index3+2] == 0:
                    index_max_hittime_prompt = index3
                    break
        # calculate nPE as function of hittime only for prompt time window (from min_hittime_prompt to
        # max_hittime_prompt+1, last index should be included):
        npe_per_hittime_prompt = npe_per_hittime[index_min_hittime_prompt:index_max_hittime_prompt+1]
        # bin edges of hittime histogram only for prompt time window:
        bins_hittime_prompt = bin_edges_hittime[index_min_hittime_prompt:index_max_hittime_prompt+1]
        # get the minimum and maximum time of the prompt signal time window in ns:
        min_time_prompt = bins_hittime_prompt[0]
        max_time_prompt = bins_hittime_prompt[-1]
        # sum up the values of npe_per_hittime_prompt to get the total number of pe of the prompt signal:
        number_pe_prompt = np.sum(npe_per_hittime_prompt)
        # convert the total number of pe to quenched deposited energy in MeV:
        quenched_deposit_energy = NC_background_functions.conversion_npe_to_evis(number_pe_prompt)
        # check, if energy is in the correct energy window:
        if quenched_deposit_energy < min_energy or quenched_deposit_energy > max_energy:
            # event is rejected:
            number_rejected_prompt_cut += 1
            # check further:
            if quenched_deposit_energy < min_energy:
                # e_vis too small:
                number_rejected_prompt_cut_min += 1
            elif quenched_deposit_energy > max_energy:
                # e_vis too large:
                number_rejected_prompt_cut_max += 1
            # go to next event:
            continue
        # append number of pe to array:
        number_pe_total = np.append(number_pe_total, number_pe_prompt)
        """ analyze delayed signal: """
        # INFO-me: nPE of delayed signal is only saved as info -> NO cut is applied to delayed signal in this script!
        # print("analyze delayed signal")
        # get index of bin_edges_hittime corresponding to the end of the prompt signal window:
        index_min_hittime_del = index_max_hittime_prompt
        # calculate nPE as function of hittime only for delayed time window (from min_hittime_del to end):
        npe_per_hittime_del = npe_per_hittime[index_min_hittime_del:]
        # bin edges of hittime histogram only for delayed time window:
        bins_hittime_del = bin_edges_hittime[index_min_hittime_del:-1]
        # get the minimum and maximum time of the delayed signal time window in ns:
        min_time_delayed = bins_hittime_del[0]
        max_time_delayed = bins_hittime_del[-1] + binwidth
        # preallocate number of possible delayed signals:
        number_delayed_signal = 0
        # preallocate first index of npe_per_hittime_del:
        index_first_del = 0
        # preallocate number of pe of delayed signal:
        number_pe_delayed = 0
        # analyze npe_per_hittime_del for possible delayed signals. As long as number_delayed_signal<2 and as long as
        # index has not reached the end of npe_per_hittime_del, check event for possible delayed signals
        while number_delayed_signal < 2 and index_first_del < len(npe_per_hittime_del):
            number_delayed, index_first_del, num_pe_delayed, begin_pulse, end_pulse = \
                NC_background_functions.analyze_delayed_signal(npe_per_hittime_del, bins_hittime_del, index_first_del,
                                                               threshold1_del, threshold2_del, min_PE_delayed,
                                                               max_PE_delayed, event_id)
            number_delayed_signal += number_delayed
            number_pe_delayed += num_pe_delayed
        if number_delayed_signal != 1:
            # 0 or more than one delayed signals that pass the delayed energy cut (-> no or more than one neutron
            # capture on hydrogen):
            number_rejected_delayed_energy_cut += 1
            # append npe of delayed signal, that wouldn't pass delayed energy cut to array:
            number_pe_delayed_rejected_array = np.append(number_pe_delayed_rejected_array, number_pe_delayed)
        # append number of pe of delayed signal to array:
        number_pe_total_del = np.append(number_pe_total_del, number_pe_delayed)
        # check number_delayed_signal:
        if number_delayed_signal == 0:
            print("---------------ERROR: no delayed signal in event {0:d}".format(event_id))
            h1 = plt.figure(1)
            plt.step(bins_hittime_del, npe_per_hittime_del, label="nPE of peak = {0:.0f}".format(number_pe_delayed))
            plt.xlabel("hit-time in ns")
            plt.ylabel("number of p.e. per bin (bin-width = {0:0.2f} ns)".format(binwidth))
            plt.title("Hit-time distribution of delayed time window of event {0:d}\nNO delayed signal".format(event_id))
            plt.xlim(xmin=min_time_delayed, xmax=max_time_delayed)
            plt.legend()
            plt.grid()
            plt.savefig(output_path + "file{1:d}_evt{0:d}_no_delayed_signal.png".format(event_id, index))
            plt.close()
            # plt.show()
        elif number_delayed_signal > 1:
            print("+++++++++++++++ERROR: more than one delayed signal in event {0:d}".format(event_id))
            h1 = plt.figure(1)
            # Fix: use the same '{0:.0f}' format as in the no-delayed-signal branch above; '{0:d}' raises
            # ValueError when number_pe_delayed is a float:
            plt.step(bins_hittime_del, npe_per_hittime_del, label="nPE of peak = {0:.0f}".format(number_pe_delayed))
            plt.xlabel("hit-time in ns")
            plt.ylabel("number of p.e. per bin (bin-width = {0:0.2f} ns)".format(binwidth))
            plt.title("Hit-time distribution of delayed time window of event {0:d}\n"
                      "More than 1 delayed signals".format(event_id))
            plt.xlim(xmin=min_time_delayed, xmax=max_time_delayed)
            plt.legend()
            plt.grid()
            plt.savefig(output_path + "file{1:d}_evt{0:d}_more_than_1_delayed_signal.png".format(event_id, index))
            plt.close()
            # plt.show()
        # save hittime distribution of the IBD-like events to png and txt file:
        h2 = plt.figure(2)
        plt.step(bins_hittime_prompt, npe_per_hittime_prompt, label="number of pe = {0:d}\n"
                                                                    "visible energy = {1:.3f} MeV"
                 .format(number_pe_prompt, quenched_deposit_energy))
        plt.xlabel("hit-time in ns")
        plt.ylabel("number of p.e. per bin (bin-width = {0:0.2f} ns)".format(binwidth))
        plt.title("Hit-time distribution of prompt time window of event {0:d}".format(event_id))
        plt.xlim(xmin=min_time_prompt, xmax=max_time_prompt)
        plt.legend()
        plt.grid()
        plt.savefig(output_path + "file{1:d}_evt{0:d}_prompt_signal.png".format(event_id, index))
        plt.close()
        # plt.show()
        # save npe_per_hittime_prompt to txt file:
        # build list, where 0th entry is start-hittime in ns, 1st entry is last-hittime in ns, 2nd entry is binwidth in
        # ns and the following entries are nPE of each hittime-bin of prompt signal:
        npe_per_hittime_prompt_save = [min_time_prompt, max_time_prompt, binwidth]
        npe_per_hittime_prompt_save.extend(npe_per_hittime_prompt)
        np.savetxt(output_path + "file{0:d}_evt{1:d}_prompt_signal.txt".format(index, event_id),
                   npe_per_hittime_prompt_save, fmt='%1.2f',
                   header="Number of pe as function of the hittime of the prompt signal (time-of-flight correction "
                          "and TTS smearing) of file user_atmoNC_{0:d}.root,"
                          "\npreselected event {1:d} (analyzed with script prompt_signal_preselected_evts.py, {2}):"
                          "\ntime window of hittime: from {3:.3f} ns to {4:.3f} ns with bin-width = {5:0.3f} ns,"
                          "\nEnergy cut on prompt signal is applied: {6:0.1f} MeV <= E_vis <= {7:0.1f} MeV,"
                          "\nConversion function E_vis = 0.0007475 * nPE:"
                   .format(index, event_id, now, min_time_prompt, max_time_prompt, binwidth, min_energy, max_energy))
    # save array number_pe_total to txt file:
    np.savetxt(output_path + "number_pe_file{0:d}.txt".format(index), number_pe_total, fmt="%i",
               header="Total number of pe of prompt signal for every preselected event (that pass prompt energy cut) in"
                      " file user_atmoNC_{0:d}.root\n"
                      "({1}). Number of pe is analyzed with script prompt_signal_preselected_evts.py.\n"
                      "Time window of prompt signal is defined from {2:0.2f} ns to around {3:0.2f} ns (number of events"
                      " = {4:d}).\n"
                      "Energy cut on prompt signal is applied: {5:0.1f} MeV <= E_vis <= {6:0.1f} MeV,\n"
                      "Conversion function E_vis = 0.0007475 * nPE:"
               .format(index, now, min_time_prompt, time_limit_prompt, len(number_pe_total), min_energy, max_energy))
    # save array number_pe_total_del to txt file:
    np.savetxt(output_path + "number_pe_delayed_file{0:d}.txt".format(index), number_pe_total_del, fmt="%i",
               header="Total number of pe of delayed signal for every preselected event (that pass prompt energy cut) "
                      "in file user_atmoNC_{0:d}.root\n"
                      "({1}). Number of pe is analyzed with script prompt_signal_preselected_evts.py.\n"
                      "Time window of delayed signal is defined from around {2:0.2f} ns to {3:0.2f} ns.\n"
                      "Threshold of delayed signal is set to {4:0.2f} PE and {5:0.2f} PE (number of events = {6:d}).\n"
                      "A not very strict energy cut on the delayed signal ({7:0.0f} PE <= nPE delayed <= {8:0.0f} PE) "
                      "is also applied:"
               .format(index, now, time_limit_prompt, max_time_delayed, threshold1_del, threshold2_del,
                       len(number_pe_total_del), min_PE_delayed, max_PE_delayed))
# summary of cut statistics over all files:
print("number of events (of preselect. evts) rejected by prompt energy cut = {0:d}".format(number_rejected_prompt_cut))
print("number of events (of preselect. evts) with nPE of prompt signal < min_energy = {0:d}"
      .format(number_rejected_prompt_cut_min))
print("number of events (of preselect. evts) with nPE of prompt signal > max_energy = {0:d}"
      .format(number_rejected_prompt_cut_max))
print("number of events (of preselect. evts), that would be rejected by delayed energy cut = {0:d}"
      .format(number_rejected_delayed_energy_cut))
|
from goods.models import Good
from django.forms import ModelForm
class GoodForm(ModelForm):
    """ModelForm exposing the editable attributes of a :class:`Good`."""

    class Meta:
        model = Good
        fields = [
            "name", "category", "description", "content", "price",
            "price_acc", "in_stock", "featured", "image",
        ]
|
from django.db import models
from .utils import constants
from .utils.models import CreationModificationDateMixin
from datetime import time
# Create your models here.
class Lote(CreationModificationDateMixin):
    """Production batch (lote), identified by its unique number."""

    # Unique batch id used as primary key (null=False is redundant for a PK
    # but kept for explicitness).
    lote_nro = models.PositiveIntegerField(help_text="ID lote, debe ser unico",
                                           primary_key=True, null=False)
    observaciones = models.TextField(max_length=100,
                                     null=True, blank=True)

    class Meta:
        # Newest batches first; fecha_creacion presumably comes from
        # CreationModificationDateMixin — confirm against .utils.models.
        ordering = ["-fecha_creacion"]

    def __str__(self):
        return str(self.lote_nro)
class Barril(CreationModificationDateMixin):
    """Keg (barril), identified by its unique alphanumeric id."""

    barril_nro = models.CharField(max_length=20,
                                  help_text="ID barril, debe ser unico",
                                  primary_key=True, null=False)
    observaciones = models.TextField(max_length=100,
                                     null=True, blank=True)

    class Meta:
        # Newest kegs first (creation timestamp from the mixin).
        ordering = ["-fecha_creacion"]

    def __str__(self):
        return str(self.barril_nro)
class MovimientosBarril(CreationModificationDateMixin):
    """Movement record of a keg (barril) for a given batch (lote)."""

    barril = models.ForeignKey(
        'Barril', on_delete=models.CASCADE, null=True)
    lote = models.ForeignKey(
        'Lote', on_delete=models.CASCADE, null=True)
    cliente = models.CharField(max_length=20,
                               help_text="Cliente, obligatorio")
    # check-in / check-out dates of the keg:
    ingresa = models.DateField(null=True, blank=True)
    egresa = models.DateField(null=True, blank=True)
    estado_devolucion = models.CharField(max_length=20,
                                         choices=constants.DEVOLUCION,
                                         null=True, blank=True)

    class Meta:
        ordering = ["-fecha_creacion"]

    def __str__(self):
        # Same text as the old concatenation-based implementation.
        return f"Movimiento de barril {self.barril} - lote {self.lote}"
# PLANILLA MACERACION COCCION
class SeguimientoMaceracionCoccion(CreationModificationDateMixin):
    """
    First stage of production (mash/boil follow-up sheet); the whole process
    is identified by a unique id, the lote_nro.
    """
    # One sheet per batch; the batch number doubles as primary key.
    lote = models.OneToOneField(
        Lote, on_delete=models.CASCADE, primary_key=True)
    fecha_fin = models.DateField(null=True, blank=True)
    observaciones = models.TextField(
        max_length=100, null=True, blank=True)

    def __str__(self):
        # f-strings are already str — the redundant str() wrapper was removed.
        return f"Planilla de Maceración/Cocción - Lote número {self.lote.lote_nro}"
class Maceracion(models.Model):
    """
    General mash (maceracion) stage; batch number may be 1 or 2.
    """
    NRO_BATCH = (
        (1, 1),
        (2, 2),
    )
    batch_nro = models.PositiveIntegerField(choices=NRO_BATCH)
    seguimiento_maceracion_coccion = models.ForeignKey(
        'SeguimientoMaceracionCoccion', on_delete=models.CASCADE, null=True)
    densidad_finalizacion_maceracion = models.FloatField(null=True, blank=True)
    densidad_finalizacion_lavado = models.FloatField(null=True, blank=True)
    observaciones = models.TextField(
        max_length=100, null=True, blank=True)

    def __str__(self):
        # f-strings are already str — the redundant str() wrapper was removed.
        return f"Maceracion - Lote {self.seguimiento_maceracion_coccion.lote.lote_nro} - Batch número {self.batch_nro}"
class Correccion(models.Model):
    """pH correction record for a given mash stage/batch."""

    maceracion = models.ForeignKey(
        'Maceracion', on_delete=models.CASCADE, null=True)
    # pH readings before and after adding phosphoric acid:
    inicial = models.FloatField(blank=True, null=True)
    acido_fosforico = models.FloatField(null=True, blank=True)
    final_maceracion = models.FloatField(null=True, blank=True)
class OllaMaceracion(models.Model):
    """Data recorded for the mash tun: grain type, weight (kg) and water (L)."""

    maceracion = models.ForeignKey(
        'Maceracion', on_delete=models.CASCADE, null=True)
    granos = models.CharField(
        max_length=20, help_text="Tipo de grano", blank=True)
    cantidad = models.FloatField(
        null=True, blank=True, help_text='Cantidad expresada en kilogramos')
    agua = models.CharField(
        max_length=20, help_text="Litros", null=True, blank=True)
class OllaAguaCaliente(models.Model):
    """Hot-liquor-tank (olla de agua caliente) readings for a mash stage."""

    maceracion = models.ForeignKey(
        'Maceracion', on_delete=models.CASCADE, null=True)
    agua_dureza = models.CharField(
        max_length=20, help_text="dureza de agua dentro de Olla caliente",
        null=True, blank=True)
    agua_ph = models.CharField(
        max_length=20, help_text="ph del agua dentro Olla caliente",
        null=True, blank=True)
    filtracion_hora_inicio = models.TimeField(
        choices=constants.HORA, null=True, blank=True)
    filtracion_temperatura = models.CharField(
        max_length=20, null=True, blank=True)
class EtapaOllaAguaCaliente(models.Model):
    """Stage of the hot-liquor tank; currently only "empaste" or
    "maceracion"."""

    olla_agua_caliente = models.ForeignKey(
        'OllaAguaCaliente', on_delete=models.CASCADE, null=True)
    NOMBRE_ETAPA = (
        ('empaste', 'Empaste'),
        ('maceracion', 'Maceracion'),
    )
    etapa_nombre = models.CharField(max_length=20, choices=NOMBRE_ETAPA,
                                    help_text="etapa nombre, solo puede ser Empaste o Maceracion")
    etapa_hora_inicio = models.TimeField(
        choices=constants.HORA, null=True, blank=True)
    temperatura_R = models.CharField(max_length=10, null=True, blank=True)
    temperatura_M = models.CharField(max_length=10, null=True, blank=True)
    altura = models.CharField(max_length=10, null=True, blank=True)
    agit_rec = models.CharField(
        max_length=10, help_text="", null=True, blank=True)
# Proceso de Cocccion
class Coccion(models.Model):
    """Boil (coccion) stage for a follow-up sheet; batch number may be 1 or 2.

    (The original docstring said "maceracion" — it was copied from the
    Maceracion class; this model records the boil.)
    """

    proceso_maceracion_coccion = models.ForeignKey(
        'SeguimientoMaceracionCoccion', on_delete=models.CASCADE, null=True)
    NRO_BATCH = (
        (1, 1),
        (2, 2),
    )
    batch_nro = models.PositiveIntegerField(
        choices=NRO_BATCH, help_text="nro de batch correspondiente, puede ser 1 o 2")
    densidad_finalizacion_hervor = models.FloatField(null=True, blank=True)
    hora_fin_trasiego = models.TimeField(
        choices=constants.HORA, null=True, blank=True)
    observaciones = models.TextField(
        max_length=50, null=True, blank=True)
class EtapaCoccion(models.Model):
    """
    One stage of a boil (coccion); currently one of:
    Lavado, Aumento T, Hervor, Reposo or Trasiego.
    """
    coccion = models.ForeignKey('Coccion', on_delete=models.CASCADE, null=True)
    NOMBRE_ETAPA = (
        ('Lavado', 'Lavado'),
        ('Aumento T', 'Aumento T'),
        ('Hervor', 'Hervor'),
        ('Reposo', 'Reposo'),
        ('Trasiego', 'Trasiego'),
    )
    # Fix: NOMBRE_ETAPA was declared but never used; wire it up as choices so
    # the field is validated against the listed stage names, mirroring
    # EtapaOllaAguaCaliente.etapa_nombre. (Requires a no-op data migration.)
    etapa_nombre = models.CharField(max_length=20, choices=NOMBRE_ETAPA)
    etapa_hora_inicio = models.TimeField(
        choices=constants.HORA, null=True, blank=True)
# según planilla, cada etpa solo tiene una adicion
class AdicionCoccion(models.Model):
    """Addition (hops, etc.) made during the boil of a given coccion.

    Per the paper sheet, each stage has at most one addition, so additions
    hang directly off Coccion (an EtapaCoccion FK was considered and left
    out).
    """

    coccion = models.ForeignKey('Coccion', on_delete=models.CASCADE, null=True)
    tipo = models.CharField(max_length=20, help_text="Tipo de adicion")
    gramos = models.PositiveIntegerField(
        null=True, blank=True, help_text='Cantidad expresada en gramos')
    hora_adicion = models.TimeField(
        choices=constants.HORA, null=True, blank=True)
# PLANILLA CONTROL DE FERMENTACION
class ParametrosFundamentales(models.Model):
    """Key fermentation parameters for a batch (one row per lote)."""

    lote = models.OneToOneField(
        Lote, on_delete=models.CASCADE, primary_key=True)
    # original / final density and derived theoretical alcohol:
    dO = models.CharField(max_length=10, null=True, blank=True)
    dF = models.CharField(max_length=10, null=True, blank=True)
    alcohol_teorico = models.CharField(max_length=10, null=True, blank=True)
    pH_inicial = models.CharField(max_length=10, null=True, blank=True)
    pH_final = models.CharField(max_length=10, null=True, blank=True)
    observaciones = models.TextField(max_length=100, null=True, blank=True)
class SeguimientoFermentacion(CreationModificationDateMixin):
    """
    Fermentation-control process; the whole process is identified by a
    unique id, the lote_nro.
    """
    class Meta:
        verbose_name = "Seguimiento de Fermentacion"
        verbose_name_plural = "Seguimientos de Fermentacion"
    lote = models.OneToOneField(
        Lote, on_delete=models.CASCADE, primary_key=True)
    vasija = models.CharField(max_length=10, null=True, blank=True)
    fecha_llenado = models.DateField(null=True, blank=True)
    litros = models.FloatField(null=True, blank=True)
    fecha_inoculacion_levadura = models.DateField(null=True, blank=True)
    tipo_levadura = models.CharField(max_length=20, null=True, blank=True)

    def __str__(self):
        # f-strings are already str — the redundant str() wrapper was removed.
        return f"Planilla de Control de Fermentación - Lote número {self.lote.lote_nro}"
class InoculacionLevadura(models.Model):
    """Yeast-pitching record; kept as its own model (rather than folded into
    SeguimientoFermentacion) because of its number of attributes."""

    class Meta:
        verbose_name = "Inoculación de Levadura"
        verbose_name_plural = "Inoculaciones de Levadura"

    seguimiento_control_fermentacion = models.OneToOneField(
        SeguimientoFermentacion, on_delete=models.CASCADE)
    hora = models.TimeField(
        choices=constants.HORA, null=True, blank=True)
    levadura = models.CharField(max_length=20, null=True, blank=True)
    dosis = models.CharField(
        help_text="(g/Hl)", max_length=20, null=True, blank=True)
    temp_sala = models.CharField(
        help_text="grados Centígrados", max_length=10, null=True, blank=True)
    temp_mosto = models.CharField(
        help_text="grados Centígrados", max_length=10, null=True, blank=True)
    densidad = models.CharField(max_length=20, null=True, blank=True)
    observaciones = models.TextField(max_length=100, null=True, blank=True)
class RegistroFermentacion(models.Model):
    """Periodic measurement taken during fermentation of a batch."""

    class Meta:
        verbose_name = "Registro de Fermentación"
        verbose_name_plural = "Registros de Fermentación"

    seguimiento_control_fermentacion = models.ForeignKey(
        'SeguimientoFermentacion', on_delete=models.CASCADE, null=True)
    fecha = models.DateField(null=True, blank=True)
    hora = models.TimeField(
        choices=constants.HORA, null=True, blank=True)
    densidad = models.CharField(max_length=20, null=True, blank=True)
    temp_sala = models.CharField(
        help_text="grados Centígrados", max_length=10, null=True, blank=True)
    temp_mosto = models.CharField(
        help_text="grados Centígrados", max_length=10, null=True, blank=True)
    pH = models.CharField(max_length=20, null=True, blank=True)
    observaciones = models.TextField(
        max_length=100, null=True, blank=True)
# PLANILLA CONTROL DE CLARIFICACION / FILTRACION
class SeguimientoClarificacionFiltracion(CreationModificationDateMixin):
    """
    Clarification/filtration control process; the whole process is
    identified by a unique ID, the lote_nro.
    """
    class Meta:
        verbose_name = "Seguimiento Clarificación y Filtración"
        verbose_name_plural = "Seguimientos de Clarificación y Filtración"
    # The batch (Lote) is the primary key: one sheet per batch.
    lote = models.OneToOneField(
        Lote, on_delete=models.CASCADE, primary_key=True)
    placa_tipo = models.CharField(max_length=10, null=True, blank=True)
    placa_cantidad = models.PositiveIntegerField(null=True, blank=True)
    def __str__(self):
        return str(f"Planilla de Control de Clarificación/Filtración - Lote número {self.lote.lote_nro}")
class RegistroClarificacionFiltracion(models.Model):
    """Single clarification/filtration run row for a follow-up sheet."""
    class Meta:
        # Rows are presented in the explicit `orden` sequence.
        ordering = ["orden"]
        verbose_name = "Registro Clarificación y Filtración"
        verbose_name_plural = "Registros de Clarificación y Filtración"
    seguimiento_control_clarificacion_filtracion = models.ForeignKey(
        'SeguimientoClarificacionFiltracion',
        on_delete=models.CASCADE, null=True)
    orden = models.PositiveIntegerField()
    origen = models.CharField(max_length=20, null=True, blank=True)
    # WIP: original note said "must be changed to a FK to the Barril class";
    # it already is a FK to 'Barril' — confirm and drop the note.
    destino_barril = models.ForeignKey('Barril',
                                       on_delete=models.CASCADE,
                                       blank=True, null=True)
    hora_inicio = models.TimeField(
        choices=constants.HORA, null=True, blank=True)
    kg_fin = models.FloatField(null=True, blank=True)
    presion_filtro = models.CharField(max_length=10, null=True, blank=True)
    observaciones = models.TextField(
        max_length=100, help_text="Turbidez", null=True, blank=True)
# PLANILLA CONTROL DE CARBONATACION
class SeguimientoCarbonatacion(CreationModificationDateMixin):
    """
    Third follow-up sheet of the production process; the whole follow-up
    is identified by a unique ID, the lote_nro.
    """
    fecha_fin = models.DateField(null=True, blank=True)
    # The batch (Lote) is the primary key: one sheet per batch.
    lote = models.OneToOneField(
        Lote, on_delete=models.CASCADE, primary_key=True)
    observaciones = models.TextField(
        max_length=100, null=True, blank=True)
    def __str__(self):
        return str(f"Planilla de Carbonatación - Lote número {self.lote.lote_nro}")
|
"""Upgrades Class."""
from fmcapi.api_objects.apiclasstemplate import APIClassTemplate
from fmcapi.api_objects.device_services.devicerecords import DeviceRecords
from .upgradepackages import UpgradePackages
import logging
class Upgrades(APIClassTemplate):
    """
    The Upgrades Object in the FMC.
    NOTE: This should be called UpgradePackage but that collides with a Deprecated name for UpgradePackages.
    We can rename this after we remove that deprecation... which will be a while from now.
    """

    VALID_JSON_DATA = [
        "id",
        "name",
        "type",
        "upgradePackage",
        "targets",
        "pushUpgradeFileOnly",
    ]
    VALID_FOR_KWARGS = VALID_JSON_DATA + []
    URL_SUFFIX = "/updates/upgrades"

    def __init__(self, fmc, **kwargs):
        """
        Initialize an Upgrades object.

        Sets self.type to "Upgrade", builds self.URL from the platform URL,
        and parses any remaining keyword arguments.

        :param fmc (object): FMC object
        :param **kwargs: Any other values passed during instantiation.
        :return: None
        """
        super().__init__(fmc, **kwargs)
        logging.debug("In __init__() for Upgrades class.")
        self.type = "Upgrade"
        self.URL = f"{self.fmc.platform_url}{self.URL_SUFFIX}"
        self.parse_kwargs(**kwargs)

    def upgrade_package(self, package_name):
        """
        Attach the named upgrade package to this object (sets self.upgradePackage).

        :param package_name: (str) Name of package to upgrade
        :return: None
        """
        logging.debug("In upgrade_package() for Upgrades class.")
        pkg = UpgradePackages(fmc=self.fmc)
        pkg.get(name=package_name)
        # A successful lookup populates "id"; otherwise the package is unknown.
        if "id" not in pkg.__dict__:
            logging.warning(
                f'UpgradePackage "{package_name}" not found. Cannot add package to Upgrades.'
            )
            return
        self.upgradePackage = {"id": pkg.id, "type": pkg.type}

    def devices(self, devices):
        """
        Resolve a list of device names into upgrade targets (self.targets).

        :param devices: (list) List of device names.
        :return: None
        """
        logging.debug("In devices() for Upgrades class.")
        for device in devices:
            rec = DeviceRecords(fmc=self.fmc)
            rec.get(name=device)
            if "id" not in rec.__dict__:
                logging.warning(
                    f'Device "{device}" not found. Cannot prepare devices for Upgrades.'
                )
                continue
            target = {"id": rec.id, "type": rec.type, "name": rec.name}
            if "targets" in self.__dict__:
                self.targets.append(target)
            else:
                self.targets = [target]

    def get(self):
        """GET method for API for Upgrades not supported."""
        logging.info("GET method for API for Upgrades not supported.")

    def post(self, **kwargs):
        """Run POST call on FMC; returns a task status object."""
        logging.debug("In post() for Upgrades class.")
        # Posting an upgrade must not trigger an automatic deployment.
        self.fmc.autodeploy = False
        return super().post(**kwargs)

    def put(self):
        """PUT method for API for Upgrades not supported."""
        logging.info("PUT method for API for Upgrades not supported.")

    def delete(self):
        """DELETE method for API for Upgrades not supported."""
        logging.info("DELETE method for API for Upgrades not supported.")
|
#%%
import attr
from typing import Dict, List
import gzip
import argparse
import json
from collections import defaultdict
from typing import DefaultDict, Set, Optional, Tuple
#%%
@attr.s
class TrecRunEntry(object):
    """One line of a TREC run file: query, doc, rank, score, system id."""
    query_id = attr.ib(type=str)
    para_id = attr.ib(type=str)
    score = attr.ib(type=float)
    rank = attr.ib(type=int)
    system = attr.ib(type=str)

    def to_output(self):
        """Render this entry in the standard 6-column TREC run format."""
        columns = (self.query_id, "Q0", self.para_id, self.rank, self.score, self.system)
        return " ".join(str(col) for col in columns)
@attr.s
class QueryOutput(object):
    """One query's decoded output: its paragraphs plus provenance records."""
    squid = attr.ib(type=str)
    run_id = attr.ib(type=str)
    title = attr.ib(type=str)
    paragraphs = attr.ib(type=List[Dict])
    paragraph_origins = attr.ib(type=List[Dict])
    query_facets = attr.ib(type=Optional[List[Dict]])

    def to_section_trecrun(self) -> List[TrecRunEntry]:
        """Emit one TREC run entry per paragraph origin, keyed by section path."""
        system = self.run_id
        output = []
        for po in self.paragraph_origins:
            rank = po.get("rank", 0)
            para_id = po["para_id"]
            score = po["rank_score"]
            query_id = po["section_path"]
            output.append(TrecRunEntry(query_id, para_id, score, rank, system))
        return output

    def to_page_trecrun(self) -> List[TrecRunEntry]:
        """
        Fuse the per-section rankings into one page-level ranking.

        Note, the scores here may be nonsense: if each heading was a different
        query, they are not comparable, so we perform Reciprocal Rank Fusion:
        each paragraph's fused score is the sum of 1/rank over its origins.
        """
        system = self.run_id
        query_id = self.squid
        output = []
        ranks_for_doc: DefaultDict[str, List[float]] = defaultdict(list)
        for po in self.paragraph_origins:
            rank = po.get("rank", 1)
            # Guard max(1, rank) keeps a 0/negative rank from dividing by zero.
            ranks_for_doc[po["para_id"]].append(1.0 / max(1, rank))
        for (doc, recip_ranks) in ranks_for_doc.items():
            score = sum(recip_ranks)
            # BUG FIX: previously appended `para_id` — the stale loop variable
            # from the loop above — so every fused entry pointed at the last
            # paragraph seen. Use this document's own id.
            output.append(TrecRunEntry(query_id, doc, score, -1, system))
        sorted_by_score = sorted(output, key=lambda tre: tre.score, reverse=True)
        # Re-number ranks 1..n by descending fused score.
        for (i, tre) in enumerate(sorted_by_score):
            tre.rank = i + 1
        return sorted_by_score

    def para_id_order(self) -> List[str]:
        """Paragraph ids in page order."""
        return [para["para_id"] for para in self.paragraphs]

    def paragraph_transitions(self):
        """Adjacent (prev, next) paragraph-id pairs in page order."""
        paras = self.para_id_order()
        return list(zip(paras, paras[1:]))
def get_paragraph_transitions(input: str) -> Dict[str, List[Tuple[str, str]]]:
    """Map each query's squid to its adjacent paragraph-id pairs."""
    found = {}
    with gzip.open(input, "rt") as fp:
        for line in fp:
            record = json.loads(line)
            # Older dumps may lack query_facets; default it for the constructor.
            record.setdefault("query_facets", [])
            query = QueryOutput(**record)
            found[query.squid] = query.paragraph_transitions()
    return found
def convert_to_section_trecrun(input: str, output: str):
    """Write section-level TREC run lines for every query in the gzipped input."""
    with open(output, "w") as out, gzip.open(input, "rt") as fp:
        for line in fp:
            record = json.loads(line)
            # Older dumps may lack query_facets; default it for the constructor.
            record.setdefault("query_facets", [])
            query = QueryOutput(**record)
            # TREC run files reject duplicate doc ids per query; suffix "_dup"
            # until the id is unique within this query.
            seen_docids: DefaultDict[str, Set[str]] = defaultdict(set)
            for entry in query.to_section_trecrun():
                while entry.para_id in seen_docids[entry.query_id]:
                    entry.para_id += "_dup"
                seen_docids[entry.query_id].add(entry.para_id)
                print(entry.to_output(), file=out)
if __name__ == "__main__":
    # CLI: INPUT is a gzipped JSON-lines dump, OUTPUT the TREC run file.
    ap = argparse.ArgumentParser()
    ap.add_argument("INPUT", type=str)
    ap.add_argument("OUTPUT", type=str)
    ns = ap.parse_args()
    convert_to_section_trecrun(ns.INPUT, ns.OUTPUT)
|
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Verifies whether any encodings have changed compared to what's in the
# database. Intended for use after upgrading software, or for checking
# whether specific parameters generate stable encodings.
#
import argparse
import sys
import encoder
import pick_codec
def StableEncode(codec_name, configuration, bitrate, videofile_name):
  # Re-runs one encode and compares it against what the database holds.
  # Returns the value of VerifyEncode() — presumably True when the encode
  # is stable/unchanged (TODO confirm in the encoder module).
  # NOTE: Python 2 module (print statements below).
  codec = pick_codec.PickCodec(codec_name)
  my_context = encoder.Context(codec, encoder.EncodingDiskCache)
  my_encoder = encoder.Encoder(my_context,
                               parameters=encoder.OptionValueSet(
                                 codec.option_set, configuration))
  my_encoding = my_encoder.Encoding(bitrate, encoder.Videofile(videofile_name))
  my_encoding.Recover()
  if not my_encoding.Result():
    # For detecting unstable encodings, we may want to try parameters
    # that have not been run before. So we generate the baseline if missing.
    print "Producing baseline encode"
    my_encoding.Execute().Store()
  return my_encoding.VerifyEncode()
def main():
  # Parse the CLI and report whether the encoding is stable.
  # Note: always returns None, so the process exit code never reflects
  # instability.
  parser = argparse.ArgumentParser('Verifies a specific configuration')
  parser.add_argument('--codec')
  parser.add_argument('configuration', help='Parameters to use. '
                      'Remember to quote the string and put'
                      '"--" in the command line before it if needed.')
  parser.add_argument('rate')
  parser.add_argument('videofile')
  args = parser.parse_args()
  if StableEncode(args.codec, args.configuration, int(args.rate),
                  args.videofile):
    print 'Encoding is stable'
  else:
    print 'Encoding is NOT stable'
if __name__ == '__main__':
  # main() returns None, so this always exits 0 — stability is reported
  # only via stdout, not the exit code.
  sys.exit(main())
|
#!/usr/bin/env python3
# coding: utf-8
from .app import run
if __name__ == '__main__':
    # Entry point: delegates straight to the app's run() function.
    run()
|
from pylab import *
from numpy import *
from PIL import Image
import harris
import imresize
# Load both images as grayscale arrays.
im1 = array(Image.open("C://Users//HASEE//Desktop//crans_1_small.jpg").convert("L"))
im2 = array(Image.open("C://Users//HASEE//Desktop//crans_2_small.jpg").convert("L"))

# Halve the image size to speed up matching (integer // division needed on
# newer library versions, per the original note).
# BUG FIX: `imresize` is an imported *module*; calling it directly raised
# TypeError. Call the resize function inside it instead (assumed to be
# `imresize.imresize` — confirm against the local module).
im1 = imresize.imresize(im1, (im1.shape[1] // 2, im1.shape[0] // 2))
im2 = imresize.imresize(im2, (im2.shape[1] // 2, im2.shape[0] // 2))

# Descriptor window radius; interest points keep a margin of wid+1 pixels.
wid = 5
harrisim = harris.compute_harris_response(im1, 5)
filtered_coords1 = harris.get_harris_points(harrisim, wid + 1)
d1 = harris.get_descriptors(im1, filtered_coords1, wid)

harrisim = harris.compute_harris_response(im2, 5)
filtered_coords2 = harris.get_harris_points(harrisim, wid + 1)
d2 = harris.get_descriptors(im2, filtered_coords2, wid)

print('starting matching')
matches = harris.match_twosided(d1, d2)

# Visualize the matched interest points side by side.
figure()
gray()
harris.plot_matches(im1, im2, filtered_coords1, filtered_coords2, matches)
show()
|
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from datetime import datetime
from typing import List, Any, Optional
from ethtx.models.base_model import BaseModel
from ethtx.models.objects_model import BlockMetadata, TransactionMetadata
from ethtx.models.semantics_model import AddressSemantics, ERC20Semantics
class AddressInfo(BaseModel):
    """Display information for an on-chain address: raw address, resolved name, optional badge."""
    address: str
    name: str
    badge: Optional[str]
class DecodedTransactionMetadata(BaseModel):
    """Top-level metadata of a decoded transaction (block, gas, parties, value)."""
    chain_id: Optional[str]
    tx_hash: str
    block_number: Optional[int]
    block_hash: Optional[str]
    timestamp: Optional[datetime]
    gas_price: Optional[int]
    sender: Optional[AddressInfo]
    receiver: Optional[AddressInfo]
    tx_index: int
    tx_value: int
    eth_price: Optional[float]
    gas_limit: int
    gas_used: int
    success: bool
class Argument(BaseModel):
    """A single decoded function/event argument: name, declared type, decoded value."""
    name: str
    type: str
    value: Any
class DecodedEvent(BaseModel):
    """A decoded log event emitted by a contract within a transaction."""
    chain_id: str
    tx_hash: str
    timestamp: datetime
    contract: AddressInfo
    index: int
    call_id: Optional[str]
    event_signature: str
    event_name: str
    parameters: List[Argument]
    # Per the field name, flags events decoded by guessing rather than an
    # exact match — TODO confirm against the decoder.
    event_guessed: bool = False
class DecodedCall(BaseModel):
    """A decoded call frame; `subcalls` nests the full call tree (self-reference
    works thanks to `from __future__ import annotations` at the top of the file)."""
    chain_id: str
    timestamp: datetime
    tx_hash: str
    call_id: Optional[str]
    call_type: str
    from_address: AddressInfo
    to_address: AddressInfo
    value: float
    function_signature: str
    function_name: str
    arguments: List[Argument]
    outputs: List[Argument]
    gas_used: Optional[int]
    error: Optional[str]
    status: bool
    indent: int
    # NOTE(review): mutable default relies on BaseModel copying defaults per
    # instance (pydantic does) — confirm BaseModel is pydantic-based.
    subcalls: List[DecodedCall] = []
    function_guessed: bool = False
class DecodedTransfer(BaseModel):
    """A decoded token/ether transfer between two addresses."""
    from_address: AddressInfo
    to_address: AddressInfo
    token_address: Optional[str]
    token_symbol: str
    token_standard: Optional[str]
    value: float
class DecodedBalance(BaseModel):
    """Token balances held by one address (`tokens` entries are untyped dicts)."""
    holder: AddressInfo
    tokens: List[dict]
class DecodedTransaction(BaseModel):
    """Complete decoding result for one transaction."""
    block_metadata: BlockMetadata
    metadata: TransactionMetadata
    events: List[DecodedEvent]
    # Presumably the single root frame of the call tree, despite the plural
    # name — TODO confirm against the decoder.
    calls: Optional[DecodedCall]
    transfers: List[DecodedTransfer]
    balances: List[DecodedBalance]
    status: bool = False
class Proxy(BaseModel):
    """A detected proxy contract and the semantics resolved for it."""
    address: str
    name: str
    type: str
    semantics: Optional[List[AddressSemantics]]
    token: Optional[ERC20Semantics]
|
"""
client.py
---------
Helper methods for performing HTTP calls.
Wrapper around requests module.
"""
import requests
import base64
# declare constants here
DEFAULT_TIMEOUT = 30  # in seconds
# Raw string: "\C" and "\c" in the old literal were invalid escape sequences
# (DeprecationWarning today, a SyntaxError in future Pythons). Value unchanged.
CREDS_FILE = r"C:\Creds\creds.txt"
def get_creds(filename):
    """
    Read credentials from a file. Format is:
    URL
    USERNAME
    PASSWORD

    Parameters
    ----------
    filename: string
        path to the file with the credentials

    Returns
    -------
    list as [URL, USERNAME, PASSWORD]

    Raises
    ------
    ValueError
        if the file does not contain exactly three lines
    """
    # `with` guarantees the handle is closed; the original leaked it.
    with open(filename, "r") as f:
        lines = f.readlines()
    if len(lines) != 3:
        raise ValueError("Not a valid credentials file.")
    # strip out trailing newline/whitespace chars
    return [x.rstrip() for x in lines]
def get_url():
    """
    Returns the base URL of the instance, without a trailing slash.
    """
    url = get_creds(CREDS_FILE)[0]
    # drop a single trailing slash, if present
    return url[:-1] if url[-1] == "/" else url
def get_username():
    """
    Returns the username (email) from the credentials file.
    """
    _, username, _ = get_creds(CREDS_FILE)
    return username
def get_password():
    """
    Returns the password from the credentials file.
    """
    _, _, password = get_creds(CREDS_FILE)
    return password
#----------------------------
# Header info
#----------------------------
def gen_auth_header(username, password):
    """
    Build an HTTP Basic Authorization header value for the given
    username/password pair.
    """
    raw = "{0}:{1}".format(username, password).encode("utf-8")
    # base64 of "user:pass", decoded back to str for the header value
    token = base64.b64encode(raw).decode("utf-8")
    return "Basic {0}".format(token)
def get_basic_auth_header():
    """Basic auth header value built from the stored credentials."""
    return gen_auth_header(get_username(), get_password())
def get_headers():
    """Default request headers: basic auth, JSON content type, CSRF opt-out."""
    return {
        "authorization": get_basic_auth_header(),
        "content-type": "application/json",
        "x-csrf-header": "-",
    }
#----------------------------
# HTTP methods
#----------------------------
def handle_response(r, http_method, custom_err):
    """
    Handles the HTTP response and returns the JSON body.

    Parameters
    ----------
    r: requests module's response
    http_method: string
        "GET", "POST", "PUT", etc.
    custom_err: string
        the custom error message if any

    Returns
    -------
    json : dict (empty on error or empty body)
    """
    # Non-200: report, then let raise_for_status() raise for 4xx/5xx.
    if r.status_code != requests.codes.ok:
        if custom_err is not None:
            print(custom_err)
        print("Status code: " + str(r.status_code))
        if r.text:
            print(r.text)
        r.raise_for_status()
        return {}
    # 200 with an empty body: nothing to parse.
    if not r.text:
        print("{0} returned an empty response.".format(http_method))
        return {}
    return r.json()
def get(url_ext, query_params=None, custom_err=None, timeout=DEFAULT_TIMEOUT):
    """
    Performs a GET on base_url + url_ext
    e.g. https://my-instance.com + /relativity.REST

    Parameters
    ----------
    url_ext: string
        this is the URL endpoint we are hitting
        (without the host name)
    query_params: dict
        specifies any additional query strings and/or replaces default ones
    custom_err: string
        specifies a custom error message
    timeout: int
        max time the request should wait in seconds

    Returns
    -------
    json : a JSON object (as Python dict)
        the response from the request
    """
    # FIX: a mutable default argument ({}) is shared across calls; use the
    # None sentinel instead. Behavior for all existing callers is unchanged.
    if query_params is None:
        query_params = {}
    url = get_url() + url_ext
    # get request headers
    headers = get_headers()
    r = requests.get(url, params=query_params, headers=headers, timeout=timeout)
    return handle_response(r, "GET", custom_err)
def post(url_ext, query_params=None, payload=None, custom_err=None, timeout=DEFAULT_TIMEOUT):
    """
    Performs a POST on the base_url + url_ext

    Parameters
    ----------
    url_ext: string
        this is the URL endpoint we are hitting
        (without the host name)
    query_params: dict
        specifies any additional query strings and/or replaces default ones
    payload: dict
        JSON payload for request
    custom_err: string
        specifies a custom error message
    timeout: int
        max time the request should wait in seconds

    Returns
    -------
    json : a JSON object (as Python dict)
        the response from the request
    """
    # FIX: mutable default arguments ({}) are shared across calls; use the
    # None sentinel instead. Behavior for all existing callers is unchanged.
    if query_params is None:
        query_params = {}
    if payload is None:
        payload = {}
    url = get_url() + url_ext
    headers = get_headers()
    r = requests.post(url, headers=headers, params=query_params, data=payload, timeout=timeout)
    return handle_response(r, "POST", custom_err)
# Generated by Django 2.0.3 on 2019-07-25 05:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds menu item models (Dinner_Platter, Pasta,
    Salad, Sub, Sub_Order, Pizza_Order) and wires them into Order via M2M
    fields. References to 'orders.Pizza' and 'orders.Topping' are defined by
    earlier migrations in this app."""
    dependencies = [
        ('orders', '0004_auto_20190724_1829'),
    ]
    operations = [
        migrations.CreateModel(
            name='Dinner_Platter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dinner_platter', models.CharField(max_length=64)),
                ('price', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dinner_platters', models.ManyToManyField(blank=True, to='orders.Dinner_Platter')),
            ],
        ),
        migrations.CreateModel(
            name='Pasta',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pasta', models.CharField(max_length=64)),
                ('price', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Pizza_Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pizza', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Pizza')),
                ('toppings', models.ManyToManyField(blank=True, to='orders.Topping')),
            ],
        ),
        migrations.CreateModel(
            name='Salad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('salad', models.CharField(max_length=64)),
                ('price', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Sub',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('style', models.CharField(max_length=64)),
            ],
        ),
        migrations.CreateModel(
            name='Sub_Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('extra_cheese', models.BooleanField(default=False)),
                ('price', models.FloatField()),
                ('sub', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orders.Sub')),
            ],
        ),
        # Remaining operations attach M2M collections to the new Order model.
        migrations.AddField(
            model_name='order',
            name='pastas',
            field=models.ManyToManyField(blank=True, to='orders.Pasta'),
        ),
        migrations.AddField(
            model_name='order',
            name='pizza_orders',
            field=models.ManyToManyField(blank=True, to='orders.Pizza_Order'),
        ),
        migrations.AddField(
            model_name='order',
            name='salads',
            field=models.ManyToManyField(blank=True, to='orders.Salad'),
        ),
        migrations.AddField(
            model_name='order',
            name='sub_orders',
            field=models.ManyToManyField(blank=True, to='orders.Sub_Order'),
        ),
    ]
|
import sqlite3
import shutil
from datetime import datetime
import os
from os import listdir
import csv
from logger import App_Logger
class DbOperation:
    """
    This class shall be used for handling all the SQL operations and Data Type Validation.
    Written By: Durgesh Kumar
    Version: 1.0
    Revisions: None
    """
    def __init__(self):
        # Fixed working directories for the training pipeline (relative to CWD).
        self.path = 'TrainingDataBase/'
        self.BadFilesPath = "TrainingRawValidatedFiles/BadRaw"
        self.GoodFilesPath = "TrainingRawValidatedFiles/GoodRaw"
        self.logger = App_Logger()
    def DataBaseConnection(self, DataBaseName):
        """
        Method Name: DataBaseConnection
        Description: This method creates the database with the given name and if Database already exists then opens the connection to the DB.
        Output: Connection to the DB
        On Failure: Raise ConnectionError
        Written By: Durgesh Kumar
        Version: 1.0
        Revisions: None
        """
        try:
            # sqlite3.connect creates the .db file on first use.
            conn = sqlite3.connect(self.path+DataBaseName+'.db')
            file = open("TrainingLog/DataBaseConnectionLog.txt", 'a+')
            self.logger.log(file, "Opened %s DataBase Successfully" %DataBaseName)
            file.close()
        except ConnectionError:
            # NOTE(review): sqlite3 failures normally raise sqlite3.Error, not
            # the builtin ConnectionError, so this handler may never fire —
            # confirm the intended failure mode.
            file = open("TrainingLog/DataBaseConnectionLog.txt", 'a+')
            self.logger.log(file, "Error While connecting to Database : %s" %ConnectionError)
            file.close()
            raise ConnectionError
        return conn
    def CreateTableInDB(self,DataBaseName,ColumnName):
        """
        Method Name: CreateTableInDB
        Description: This method creates a table in the given database which will be used to insert the Good data after raw data validation.
                     `ColumnName` is a dict mapping column name -> SQL data type.
        Output: None
        On Failure: Raise Exception
        Written By: Durgesh Kumar
        Version: 1.0
        Revisions: None
        """
        try:
            conn = self.DataBaseConnection(DataBaseName)
            c = conn.cursor()
            # Check whether GoodRawData already exists. (The missing space in
            # 'table'AND appears to parse because the closing quote ends the
            # token — confirm, and consider adding the space.)
            c.execute("SELECT count(name) FROM sqlite_master WHERE type = 'table'AND name = 'GoodRawData'")
            if c.fetchone()[0] == 1:
                # Table already present: nothing to create, just log and close.
                conn.close()
                file = open("TrainingLog/DataBaseConnectionLog.txt", 'a+')
                self.logger.log(file, "Tables Created Successfully!!!")
                file.close()
                file = open("TrainingLog/DataBaseConnectionLog.txt", 'a+')
                self.logger.log(file, "Closed %s database Successfully!!" %DataBaseName)
                file.close()
            else:
                for key in ColumnName.keys():
                    type = ColumnName[key]  # NOTE: shadows the builtin `type`
                    # in try block we check if the table exists, if yes then add columns to the table
                    # else in catch block we will create the table
                    try:
                        # cur = cur.execute("SELECT name FROM {dbName} WHERE type='table' AND name='GoodRawData'".format(dbName=DatabaseName))
                        # NOTE(review): SQL assembled via str.format — safe only
                        # if column names/types come from trusted schema config.
                        conn.execute('ALTER TABLE GoodRawData ADD COLUMN "{ColumnName}" {dataType}'.format(ColumnName =key, dataType=type))
                    except:
                        # Bare except used as control flow: the first key's
                        # ALTER fails (no table yet) and creates the table;
                        # subsequent keys take the ALTER branch.
                        conn.execute('CREATE TABLE GoodRawData ({ColumnName} {dataType})'.format(ColumnName =key, dataType=type))
                conn.close()
                file = open("TrainingLog/DataBaseConnectionLog.txt", 'a+')
                self.logger.log(file, "Tables created successfully!!")
                file.close()
                file = open("TrainingLog/DataBaseConnectionLog.txt", 'a+')
                self.logger.log(file, "Closed %s database successfully" %DataBaseName)
                file.close()
        except Exception as e:
            # Log the failure, close what we can, and re-raise for the caller.
            file = open ("TrainingLog/DataBaseConnectionLog.txt", 'a+')
            self.logger.log(file, "Error While Creating table: %s" %e)
            file.close()
            conn.close()
            file = open("TrainingLog/DataBaseConnectionLog.txt", 'a+')
            self.logger.log(file, "Closed %s DataBase Successfully!!!" %DataBaseName)
            file.close()
            raise e
    def InsertGoodDataIntoTable(self,DataBase):
        """
        Method Name: InsertGoodDataIntoTable
        Description: This method inserts the Good data files from the Good_Raw folder into the
                     above created table. Files that fail to insert are moved to the BadRaw folder.
        Output: None
        On Failure: Raise Exception
        Written By: Durgesh Kumar
        Version: 1.0
        Revisions: None
        """
        conn = self.DataBaseConnection(DataBase)
        GoodFilesPath = self.GoodFilesPath
        BadFilesPath = self.BadFilesPath
        OnlyFiles = [f for f in listdir(GoodFilesPath)]
        file = open("TrainingLog/DBInsertLog.txt", 'a+')
        for files in OnlyFiles:
            try:
                with open(GoodFilesPath + '/' + files, "r") as f:
                    next(f)  # skip the CSV header row
                    reader = csv.reader(f, delimiter="\n")
                    for line in enumerate(reader):
                        for list_ in (line[1]):
                            try:
                                # NOTE(review): the raw CSV row text is spliced
                                # directly into the INSERT statement; this only
                                # works if the data is pre-quoted/escaped —
                                # parameterized queries would be safer.
                                conn.execute('INSERT INTO GoodRawData values ({values})'.format(values=(list_)))
                                self.logger.log(file,"%s: File Loaded Successfully!!!" %files)
                                conn.commit()
                            except Exception as e:
                                raise e
            except Exception as e:
                # Insertion failed: undo partial work and quarantine the file.
                conn.rollback()
                self.logger.log(file, "Error While Creating table: %s" %e)
                shutil.move(GoodFilesPath+'/'+files, BadFilesPath)
                self.logger.log(file, "File Moved Successfully : %s" %files)
                file.close()
                # NOTE(review): `file` and `conn` are closed here but reused by
                # later loop iterations and closed again below — if a bad file
                # is not last, subsequent logging/inserts hit closed handles.
                # Confirm and restructure.
                conn.close()
        conn.close()
        file.close()
    def SelectingDataFromTableIntoCsv(self,DataBase):
        """
        Method Name: SelectingDataFromTableIntoCsv
        Description: This method exports the data in GoodRawData table as a CSV file. in a given location.
                     above created .
        Output: None
        On Failure: Raise Exception
        Written By: Durgesh Kumar
        Version: 1.0
        Revisions: None
        """
        # NOTE: 'Trainging' is a typo but is the on-disk directory name other
        # components may rely on — do not "fix" without migrating the folder.
        self.FileFromDB = 'TraingingFilesFromDB/'
        self.FileName = "InputFile.csv"
        file = open("TrainingLog/ExportToCsvLog.txt", 'a+')
        try:
            conn = self.DataBaseConnection(DataBase)
            SqlSelect = "SELECT * FROM GoodRawData"
            cursor = conn.cursor()
            cursor.execute(SqlSelect)
            result = cursor.fetchall()
            # Get the headers of the Csv Files
            headers = [i[0] for i in cursor.description]
            # Make the CSV Output Directory
            if not os.path.isdir(self.FileFromDB):
                os.makedirs(self.FileFromDB)
            # Open CSV File for writing. NOTE(review): the output file handle
            # is never closed explicitly — confirm this is acceptable here.
            CsvFile = csv.writer(open(self.FileFromDB + self.FileName, 'w' , newline='') , delimiter = ',' , lineterminator = '\r\n' , quoting=csv.QUOTE_ALL, escapechar='\\')
            # Add the header and data To Csv FIle
            CsvFile.writerow(headers)
            CsvFile.writerows(result)
            self.logger.log(file , "File Exported Successfully!!!!")
            file.close()
        except Exception as e:
            self.logger.log(file , "File Exported Failed. Error : %s" %e)
            file.close()
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
class AuthIdNtoken(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
AuthIdNtoken - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'additional_id': 'list[GroupMember]',
'gid': 'GroupMember',
'group_sid': 'GroupMember',
'ifs_restricted': 'bool',
'local_address': 'str',
'on_disk_group_id': 'GroupMember',
'on_disk_user_id': 'GroupMember',
'privilege': 'list[AuthIdNtokenPrivilegeItem]',
'protocol': 'int',
'remote_address': 'str',
'uid': 'GroupMember',
'user_sid': 'GroupMember',
'zid': 'int',
'zone_id': 'str'
}
self.attribute_map = {
'additional_id': 'additional_id',
'gid': 'gid',
'group_sid': 'group_sid',
'ifs_restricted': 'ifs_restricted',
'local_address': 'local_address',
'on_disk_group_id': 'on_disk_group_id',
'on_disk_user_id': 'on_disk_user_id',
'privilege': 'privilege',
'protocol': 'protocol',
'remote_address': 'remote_address',
'uid': 'uid',
'user_sid': 'user_sid',
'zid': 'zid',
'zone_id': 'zone_id'
}
self._additional_id = None
self._gid = None
self._group_sid = None
self._ifs_restricted = None
self._local_address = None
self._on_disk_group_id = None
self._on_disk_user_id = None
self._privilege = None
self._protocol = None
self._remote_address = None
self._uid = None
self._user_sid = None
self._zid = None
self._zone_id = None
@property
def additional_id(self):
"""
Gets the additional_id of this AuthIdNtoken.
:return: The additional_id of this AuthIdNtoken.
:rtype: list[GroupMember]
"""
return self._additional_id
@additional_id.setter
def additional_id(self, additional_id):
"""
Sets the additional_id of this AuthIdNtoken.
:param additional_id: The additional_id of this AuthIdNtoken.
:type: list[GroupMember]
"""
self._additional_id = additional_id
@property
def gid(self):
"""
Gets the gid of this AuthIdNtoken.
A persona consists of either a 'type' and 'name' or a 'ID'.
:return: The gid of this AuthIdNtoken.
:rtype: GroupMember
"""
return self._gid
@gid.setter
def gid(self, gid):
"""
Sets the gid of this AuthIdNtoken.
A persona consists of either a 'type' and 'name' or a 'ID'.
:param gid: The gid of this AuthIdNtoken.
:type: GroupMember
"""
self._gid = gid
@property
def group_sid(self):
"""
Gets the group_sid of this AuthIdNtoken.
A persona consists of either a 'type' and 'name' or a 'ID'.
:return: The group_sid of this AuthIdNtoken.
:rtype: GroupMember
"""
return self._group_sid
@group_sid.setter
def group_sid(self, group_sid):
"""
Sets the group_sid of this AuthIdNtoken.
A persona consists of either a 'type' and 'name' or a 'ID'.
:param group_sid: The group_sid of this AuthIdNtoken.
:type: GroupMember
"""
self._group_sid = group_sid
@property
def ifs_restricted(self):
"""
Gets the ifs_restricted of this AuthIdNtoken.
Indicates if this user has restricted access to the /ifs file system.
:return: The ifs_restricted of this AuthIdNtoken.
:rtype: bool
"""
return self._ifs_restricted
@ifs_restricted.setter
def ifs_restricted(self, ifs_restricted):
"""
Sets the ifs_restricted of this AuthIdNtoken.
Indicates if this user has restricted access to the /ifs file system.
:param ifs_restricted: The ifs_restricted of this AuthIdNtoken.
:type: bool
"""
self._ifs_restricted = ifs_restricted
@property
def local_address(self):
"""
Gets the local_address of this AuthIdNtoken.
The IP address of the node that is servicing the request.
:return: The local_address of this AuthIdNtoken.
:rtype: str
"""
return self._local_address
@local_address.setter
def local_address(self, local_address):
"""
Sets the local_address of this AuthIdNtoken.
The IP address of the node that is servicing the request.
:param local_address: The local_address of this AuthIdNtoken.
:type: str
"""
self._local_address = local_address
@property
def on_disk_group_id(self):
"""
Gets the on_disk_group_id of this AuthIdNtoken.
A persona consists of either a 'type' and 'name' or a 'ID'.
:return: The on_disk_group_id of this AuthIdNtoken.
:rtype: GroupMember
"""
return self._on_disk_group_id
@on_disk_group_id.setter
def on_disk_group_id(self, on_disk_group_id):
"""
Sets the on_disk_group_id of this AuthIdNtoken.
A persona consists of either a 'type' and 'name' or a 'ID'.
:param on_disk_group_id: The on_disk_group_id of this AuthIdNtoken.
:type: GroupMember
"""
self._on_disk_group_id = on_disk_group_id
@property
def on_disk_user_id(self):
"""
Gets the on_disk_user_id of this AuthIdNtoken.
A persona consists of either a 'type' and 'name' or a 'ID'.
:return: The on_disk_user_id of this AuthIdNtoken.
:rtype: GroupMember
"""
return self._on_disk_user_id
@on_disk_user_id.setter
def on_disk_user_id(self, on_disk_user_id):
"""
Sets the on_disk_user_id of this AuthIdNtoken.
A persona consists of either a 'type' and 'name' or a 'ID'.
:param on_disk_user_id: The on_disk_user_id of this AuthIdNtoken.
:type: GroupMember
"""
self._on_disk_user_id = on_disk_user_id
@property
def privilege(self):
"""
Gets the privilege of this AuthIdNtoken.
Privileges held by the currently authenticated user.
:return: The privilege of this AuthIdNtoken.
:rtype: list[AuthIdNtokenPrivilegeItem]
"""
return self._privilege
@privilege.setter
def privilege(self, privilege):
"""
Sets the privilege of this AuthIdNtoken.
Privileges held by the currently authenticated user.
:param privilege: The privilege of this AuthIdNtoken.
:type: list[AuthIdNtokenPrivilegeItem]
"""
self._privilege = privilege
@property
def protocol(self):
    """
    Gets the protocol of this AuthIdNtoken.

    :return: The protocol of this AuthIdNtoken.
    :rtype: int
    """
    # NOTE(review): protocol semantics are not documented in this model —
    # confirm meaning against the upstream API specification.
    return self._protocol

@protocol.setter
def protocol(self, protocol):
    """
    Sets the protocol of this AuthIdNtoken.

    :param protocol: The protocol of this AuthIdNtoken.
    :type: int
    """
    self._protocol = protocol
@property
def remote_address(self):
    """
    Gets the remote_address of this AuthIdNtoken.

    The IP address of the client making the request for information.

    :return: The remote_address of this AuthIdNtoken.
    :rtype: str
    """
    # Simple stored-attribute accessor (generated swagger model code).
    return self._remote_address

@remote_address.setter
def remote_address(self, remote_address):
    """
    Sets the remote_address of this AuthIdNtoken.

    The IP address of the client making the request for information.

    :param remote_address: The remote_address of this AuthIdNtoken.
    :type: str
    """
    # No validation is performed; the value is stored as-is.
    self._remote_address = remote_address
@property
def uid(self):
    """
    Gets the uid of this AuthIdNtoken.

    A persona consists of either a 'type' and 'name' or a 'ID'.

    :return: The uid of this AuthIdNtoken.
    :rtype: GroupMember
    """
    # Simple stored-attribute accessor (generated swagger model code).
    return self._uid

@uid.setter
def uid(self, uid):
    """
    Sets the uid of this AuthIdNtoken.

    A persona consists of either a 'type' and 'name' or a 'ID'.

    :param uid: The uid of this AuthIdNtoken.
    :type: GroupMember
    """
    # No validation is performed; the value is stored as-is.
    self._uid = uid
@property
def user_sid(self):
    """
    Gets the user_sid of this AuthIdNtoken.

    A persona consists of either a 'type' and 'name' or a 'ID'.

    :return: The user_sid of this AuthIdNtoken.
    :rtype: GroupMember
    """
    # Simple stored-attribute accessor (generated swagger model code).
    return self._user_sid

@user_sid.setter
def user_sid(self, user_sid):
    """
    Sets the user_sid of this AuthIdNtoken.

    A persona consists of either a 'type' and 'name' or a 'ID'.

    :param user_sid: The user_sid of this AuthIdNtoken.
    :type: GroupMember
    """
    # No validation is performed; the value is stored as-is.
    self._user_sid = user_sid
@property
def zid(self):
    """
    Gets the zid of this AuthIdNtoken.

    The zone id that is serving the request.

    :return: The zid of this AuthIdNtoken.
    :rtype: int
    """
    # Simple stored-attribute accessor (generated swagger model code).
    return self._zid

@zid.setter
def zid(self, zid):
    """
    Sets the zid of this AuthIdNtoken.

    The zone id that is serving the request.

    :param zid: The zid of this AuthIdNtoken.
    :type: int
    """
    # No validation is performed; the value is stored as-is.
    self._zid = zid
@property
def zone_id(self):
    """
    Gets the zone_id of this AuthIdNtoken.

    The name of the zone serving the request.  (Note: despite the `_id`
    suffix, the declared type is str — the zone *name*.)

    :return: The zone_id of this AuthIdNtoken.
    :rtype: str
    """
    return self._zone_id

@zone_id.setter
def zone_id(self, zone_id):
    """
    Sets the zone_id of this AuthIdNtoken.

    The name of the zone serving the request.

    :param zone_id: The zone_id of this AuthIdNtoken.
    :type: str
    """
    # No validation is performed; the value is stored as-is.
    self._zone_id = zone_id
def to_dict(self):
    """
    Returns the model properties as a dict.

    Nested swagger models (anything exposing a ``to_dict`` method) are
    serialised recursively, both as list elements and as dict values;
    plain values are copied through unchanged.
    """
    result = {}
    # swagger_types maps attribute name -> declared type string.
    # iteritems is presumably the six compatibility helper imported at
    # module level (not visible here) — TODO confirm.
    for attr, _ in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            # Recurse into model elements, pass primitives through.
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Recurse into model-typed dict values only.
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value
    return result
def to_str(self):
    """
    Returns the string representation of the model.

    Delegates to pprint.pformat over the dict form, so nested models are
    rendered readably.
    """
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint` — reuse the pretty to_str() form.
    """
    return self.to_str()
def __eq__(self, other):
    """
    Returns true if both objects are equal.

    Bugfix: the previous implementation accessed ``other.__dict__``
    unconditionally, which raised AttributeError for objects without a
    ``__dict__`` (e.g. ints) and could report equality against unrelated
    types.  Guard on the class first, as newer swagger-codegen does.
    """
    if not isinstance(other, self.__class__):
        return False
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    """
    Returns true if both objects are not equal.

    Defined as the negation of ``==`` so it stays consistent with __eq__.
    """
    return not self == other
|
# Distribution/package name (setup-style metadata) — TODO(review): confirm
# where this constant is consumed.
name = "soan"
|
#!/bin/env python
#_*_coding:utf-8_*_
#Author:swht
#E-mail:qingbo.song@gmail.com
#Date:2015.11.24
#Version:V0.0.1
import shoplogin
import banklogin
import linecache
import time
import random
# Running total (yuan) of the current cart; reset/updated by showshopping()
# and read by buyshopping()/orders().
moneynum = 0
# Get the number of lines in a file (superseded chunked-read version kept
# for reference; note its return preceded close(), so the handle leaked).
# def countnum(filename):
#     count = 0
#     thefile = open(filename, 'rb')
#     while True:
#         buffer = thefile.read(8192*1024)
#         if not buffer:
#             break
#         count += buffer.count('\n')
#     return count
#     thefile.close()
def countnum(filename):
    """Return the number of newline-terminated lines in *filename*.

    A trailing line without a final ``'\\n'`` is not counted — callers
    rely on this (they count lines written with explicit newlines).

    :param filename: path of the text file to inspect
    :return: number of ``'\\n'`` characters in the file
    """
    # Context manager guarantees the handle is closed even on errors; the
    # original flush() on a read-only handle was a no-op and is dropped.
    with open(filename) as thefile:
        return thefile.read().count('\n')
def shopping(choiceshop):
    """Append the item on line *choiceshop* of shoplist.txt to the cart.

    :param choiceshop: 1-based line number of the chosen item
    """
    # Read a single line from the shop list file
    linecache.clearcache() # drop cached data so the latest file state is read
    line = linecache.getline('shoplist.txt',choiceshop)
    # Split the line into fields: [index, name, price] — assumed layout of
    # shoplist.txt, TODO confirm
    line = line.split()
    usershoplist = open('usershoplist.txt','a')
    print '''
================================================
你已将%s添加到购物车,消费%s元!
================================================
''' % (line[1],line[2])
    # Persist "name price" so showshopping()/orders() can parse it back
    usershoplistline = '''%s %s\n''' % (line[1],line[2])
    usershoplist.write(usershoplistline)
    usershoplist.close()
def clearshopping():
    """Empty (re-initialise) the cart file when the user leaves the shop."""
    # 'rb+' keeps the original semantics: the cart file must already exist.
    with open('usershoplist.txt', 'rb+') as cart_file:
        cart_file.truncate()  # drop all stored cart lines
def showshopping(title = "购物车"): # display cart contents under *title*
    """Print every cart line plus the running total.

    Side effect: recomputes the module-global ``moneynum`` from scratch.
    """
    data = open('usershoplist.txt').read()
    if len(data) == 0:
        print "你的购物车为空,请选择指定指令添加商品!"
    else:
        global moneynum
        moneynum = 0 # re-initialise the global so the total is not counted twice
        print '*****************%s******************'%(title)
        print '%s\t\t%s%s%s\t\t%s'% (u'商品名称',u'单价',u'*',u'数量',u'总价')
        # +1 because cart lines are addressed 1-based and range() excludes the end
        for num in range(1,countnum('usershoplist.txt')+1):
            # Fetch each cart line and split it into [name, price]
            linecache.clearcache() # drop cached data so we read fresh contents
            usershoplistline = linecache.getline('usershoplist.txt',num).strip('\n').split(' ')
            print "%s\t%s%s%s\t\t%s元\n" % (usershoplistline[0],usershoplistline[1],'*','1',usershoplistline[1]),
            # Accumulate into the global so orders()/buyshopping() see the total
            moneynum += int(usershoplistline[1])
        print "总金额: %d " % moneynum
def buyshopping():
    """Show the cart, then ask (at most 4 tries) to confirm checkout.

    'y'/'Y' -> write an order via orders() and enter the bank payment flow;
    'n'/'N' -> abort checkout.  Invalid/empty input consumes one try.
    """
    showshopping()
    global moneynum # total amount that has to be paid for this cart
    flag = 0
    while flag <= 3:
        choiceshoptry = raw_input('''请确认你本次消费总金额为%s元[y/n]''' % moneynum).strip()
        if len(choiceshoptry) == 0:
            flag += 1
            print "输入不能为空,请重新输入[y/n]确认!"
        else:
            if choiceshoptry == 'Y' or choiceshoptry == 'y' or choiceshoptry == 'N' or choiceshoptry == 'n':
                if choiceshoptry == 'y' or choiceshoptry == 'Y':
                    orders()
                    # print '跳转到信用卡支付页面'
                    banklogin.bank_login()
                if choiceshoptry == 'n' or choiceshoptry == 'N':
                    # e.g. formatted current time 2015-11-29 15:39:30
                    print '你本次确认不结算购物车内商品!%s' % time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
                break
            else:
                flag += 1
                print "你的输入有误,请重新输入[y/n]!"
    print "系统将退出购物车!"
    time.sleep(1)
def orders():
    """Append an order record (order id + every cart line) to the history
    file, followed by the amount due.

    Reads the module-global ``moneynum`` computed by showshopping().
    """
    global moneynum
    # Order id: current timestamp plus a random 3-digit suffix
    order_id = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time())) + '' + str(random.randint(100, 200))
    with open('usrshophistorylist.txt', 'a') as history_file:
        # Cart lines are addressed 1-based; range() excludes the end, hence +1
        for line_no in range(1, countnum('usershoplist.txt') + 1):
            linecache.clearcache()  # drop cached data so fresh contents are read
            cart_fields = linecache.getline('usershoplist.txt', line_no).strip('\n').split(' ')
            record = "%s\t%s\t%s%s%s\t\t%s\n" % (order_id, cart_fields[0], cart_fields[1], '*', '1', cart_fields[1])
            history_file.write(record)
        history_file.write('''需支付金额为: %d 元\n''' % moneynum)
|
from threading import Thread, Event
import time
def countdown(n: int, started_evt: Event):
    """Count down from *n* to 1, one tick every 5 seconds.

    Sets *started_evt* immediately so the launcher knows the worker runs.
    """
    print('countdown starting')
    started_evt.set()
    # Descending range replaces the manual while/decrement loop.
    for remaining in range(n, 0, -1):
        print('T-minus', remaining)
        time.sleep(5)
# Demo driver: the Event lets the main thread wait until the worker thread
# has actually entered countdown() before continuing.
started_evt=Event()
print("Launching countdown")
# Worker counts down from 10, one tick every 5 seconds.
t=Thread(target=countdown,args=(10,started_evt))
t.start()
# Block until countdown() signals that it is running.
started_evt.wait()
print('countdown is running')
import os
from scikits.samplerate import resample
import pandas as pd
import numpy as np
import logging
from copy import deepcopy
from braindecode.datasets.pylearn import DenseDesignMatrixWrapper
import lasagne
import theano
from zipfile import ZipFile
from zipfile import ZIP_DEFLATED
import StringIO
from braindecode.veganlasagne.layers import get_n_sample_preds
from braindecode.veganlasagne.monitors import get_reshaped_cnt_preds
from braindecode.datahandling.preprocessing import exponential_running_mean,\
exponential_running_var_from_demeaned
log = logging.getLogger(__name__)
def load_train(train_folder, i_subject, i_series):
    """Load one kaggle grasp-lift training series: EEG data plus labels.

    :param train_folder: folder holding the kaggle train csv files
    :param i_subject: 1-based subject number
    :param i_series: 1-based series number
    :return: tuple (data frame, labels frame), both without the id column
    """
    base_name = 'subj{:d}_series{:d}_data.csv'.format(i_subject, i_series)
    data = pd.read_csv(os.path.join(train_folder, base_name))
    # The events file sits next to the data file, same naming scheme.
    labels = pd.read_csv(
        os.path.join(train_folder, base_name.replace('_data', '_events')))
    # The id column merely encodes subject/series/sample, so drop it.
    return data.drop(['id'], axis=1), labels.drop(['id'], axis=1)
def load_test(test_folder, i_subject, i_series):
    """Load one kaggle grasp-lift evaluation series (no labels exist).

    :return: data frame without the redundant id column
    """
    base_name = 'subj{:d}_series{:d}_data.csv'.format(i_subject, i_series)
    frame = pd.read_csv(os.path.join(test_folder, base_name))
    return frame.drop(['id'], axis=1)  # id only encodes subject/series/sample
class KaggleGraspLiftSet(object):
    """ Dataset from the kaggle grasp lift competition.

    resample_half true means resampling to 250 Hz (from original 500 Hz).

    Fix: all ``xrange`` calls replaced by ``range`` — xrange exists only on
    Python 2 and raised NameError under Python 3; range is equivalent here
    on both interpreters.
    """
    reloadable = False  # tells the experiment framework not to reload

    def __init__(self, data_folder, i_subject, resample_half,
            standardize=False):
        self.data_folder = data_folder
        self.i_subject = i_subject
        self.resample_half = resample_half
        self.standardize = standardize

    def ensure_is_loaded(self):
        """Load lazily: only read from disk on first use."""
        if not hasattr(self, 'train_X_series'):
            self.load()

    def load(self):
        """Load train data, optionally resampling and standardizing it."""
        log.info("Loading data...")
        self.load_data()
        if self.resample_half:
            log.info("Resampling data...")
            self.resample_data()
        if self.standardize:
            log.info("Standardizing data...")
            self.standardize_data()
        log.info("..Done.")
        # hack to allow experiment class to know targets will have two dimensions
        self.y = np.ones((1, 1)) * np.nan

    def load_data(self):
        """Read all 8 training series (X and y) of this subject."""
        self.train_X_series = []
        self.train_y_series = []
        train_folder = os.path.join(self.data_folder, 'train/')
        for i_series in range(1, 9):
            X_series, y_series = load_train(train_folder, self.i_subject,
                i_series)
            # all sensor names should be the same :)
            # so just set it here directly
            if not hasattr(self, 'sensor_names'):
                self.sensor_names = X_series.keys()
            else:
                assert np.array_equal(self.sensor_names, X_series.keys())
            self.train_X_series.append(np.array(X_series).astype(np.float32))
            self.train_y_series.append(np.array(y_series).astype(np.int32))
        assert len(self.train_X_series) == 8, "Should be 8 train series for each subject"

    def resample_data(self):
        """Downsample X 500 Hz -> 250 Hz and subsample y to match."""
        for i_series in range(8):
            X_series = np.array(self.train_X_series[i_series]).astype(np.float32)
            X_series = resample(X_series, 250.0/500.0, 'sinc_fastest')
            self.train_X_series[i_series] = X_series
            y_series = np.array(self.train_y_series[i_series]).astype(np.int32)
            # take later predictions ->
            # shift all predictions backwards compared to data.
            # this ensures you are not using data from the future to make a prediciton
            # rather in a bad case maybe you do not even have all data up to the sample
            # to make the prediction
            y_series = y_series[1::2]
            # maybe some border effects remove predictions
            y_series = y_series[-len(X_series):]
            self.train_y_series[i_series] = y_series

    def standardize_data(self):
        """Exponentially standardize each series, carrying running statistics
        from one series into the next; remembers the final mean/std so the
        test data can later be standardized consistently."""
        factor_new = 0.01
        for i_series in range(8):
            X_series = self.train_X_series[i_series]
            if i_series == 0:
                # First series: initialise statistics from a leading block.
                init_block_size = 8000
                means = exponential_running_mean(X_series, factor_new=factor_new,
                    init_block_size=init_block_size, axis=None)
                demeaned = X_series - means
                stds = np.sqrt(exponential_running_var_from_demeaned(
                    demeaned, factor_new, init_block_size=init_block_size, axis=None))
            else:
                # Later series: continue from the previous series' statistics.
                start_mean = means[-1]
                start_var = stds[-1] * stds[-1]
                means = exponential_running_mean(X_series, factor_new=factor_new,
                    start_mean=start_mean, axis=None)
                demeaned = X_series - means
                stds = np.sqrt(exponential_running_var_from_demeaned(
                    demeaned, factor_new, start_var=start_var, axis=None))
            eps = 1e-6  # guard against division by (near-)zero std
            standardized = demeaned / np.maximum(stds, eps)
            self.train_X_series[i_series] = standardized
        # for later test standardizing
        self.final_std = stds[-1]
        self.final_mean = means[-1]

    def load_test(self):
        """Refers to test set from evaluation (without labels)."""
        log.info("Loading test data...")
        self.load_test_data()
        if self.resample_half:
            log.info("Resampling test data...")
            self.resample_test_data()
        if self.standardize:
            log.info("Standardizing test data...")
            self.standardize_test_data()
        log.info("..Done.")

    def load_test_data(self):
        """Read evaluation series 9 and 10 of this subject."""
        test_folder = os.path.join(self.data_folder, 'test/')
        self.test_X_series = []
        for i_series in range(9, 11):
            X_series = load_test(test_folder, self.i_subject, i_series)
            self.test_X_series.append(np.array(X_series).astype(np.float32))
        assert len(self.test_X_series) == 2, "Should be 2 test series for each subject"

    def resample_test_data(self):
        """Downsample the evaluation series 500 Hz -> 250 Hz."""
        for i_series in range(2):
            X_series = np.array(self.test_X_series[i_series]).astype(np.float32)
            X_series = resample(X_series, 250.0/500.0, 'sinc_fastest')
            self.test_X_series[i_series] = X_series

    def standardize_test_data(self):
        """Standardize evaluation data, continuing from the final train
        statistics (final_mean/final_std set by standardize_data)."""
        factor_new = 0.01
        for i_series in range(2):
            X_series = self.test_X_series[i_series]
            start_mean = self.final_mean
            start_var = self.final_std * self.final_std
            means = exponential_running_mean(X_series, factor_new=factor_new,
                start_mean=start_mean, axis=None)
            demeaned = X_series - means
            stds = np.sqrt(exponential_running_var_from_demeaned(
                demeaned, factor_new, start_var=start_var, axis=None))
            eps = 1e-6  # guard against division by (near-)zero std
            standardized = demeaned / np.maximum(stds, eps)
            self.test_X_series[i_series] = standardized
class AllSubjectsKaggleGraspLiftSet(object):
    """ Kaggle grasp lift set loading the data for all subjects.

    Thin aggregator: owns one KaggleGraspLiftSet per subject and fans every
    operation out to all of them.
    """
    reloadable = False  # tells the experiment framework not to reload

    def __init__(self, data_folder, resample_half, standardize=False,
            last_subject=12):
        self.data_folder = data_folder
        self.resample_half = resample_half
        self.standardize = standardize
        self.last_subject = last_subject

    def ensure_is_loaded(self):
        """Load lazily on first access."""
        if not hasattr(self, 'kaggle_sets'):
            self.load()

    def load(self):
        self.create_kaggle_sets()
        self.load_kaggle_sets()
        # hack to allow experiment class to know targets will have two dimensions
        self.y = np.ones((1, 1)) * np.nan

    def create_kaggle_sets(self):
        """Instantiate one per-subject set for subjects 1..last_subject."""
        self.kaggle_sets = []
        for subject_no in range(1, self.last_subject + 1):
            self.kaggle_sets.append(KaggleGraspLiftSet(
                self.data_folder, subject_no, self.resample_half,
                self.standardize))

    def load_kaggle_sets(self):
        for subject_no, kaggle_set in enumerate(self.kaggle_sets, start=1):
            log.info("Loading Subject {:d}...".format(subject_no))
            kaggle_set.load()

    def standardize_train_data(self):
        for subject_no, kaggle_set in enumerate(self.kaggle_sets, start=1):
            log.info("Standardizing Train Subject {:d}...".format(subject_no))
            kaggle_set.standardize_data()

    def load_test(self):
        for subject_no, kaggle_set in enumerate(self.kaggle_sets, start=1):
            log.info("Loading Test Subject {:d}...".format(subject_no))
            kaggle_set.load_test()

    def load_test_data(self):
        for subject_no, kaggle_set in enumerate(self.kaggle_sets, start=1):
            log.info("Loading Test Data Subject {:d}...".format(subject_no))
            kaggle_set.load_test_data()

    def resample_test_data(self):
        for subject_no, kaggle_set in enumerate(self.kaggle_sets, start=1):
            log.info("Resample Test Subject {:d}...".format(subject_no))
            kaggle_set.resample_test_data()

    def standardize_test_data(self):
        for subject_no, kaggle_set in enumerate(self.kaggle_sets, start=1):
            log.info("Standardizing Test Subject {:d}...".format(subject_no))
            kaggle_set.standardize_test_data()
def create_submission_csv_for_one_subject(folder_name, kaggle_set, iterator, preprocessor,
        final_layer, submission_id):
    """Predict both evaluation series of one subject and write a kaggle
    submission csv named {submission_id:02d}.csv into *folder_name*.

    WARNING: contains a deliberate hard stop (``assert False``) before the
    prediction-duplication step — that part is unfinished, see the TODO.

    :param folder_name: output folder for the csv
    :param kaggle_set: KaggleGraspLiftSet of the subject to predict
    :param iterator: batch iterator producing network inputs
    :param preprocessor: fitted on train, then applied frozen to test
    :param final_layer: lasagne output layer of the trained network
    :param submission_id: number used for the output file name
    """
    ### Load and preprocess data
    kaggle_set.load()
    # remember test series lengths before and after resampling to more accurately pad predictions
    # later (padding due to the lost samples)
    kaggle_set.load_test_data()
    test_series_lengths = [len(series) for series in kaggle_set.test_X_series]
    kaggle_set.resample_test_data()
    test_series_lengths_resampled = [len(series) for series in kaggle_set.test_X_series]
    # add trailing singleton axes: layout (b, c, 0, 1) as declared below
    X_train = deepcopy(np.concatenate(kaggle_set.train_X_series)[:,:,np.newaxis,np.newaxis])
    X_test_0 = deepcopy(kaggle_set.test_X_series[0][:,:,np.newaxis,np.newaxis])
    X_test_1 = deepcopy(kaggle_set.test_X_series[1][:,:,np.newaxis,np.newaxis])
    # create dense design matrix sets
    train_set = DenseDesignMatrixWrapper(
        topo_view=X_train,
        y=None, axes=('b','c',0,1))
    # fake labels: the wrapper requires y, but test labels are unknown (6 classes)
    fake_test_y = np.ones((len(X_test_0), 6))
    test_set_0 = DenseDesignMatrixWrapper(
        topo_view=X_test_0,
        y=fake_test_y)
    fake_test_y = np.ones((len(X_test_1), 6))
    test_set_1 = DenseDesignMatrixWrapper(
        topo_view=X_test_1,
        y=fake_test_y)
    log.info("Preprocessing data...")
    # fit statistics on train only, then apply frozen to both test series
    preprocessor.apply(train_set, can_fit=True)
    preprocessor.apply(test_set_0, can_fit=False)
    preprocessor.apply(test_set_1, can_fit=False)
    ### Create prediction function and create predictions
    log.info("Create prediction functions...")
    input_var = lasagne.layers.get_all_layers(final_layer)[0].input_var
    predictions = lasagne.layers.get_output(final_layer, deterministic=True)
    pred_fn = theano.function([input_var], predictions)
    log.info("Make predictions...")
    batch_gen_0 = iterator.get_batches(test_set_0, shuffle=False)
    all_preds_0 = [pred_fn(batch[0]) for batch in batch_gen_0]
    batch_gen_1 = iterator.get_batches(test_set_1, shuffle=False)
    all_preds_1 = [pred_fn(batch[0]) for batch in batch_gen_1]
    ### Pad and reshape predictions
    n_sample_preds = get_n_sample_preds(final_layer)
    input_time_length = lasagne.layers.get_all_layers(final_layer)[0].shape[2]
    n_samples_0 = test_set_0.get_topological_view().shape[0]
    preds_arr_0 = get_reshaped_cnt_preds(all_preds_0, n_samples_0,
        input_time_length, n_sample_preds)
    n_samples_1 = test_set_1.get_topological_view().shape[0]
    preds_arr_1 = get_reshaped_cnt_preds(all_preds_1, n_samples_1,
        input_time_length, n_sample_preds)
    series_preds = [preds_arr_0, preds_arr_1]
    assert len(series_preds[0]) == test_series_lengths_resampled[0]
    assert len(series_preds[1]) == test_series_lengths_resampled[1]
    # Deliberate guard left by the author: the duplication below is only
    # correct when resample_half was actually used — unfinished work.
    assert False, ("TODO: here only duplicate if resample half is true for the dataset.. "
        "also take care how to create submission cv if trained on all subjects")
    # repeat each prediction to undo the factor-2 downsampling
    series_preds_duplicated = [np.repeat(preds, 2,axis=0) for preds in series_preds]
    n_classes = preds_arr_0.shape[1]
    # pad missing ones with zeros
    missing_0 = test_series_lengths[0] - len(series_preds_duplicated[0])
    full_preds_0 = np.append(np.zeros((missing_0, n_classes), dtype=np.float32),
        series_preds_duplicated[0], axis=0)
    missing_1 = test_series_lengths[1] - len(series_preds_duplicated[1])
    full_preds_1 = np.append(np.zeros((missing_1, n_classes), dtype=np.float32),
        series_preds_duplicated[1], axis=0)
    assert len(full_preds_0) == test_series_lengths[0]
    assert len(full_preds_1) == test_series_lengths[1]
    full_series_preds = [full_preds_0, full_preds_1]
    assert sum([len(a) for a in full_series_preds]) == np.sum(test_series_lengths)
    ### Create csv
    log.info("Create csv...")
    csv_filename = "{:02d}".format(submission_id) + '.csv'
    csv_filename = os.path.join(folder_name, csv_filename)
    cols = ['HandStart','FirstDigitTouch',
        'BothStartLoadPhase','LiftOff',
        'Replace','BothReleased']
    # collect ids
    all_ids = []
    all_preds = []
    for i_series in (9,10):
        id_prefix = "subj{:d}_series{:d}_".format(kaggle_set.i_subject, i_series)
        this_preds = full_series_preds[i_series-9] # respect offsets
        all_preds.extend(this_preds)
        this_ids = [id_prefix + str(i_sample) for i_sample in range(this_preds.shape[0])]
        all_ids.extend(this_ids)
    all_ids = np.array(all_ids)
    all_preds = np.array(all_preds)
    submission = pd.DataFrame(index=all_ids,
        columns=cols,
        data=all_preds)
    submission.to_csv(csv_filename, index_label='id',float_format='%.3f')
    log.info("Done")
def create_submission_csv_for_all_subjects(folder):
    """Concatenate the 12 per-subject csv files (01.csv .. 12.csv) found in
    *folder* into one zipped submission file all_submission.zip.

    The header row is taken from the first file only; headers of all other
    files are skipped.

    Fixes: ``xrange`` (Python-2-only, NameError on py3) replaced with
    ``range``; file and zip handles are now closed deterministically via
    context managers instead of leaking.
    """
    all_lines = []
    for i in range(1, 13):
        csv_path = os.path.join(folder, '{:02d}.csv'.format(i))
        with open(csv_path, 'r') as csv_file:
            content = csv_file.readlines()
        if i == 1:
            all_lines.append(content[0])
        all_lines.extend(content[1:])
    csv_str = "".join(all_lines)
    zip_path = os.path.join(folder, 'all_submission.zip')
    with ZipFile(zip_path, 'w', ZIP_DEFLATED) as submission_zip_file:
        submission_zip_file.writestr("submission.csv", csv_str)
def create_submission_csv_for_all_subject_model(folder_name,
        all_sub_kaggle_set, dataset_provider, iterator, final_layer,
        submission_id):
    """Predict all 12 subjects with one all-subject model and write a zipped
    submission ({submission_id}.zip) into *folder_name*.

    Only valid without resampling — asserted below.

    :param folder_name: output folder for the zip file
    :param all_sub_kaggle_set: AllSubjectsKaggleGraspLiftSet to predict
    :param dataset_provider: supplies/applies the fitted preprocessor
    :param iterator: batch iterator producing network inputs
    :param final_layer: lasagne output layer of the trained network
    :param submission_id: number used for the output file name
    """
    all_sub_kaggle_set.load()
    assert all_sub_kaggle_set.resample_half == False, ("Not implemented for "
        "resample half")
    all_sub_kaggle_set.load_test()
    # following line will just do the preprocessing already on the train set...
    dataset_provider.get_train_merged_valid_test(all_sub_kaggle_set)
    test_sets_per_subj = []
    for i_subject in range(12):
        kaggle_set = all_sub_kaggle_set.kaggle_sets[i_subject]
        this_sets = []
        for i_test_series in range(2):
            # Get input; trailing singleton axes give the (b, c, 0, 1) layout
            X_test = kaggle_set.test_X_series[i_test_series][:,:,np.newaxis,np.newaxis]
            # fake labels: the wrapper requires y, test labels are unknown
            fake_test_y = np.ones((len(X_test), 6))
            test_set = DenseDesignMatrixWrapper(
                topo_view=X_test,
                y=fake_test_y)
            if dataset_provider.preprocessor is not None:
                dataset_provider.preprocessor.apply(test_set, can_fit=False)
            this_sets.append(test_set)
        assert len(this_sets) == 2
        test_sets_per_subj.append(this_sets)
    ### Create prediction function and create predictions
    log.info("Create prediction functions...")
    input_var = lasagne.layers.get_all_layers(final_layer)[0].input_var
    predictions = lasagne.layers.get_output(final_layer, deterministic=True)
    pred_fn = theano.function([input_var], predictions)
    log.info("Setup iterator...")
    n_sample_preds = get_n_sample_preds(final_layer)
    iterator.n_sample_preds = n_sample_preds
    log.info("Make predictions...")
    preds_per_subject = []
    for i_subject in range(12):
        log.info("Predictions for Subject {:d}...".format(i_subject + 1))
        test_sets_subj = test_sets_per_subj[i_subject]
        preds = get_y_for_subject(pred_fn, test_sets_subj[0], test_sets_subj[1],
            iterator, final_layer)
        preds_per_subject.append(preds)
    log.info("Done")
    log.info("Create csv...")
    cols = ['HandStart','FirstDigitTouch',
        'BothStartLoadPhase','LiftOff',
        'Replace','BothReleased']
    # collect ids
    all_ids = []
    all_preds = []
    for i_subject in range(12):
        pred_subj_per_series = preds_per_subject[i_subject]
        for i_series in (9,10):
            id_prefix = "subj{:d}_series{:d}_".format(i_subject+1, i_series)
            this_preds = pred_subj_per_series[i_series-9] # respect offsets
            all_preds.extend(this_preds)
            this_ids = [id_prefix + str(i_sample) for i_sample in range(this_preds.shape[0])]
            all_ids.extend(this_ids)
    all_ids = np.array(all_ids)
    all_preds = np.array(all_preds)
    # sanity check: the competition's total test sample count is fixed
    assert all_ids.shape == (3144171,)
    assert all_preds.shape == (3144171,6)
    submission = pd.DataFrame(index=all_ids,
        columns=cols,
        data=all_preds)
    # write the csv into memory, then zip it (py2 StringIO buffer)
    csv_output = StringIO.StringIO()
    submission.to_csv(csv_output, index_label='id',float_format='%.3f')
    csv_str = csv_output.getvalue()
    log.info("Create zip...")
    zip_file_name = os.path.join(folder_name, "{:d}.zip".format(submission_id))
    submission_zip_file = ZipFile(zip_file_name, 'w', ZIP_DEFLATED)
    submission_zip_file.writestr("submission.csv", csv_str)
    submission_zip_file.close()
    log.info("Done")
def get_y_for_subject(pred_fn, test_set_0, test_set_1, iterator, final_layer):
    """Run *pred_fn* over both evaluation series of one subject and reshape
    the batched outputs into continuous per-sample predictions.

    Assumes there was no resampling!!

    :param pred_fn: compiled theano prediction function
    :param test_set_0: dense design matrix set of series 9
    :param test_set_1: dense design matrix set of series 10
    :param iterator: batch iterator (shuffle disabled to keep sample order)
    :param final_layer: lasagne output layer, used to derive reshape sizes
    :return: list [preds_series_9, preds_series_10]
    """
    batch_gen_0 = iterator.get_batches(test_set_0, shuffle=False)
    all_preds_0 = [pred_fn(batch[0]) for batch in batch_gen_0]
    batch_gen_1 = iterator.get_batches(test_set_1, shuffle=False)
    all_preds_1 = [pred_fn(batch[0]) for batch in batch_gen_1]
    n_sample_preds = get_n_sample_preds(final_layer)
    input_time_length = lasagne.layers.get_all_layers(final_layer)[0].shape[2]
    n_samples_0 = test_set_0.get_topological_view().shape[0]
    preds_arr_0 = get_reshaped_cnt_preds(all_preds_0, n_samples_0,
        input_time_length, n_sample_preds)
    n_samples_1 = test_set_1.get_topological_view().shape[0]
    preds_arr_1 = get_reshaped_cnt_preds(all_preds_1, n_samples_1,
        input_time_length, n_sample_preds)
    series_preds = [preds_arr_0, preds_arr_1]
    return series_preds
"""
Routes and views for the flask application.
"""
from datetime import datetime
from flask import Flask,render_template, request
from globalsuperstore import app
import tweepy
import time
import os
from textblob import TextBlob
import pandas as pd
@app.route('/')
@app.route('/home')
def home():
    """Renders the home page."""
    context = {
        'title': 'Home Page',
        'year': datetime.now().year,
    }
    return render_template('index.html', **context)
@app.route('/trends')
def trends():
    """Renders the business trends page."""
    context = {
        'title': 'Business Trends',
        'year': datetime.now().year,
    }
    return render_template('trends.html', **context)
@app.route('/sentiment')
def sentiment():
    """Renders the sentiment analysis page."""
    context = {
        'title': 'Sentiment Analysis',
        'year': datetime.now().year,
    }
    return render_template('sentiment.html', **context)
@app.route('/forecast')
def forecast():
    """Renders the machine learning forecasting page."""
    context = {
        'title': 'Machine Learning Forecasting',
        'year': datetime.now().year,
    }
    return render_template('forecast.html', **context)
@app.route('/about')
def about():
    """Renders the about page."""
    context = {
        'title': 'About HMI-DaTa Analytics',
        'year': datetime.now().year,
    }
    return render_template('about.html', **context)
@app.route('/tweepy_search', methods=("POST", "GET"))
def tweepy_search():
    """Search Twitter for the ``search_term`` query parameter, score each
    tweet with TextBlob sentiment polarity and render the results table.

    SECURITY FIX: API credentials were previously hard-coded in this file
    (and therefore leaked in version control — those keys must be revoked).
    They are now read from environment variables:
    TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET,
    TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_SECRET.
    """
    search_term = request.args.get('search_term', '')
    df = pd.DataFrame(columns=['Tweet Text', 'Sentiment Analysis'])

    def scrape_tweet(term):
        # Credentials come from the environment, never from source code.
        auth = tweepy.OAuthHandler(
            os.environ.get('TWITTER_CONSUMER_KEY', ''),
            os.environ.get('TWITTER_CONSUMER_SECRET', ''))
        auth.set_access_token(
            os.environ.get('TWITTER_ACCESS_TOKEN', ''),
            os.environ.get('TWITTER_ACCESS_SECRET', ''))
        api = tweepy.API(auth)
        try:
            api.verify_credentials()
            print("Authentication OK")
        except Exception:  # narrowed from bare except: keep SystemExit etc.
            print("Error during authentication")
        for tweet in api.search(term):
            print(tweet.text)
            analysis = TextBlob(tweet.text)
            print(analysis.sentiment)
            # Append one row per tweet to the enclosing DataFrame.
            df.loc[len(df)] = [tweet.text, analysis.sentiment.polarity]

    scrape_tweet(search_term)
    return render_template(
        'tweepy_search.html',
        term='Tweepy Results for ' + search_term,
        tables=[df.to_html(classes='data', header="true", index=False)]
    )
|
import pickle
import numpy as np
import os
from datetime import datetime
def get_pickle_file_content(full_path_pickle_file):
    """Unpickle and return the object stored in *full_path_pickle_file*.

    latin1 decoding keeps compatibility with pickles written by Python 2.
    """
    with open(full_path_pickle_file, 'rb') as pickle_handle:
        return pickle.load(pickle_handle, encoding='latin1')
def get_ret_type_dict(pickle_list):
    """Build a {return_type: integer_id} lookup table, ids starting at 1.

    *pickle_list* is a sequence of contents, each an iterable of
    (int_seq, ret_type) pairs; only the ret_type part is used.  Ids are
    assigned in set-iteration order.
    """
    ret_type_set = set()
    for content in pickle_list:
        ret_type_set.update(ret_type for _int_seq, ret_type in content)
    # Same mapping as enumerate(..., start=1) with swapped key/value roles.
    return {ret_type: type_id
            for type_id, ret_type in enumerate(ret_type_set, start=1)}
def main():
    """Read the full-dataset pickle, build the return-type dictionary and
    save it as its own pickle file under /tmp."""
    path_to_int_seq_pickle = "../../../ubuntu-20-04-datasets/full_dataset_att_int_seq.pickle"
    #path_to_return_type_dict_file = "../../ubuntu-20-04-datasets/full_dataset_att_int_seq_ret_type_dict.pickle"
    path_to_return_type_dict_file = "/tmp/full_dataset_att_int_seq_ret_type_dict.pickle"
    ### read the full dataset pickle; bail out early when it is missing
    if not os.path.isfile(path_to_int_seq_pickle):
        print(f'No file: {path_to_int_seq_pickle} there ?')
        exit()
    pickle_file_content = get_pickle_file_content(path_to_int_seq_pickle)
    ### get return type dict
    ret_type_dict = get_ret_type_dict(pickle_file_content)
    print(f'ret-type-dict: {ret_type_dict}')
    ### save return type dict to file
    ret_file = open(path_to_return_type_dict_file, 'wb+')
    # NOTE(review): pickle.dump returns None, so pickle_list is always None
    # here — the name is misleading but harmless.
    pickle_list = pickle.dump(ret_type_dict, ret_file)
    ret_file.close()
    print(f'Saved {path_to_return_type_dict_file} file.')
if __name__ == "__main__":
main()
|
from typing import KeysView, Dict
from datetime import datetime, timedelta
import logging
from threading import Timer, Lock
from ..fleet import member_info
from ...storage.database import CrestFleet, FleetTime, Character, FleetTimeLastTracked, FleetTimeByHull
from ...base import db
from ..swagger.eve.fleet.models import FleetMember
logger = logging.getLogger(__name__)
class TimeTrackerCache:
    """Per-fleet snapshot used by FleetTimeTracker.

    Holds the member list as of the last poll, that poll's cache-expiry
    timestamp, and the last moment already booked per character so time is
    never counted twice.
    """

    def __init__(self, members: Dict[int, FleetMember], expires: datetime,
                 fleet: CrestFleet):
        self.members: Dict[int, FleetMember] = members
        self.expires: datetime = expires
        # characterID -> last moment already booked for that character
        self.last_time_tracked: Dict[int, datetime] = {}
        self.fleet_registration_time: datetime = fleet.registrationTime

    def rejoined_fleet(self, member: FleetMember) -> bool:
        '''Check if this member rejoined Fleet (newer join time than cached).'''
        return self.members[member.character_id()].join_datetime() < member.join_datetime()

    def changed_ship_type(self, member: FleetMember) -> bool:
        '''Check if this member changed his hull type'''
        return self.members[member.character_id()].ship_type_id() != member.ship_type_id()

    def get_last_time_tracked(self, member: FleetMember) -> datetime:
        """Return the last booked moment for *member*, falling back to the
        fleet registration time when nothing was booked yet."""
        if member.character_id() in self.last_time_tracked:
            return self.last_time_tracked[member.character_id()]
        return self.fleet_registration_time

    def __load_last_time_tracked(self) -> None:
        # NOTE(review): not called anywhere inside this class (name-mangled,
        # so not callable from outside either) — confirm intended usage.
        member_ids = list(self.members.keys())
        data_query = db.session.query(FleetTimeLastTracked)\
            .filter(FleetTimeLastTracked.characterID.in_(member_ids))
        for track_info in data_query:
            self.last_time_tracked[track_info.characterID] = track_info.lastTimeTracked

    def update_last_time_tracked(self, member: FleetMember, time: datetime) -> None:
        """Persist and cache the last booked timestamp for *member*.

        Bugfix: SQLAlchemy ``Session`` has no ``.update()`` method — the
        previous ``db.session.update(...).where(...)`` raised
        AttributeError.  A bulk UPDATE must be issued through a Query.
        """
        if member.character_id() in self.last_time_tracked:
            db.session.query(FleetTimeLastTracked)\
                .filter(FleetTimeLastTracked.characterID == member.character_id())\
                .update({'lastTimeTracked': time})
        else:
            ftlt: FleetTimeLastTracked = FleetTimeLastTracked(
                characterID=member.character_id(),
                lastTimeTracked=time)
            db.session.add(ftlt)
        self.last_time_tracked[member.character_id()] = time
class FleetTimeTracker:
def __init__(self):
    """Create an idle tracker; call start_tracking() to begin polling."""
    # fleet_id -> TimeTrackerCache snapshot of that fleet
    self.cache: Dict[int, TimeTrackerCache] = {}
    # threading.Timer driving the periodic check_fleets run (None until started)
    self.timer = None
    # True while tracking is disabled; guarded by state_lock
    self.stopped = True
    self.state_lock = Lock()
def check_fleets(self):
    """Periodic worker: reconcile the member cache against live fleet data
    and book time for members who left, rejoined or switched hulls.

    Reschedules itself via a 300s Timer until stop_tracking() is called.

    Bugfix: the first loop deletes entries from ``self.cache`` while
    iterating it — on Python 3 ``dict.keys()`` is a live view, so that
    raised ``RuntimeError: dictionary changed size during iteration``.
    Iterate over a snapshot (``list(...)``) instead.
    """
    try:
        if self.stopped:
            logger.info('Not running because tracker is stopped')
            return
        logger.info('check_fleets executing')
        fleet_ids: KeysView = member_info.get_fleet_ids()
        # lets check if any fleets are gone that we still have data of
        # (snapshot the keys: we delete from the dict inside the loop)
        for fleet_id in list(self.cache.keys()):
            logger.debug('Checking fleet_id=%s if it still exists', fleet_id)
            if fleet_id not in fleet_ids:
                logger.info('Fleet with id=%s is not in cache anymore, removing', fleet_id)
                # the fleet disappeared register remaining mebers time
                self.register_fleet_time(fleet_id, self.cache[fleet_id])
                del self.cache[fleet_id]
        for fleet_id in self.cache.keys():
            logger.debug('Checking members in fleet with id=%s', fleet_id)
            if fleet_id in fleet_ids:
                # these ones we need to check for missing members, because they left the fleet
                # we also need to check all none missing members if their fleet join time maybe changed because if it did, it means they left and rejoined between the last check and now
                fleet: CrestFleet = db.session.query(CrestFleet).get(fleet_id)
                fleet_new_data: Dict[int, FleetMember] = member_info.get_fleet_members(fleet_id, fleet.comp)
                fleet_expires = member_info.get_expires(fleet_id)
                tt_data: TimeTrackerCache = self.cache[fleet_id]
                # if we get stale data, because e.g. we have no valid api key
                # just skip this fleet
                if fleet_expires == tt_data.expires:
                    logger.debug('Skipping fleet with id=%s because cache data is stale', fleet_id)
                    continue
                # find members not in new data (they left the fleet)
                # or members that have newer join time (they rejoined)
                # or members that have have a different hull (they switched ship)
                for member_id in tt_data.members.keys():
                    logger.debug('Checking memeber with character_id=%s', member_id)
                    if member_id in fleet_new_data:
                        logger.debug('Member is still in fleet')
                        # now check his join time, if it changed he rejoined
                        # and we need to add his time from before
                        # so that time does not disappear
                        if tt_data.rejoined_fleet(fleet_new_data[member_id]):
                            logger.debug('Member rejoined fleet since last check')
                            self.register_member_time(
                                fleet_id,
                                tt_data.members[member_id],
                                tt_data.expires,
                                tt_data)
                        # we must only track if he did not rejoin
                        elif tt_data.changed_ship_type(fleet_new_data[member_id]):
                            logger.debug('Member %s changed hull', tt_data.members[member_id].character_id())
                            self.register_member_time(
                                fleet_id,
                                tt_data.members[member_id],
                                tt_data.expires,
                                tt_data)
                    else:  # he left fleet
                        logger.debug('Member left fleet')
                        self.register_member_time(fleet_id,
                                                  tt_data.members[member_id],
                                                  tt_data.expires,
                                                  tt_data)
                # we don't need to care about new members,
                # because we handle all members when they leave,
                # because only then we know the duration they stayed for
                # now we can replace the data
                self.cache[fleet_id].members = fleet_new_data.copy()
                self.cache[fleet_id].expires = fleet_expires
        # add new fleets to cache
        for fleet_id in fleet_ids:
            if fleet_id not in self.cache:
                logger.info('Adding new fleet with fleet_id=%s to cache', fleet_id)
                fleet: CrestFleet = db.session.query(CrestFleet).get(fleet_id)
                member_data = member_info.get_fleet_members(fleet_id, fleet.comp)
                expires_data = member_info.get_expires(fleet_id)
                self.cache[fleet_id] = TimeTrackerCache(member_data, expires_data, fleet)
        db.session.commit()
        db.session.remove()
        if not self.stopped:
            logger.info('Registering new timer')
            self.timer = Timer(300, self.check_fleets)
            self.timer.start()
        else:
            logger.info('Not setting up new timer, because Tracker is stopped')
    except Exception:  # the unused 'as e' binding was dropped
        logger.exception('Failed')
def fleet_removed(self, fleet_id: int,
                  registration_time: datetime) -> None:
    """Flush the tracked time of all remaining members of a removed fleet, then forget it."""
    logger.info('Fleet id=%s was removed', fleet_id)
    if fleet_id not in self.cache:
        # Nothing cached for this fleet: it was never tracked or was flushed already.
        logger.info('Fleet id=%s was attempted to be removed but it wasn\'t found in the cache', fleet_id)
        return
    cached_fleet = self.cache[fleet_id]
    # Credit every member still recorded for this fleet before dropping the entry.
    self.register_fleet_time(fleet_id, cached_fleet)
    del self.cache[fleet_id]
def start_tracking(self) -> None:
    """Start the periodic fleet time tracking loop (idempotent).

    Holds the state lock for the whole transition so concurrent
    start/stop calls cannot interleave.
    """
    self.state_lock.acquire(True)
    try:
        logger.info('Starting time tracking')
        if not self.stopped:
            logger.info('Time tracking was already running')
            return
        self.stopped = False
        self.cache = {}
        # Fire the first check almost immediately; subsequent runs are
        # rescheduled by check_fleets itself.
        self.timer = Timer(1, self.check_fleets)
        self.timer.start()
    finally:
        # Guaranteed release -- the original manual release could be
        # skipped if logging or Timer creation raised, leaving the lock
        # held forever.
        self.state_lock.release()
def stop_tracking(self) -> None:
    """Stop the periodic tracking loop and cancel any pending timer (idempotent)."""
    self.state_lock.acquire(True)
    try:
        logger.info('Stopping time tracking')
        if self.stopped:
            logger.info('Time tracking was already stopped')
            return
        self.stopped = True
        if self.timer is not None:
            self.timer.cancel()
        else:
            logger.info('Timer was not set, can not cancel')
    finally:
        # Guaranteed release -- the original manual release could be
        # skipped if a call above raised, leaving the lock held forever.
        self.state_lock.release()
def register_fleet_time(self, fleet_id: int, fleet_cache: TimeTrackerCache) -> None:
    """Credit tracked time for every member currently cached for this fleet."""
    until = fleet_cache.expires
    for member in fleet_cache.members.values():
        self.register_member_time(fleet_id, member, until, fleet_cache)
def register_member_time(self, fleet_id: int, member: FleetMember,
                         until: datetime, cache: TimeTrackerCache):
    """Persist the time `member` spent in the fleet since the last checkpoint.

    The credited duration runs from max(join time, last tracked time) up to
    `until`, and is accumulated into both the per-character FleetTime row and
    the per-character-per-hull FleetTimeByHull row.
    """
    join_datetime = member.join_datetime()
    # Normalise an aware join time to naive UTC so it can be compared with
    # the (naive) timestamps used everywhere else in the tracker.
    if join_datetime.tzinfo is not None:
        join_datetime = join_datetime.replace(tzinfo=None) - join_datetime.utcoffset()
    duration_in_fleet: timedelta = until - max(join_datetime, cache.get_last_time_tracked(member))
    if logger.isEnabledFor(logging.DEBUG):
        # The Character lookup is only needed for the debug message, so it is
        # guarded to avoid a DB query when debug logging is off.
        character: Character = db.session.query(Character).get(member.character_id())
        logger.debug('Registering %s seconds for member with name=%s character_id=%s hull=%s',
                     duration_in_fleet.total_seconds(),
                     character.get_eve_name(),
                     member.character_id(),
                     member.ship_type_id())
    # Upsert the total-time row for this character.
    if db.session.query(FleetTime).filter_by(characterID=member.character_id()).count() <= 0:
        # we need to create entries
        ft = FleetTime(characterID=member.character_id(),
                       duration=duration_in_fleet.total_seconds())
        db.session.add(ft)
    else:
        db.session.query(FleetTime).\
            filter_by(characterID=member.character_id()).\
            update({'duration': FleetTime.duration + duration_in_fleet.total_seconds()})
    # Upsert the per-hull time row for this character + current ship type.
    if db.session.query(FleetTimeByHull).filter_by(characterID=member.character_id(), hullType=member.ship_type_id()).count() <= 0:
        ftbh = FleetTimeByHull(
            characterID=member.character_id(),
            hullType=member.ship_type_id(),
            duration=duration_in_fleet.total_seconds()
        )
        db.session.add(ftbh)
    else:
        db.session.query(FleetTimeByHull)\
            .filter_by(characterID=member.character_id(), hullType=member.ship_type_id())\
            .update({'duration': FleetTimeByHull.duration + duration_in_fleet.total_seconds()})
    # Move the checkpoint forward so this interval is not credited twice.
    cache.update_last_time_tracked(member, until)
|
# Diatonic scale / chord helper tables (two octaves of semitone indices).
notes = range(25)
notes_names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
modes = range(7)
modes_names = ["Ionian", "Dorian", "Phrygian", "Lydian", "Mixolydian", "Aeolian", "Locrian"]
# Two octaves of the major scale, in semitones from the tonic.
scales = [0, 2, 4, 5, 7, 9, 11, 12, 14, 16, 17, 19, 21, 23, 24]
# Chord qualities as semitone offsets from the chord root.
chords = {
    "maj" : [0, 4, 7],
    "min" : [0, 3, 7],
    "dim" : [0, 3, 6],
    "maj7" : [0, 4, 7, 11],
    "min7" : [0, 3, 7, 10],
    "dom7" : [0, 4, 7, 10],
    "dim7" : [0, 3, 6, 10]
}
# Mutated in place by give_scale() so existing references stay valid.
current_scale = []


def give_scale(tonic, mode):
    """Fill current_scale with the 8 notes of `mode` starting at `tonic`.

    Rotating the major-scale offsets by `mode` yields the requested church mode.
    """
    current_scale[:] = [tonic + scales[x + mode] - scales[mode] for x in range(8)]


def find_chords(note):
    """Print every chord quality rooted at `note` whose tones all lie in current_scale."""
    print("Chords for " + notes_names[note % 12] + ":")
    for chord_name, offsets in chords.items():
        # A chord is printed when every tone (or its octave reduction) is in the scale.
        if all((note + x) in current_scale or (note + x) % 12 in current_scale
               for x in offsets):
            # print() function -- the original used a Python 2 print statement,
            # which is a SyntaxError under Python 3.
            print(chord_name)


def main():
    give_scale(0, 0)


main()
|
__author__ = 'shannonjaeger'
from Exceptions import *
import csv
class Member(object):
    """One iMIS member record: number, names, active flag and selection dates.

    Equality is based on the iMIS number alone (the unique identifier in the
    data files).  Ordering sorts inactive members before active ones, then by
    last and first name, case-insensitively.
    """

    def __init__(self, imis, first_name=None, last_name=None, active=False, dates_selected=None):
        # iMIS numbers arrive from CSV as strings; normalise to int.
        # (Explicit isinstance checks replace the old try/assert pattern,
        # which breaks under `python -O`.)
        self.imis = imis if isinstance(imis, int) else int(imis)
        # NOTE(review): bool('0') is True -- a CSV '0' passed here still
        # reads as active; confirm against the callers before changing.
        self.active = active if isinstance(active, bool) else bool(active)
        # Accept a ready-made list or a colon-separated string.  The old code
        # used a shared mutable default ([]) and then overwrote the parsed
        # list with the raw argument, discarding the split.
        if dates_selected is None:
            dates_selected = []
        elif isinstance(dates_selected, str):
            dates_selected = dates_selected.split(':')
        self.dates_selected = dates_selected
        self.first_name = str(first_name)
        self.last_name = str(last_name)

    def as_list(self):
        """Return the member as a CSV row (list of values)."""
        active = '1' if self.active else '0'
        return [str(self.imis), self.last_name, self.first_name, active, self.dates_selected]

    def getKey(self):
        """Sort-key string: '<active>:<last name>:<first name>'."""
        return str(self.active) + ':' + self.last_name + ':' + self.first_name

    def __cmp__(self, other):
        # Legacy Python 2 hook; ignored by Python 3 (rich comparisons below win).
        if hasattr(other, 'getKey'):
            return self.getKey().__cmp__(other.getKey())

    def __eq__(self, other):
        return self.imis == other.imis

    def __ne__(self, other):
        # Kept consistent with __eq__ (identity is the iMIS number).  The old
        # version compared names, so a != b could disagree with a == b.
        return not self.__eq__(other)

    def __lt__(self, other):
        # Inactive members sort before active ones.
        if self.active != other.active and self.active:
            return False
        elif self.active != other.active:
            return True
        elif self.last_name.lower() != other.last_name.lower():
            return self.last_name.lower() < other.last_name.lower()
        else:
            return self.first_name.lower() < other.first_name.lower()

    def __gt__(self, other):
        if self.active != other.active and not self.active:
            return False
        elif self.active != other.active:
            return True
        elif self.last_name.lower() != other.last_name.lower():
            return self.last_name.lower() > other.last_name.lower()
        else:
            return self.first_name.lower() > other.first_name.lower()

    def __le__(self, other):
        if self.active != other.active and self.active:
            return False
        elif self.active != other.active:
            return True
        elif self.last_name.lower() != other.last_name.lower():
            return self.last_name.lower() <= other.last_name.lower()
        else:
            return self.first_name.lower() <= other.first_name.lower()

    def __ge__(self, other):
        if self.active != other.active and not self.active:
            return False
        elif self.active != other.active:
            return True
        elif self.last_name.lower() != other.last_name.lower():
            return self.last_name.lower() >= other.last_name.lower()
        else:
            return self.first_name.lower() >= other.first_name.lower()

    def __str__(self):
        return '{0} {1: >8}: {2: >20} {3: >15} - {4}'.format(str(self.active),
                                                             str(self.imis),
                                                             self.last_name,
                                                             self.first_name,
                                                             self.dates_selected)
class ImisFile():
    """
    Read, write and merge csv files containing iMIS information.

    Members are partitioned into an active and an inactive list; the iMIS
    number column is the only one that must be present in an input file.
    """

    def __init__(self, file_path=None):
        # Header strings used when writing the CSV back out.
        self.imis_header = 'iMIS'
        self.last_name_header = 'Last Name'
        self.first_name_header = 'First Name'
        self.active_header = 'Active'
        self.dates_selected_header = 'Dates Selected'
        self.active_member_list = []
        self.inactive_member_list = []
        self.num_active_selected = 0
        self.num_inactive_selected = 0
        self.file_path = None
        if file_path is not None:
            self.set_file_path(file_path)
            self.read()

    def set_file_path(self, file_path):
        """
        Set the file path to be read/written from
        :param file_path: A fully specified file path on the file system to an iMIS data file.
        :return: None
        """
        # Explicit raise instead of assert, which disappears under `python -O`.
        if file_path is None:
            raise ValueError('file_path must not be None')
        self.file_path = file_path

    def get_file_path(self):
        """
        Return the file path being used.
        :return: The fully specified path where data is written/read from.
        """
        return self.file_path

    def _parse_headings(self, headings):
        """
        Find which column the various potential headers are in the given set of
        headings.  Only the iMIS number column must be present; every other
        entry of the returned dict is -1 when its header is missing.
        :param headings: the headings as a list
        :return heading_columns: dict mapping field name -> column index
        """
        import re
        heading_columns = {'imis': -1,
                           'last_name': -1,
                           'first_name': -1,
                           'active': -1,
                           'dates_selected': -1
                           }
        # Normalise headings: lower-case, collapse whitespace, strip newlines.
        for i in range(0, len(headings)):
            headings[i] = headings[i].lower()
            headings[i] = re.sub(r'\s+', ' ', headings[i]).strip()
            headings[i] = headings[i].replace('\r\n', '')
            headings[i] = headings[i].replace('\n', '')
        for key in heading_columns.keys():
            # 'last_name' -> 'last name' etc., to match the normalised headings.
            search_item = key.lower().replace('_', ' ')
            heading_columns[key] = headings.index(search_item) if search_item in headings else -1
        if heading_columns['imis'] == -1:
            raise InvalidImisFile('File "{0}" does not have an iMIS number column.'.format(str(self.file_path)))
        return heading_columns

    def read(self):
        """
        Read CSV file containing iMIS numbers with or without names.
        The data columns are expected to be in the following order:
            iMIS Number
            Last Name
            First Name
            Membership Activity Status
            Selection Dates
        If the file is successfully read a list of active and inactive members
        is created.
        :return None:
        """
        if self.file_path is None:
            raise NoImisFile("An iMIS file path has not been provided.")
        column_locations = {}
        # 'r' + newline='' replaces the deprecated 'rU' mode (removed in
        # Python 3.11); newline='' is what the csv module documents.
        with open(self.file_path, 'r', newline='') as fp:
            reader = csv.reader(fp, delimiter=",", quoting=csv.QUOTE_NONE)
            for line in reader:
                if len(column_locations) < 1:
                    # First row is the header row.
                    column_locations = self._parse_headings(line)
                    continue
                if len(line) < 1:
                    continue  # Empty line
                if not line[column_locations['imis']].isdigit():
                    # No iMIS number on line so skip it
                    # TODO verify this is not an error
                    continue
                # If we've made it here we have a new member!
                new_member = Member(imis=line[column_locations['imis']])
                new_member.first_name = line[column_locations['first_name']] \
                    if column_locations['first_name'] != -1 else ''
                new_member.last_name = line[column_locations['last_name']] \
                    if column_locations['last_name'] != -1 else ''
                # NOTE(review): bool() of any non-empty string is True, so an
                # explicit '0' in the Active column still reads as active.
                new_member.active = bool(line[column_locations['active']]) \
                    if column_locations['active'] != -1 else True
                new_member.dates_selected = line[column_locations['dates_selected']] \
                    if column_locations['dates_selected'] != -1 else ''
                # Now lets add this member to the active or inactive member list
                # TODO if a duplicate is found make sure to not lose any data
                if new_member.active and new_member not in self.active_member_list:
                    self.active_member_list.append(new_member)
                elif not new_member.active and new_member not in self.inactive_member_list:
                    self.inactive_member_list.append(new_member)

    def _get_default_header_(self):
        """
        The default headers ...
        iMIS, Last Name, First Name, Active, Dates Selected
        :return: the header strings in a list
        """
        return [self.imis_header, self.last_name_header, self.first_name_header,
                self.active_header, self.dates_selected_header]

    def write(self, file_path=None):
        """
        Write the inactive and active member lists to a file.
        :param file_path: The path to the file where the data is to be written.
        :return: None
        """
        if file_path is None and self.file_path is None:
            raise NoImisFile('A file path for the iMIS data must be specified before the data can be written.')
        if file_path is None:
            file_path = self.file_path
        full_list = self.active_member_list + self.inactive_member_list
        # newline='' avoids doubled line endings from csv.writer on Windows;
        # the with-block guarantees the file is closed.
        with open(file_path, 'w', newline='') as fp:
            csv_writer = csv.writer(fp, delimiter=",", quoting=csv.QUOTE_NONE)
            csv_writer.writerow(self._get_default_header_())
            for member in full_list:
                csv_writer.writerow(member.as_list())

    def merge(self, new_file_obj):
        """
        Merge this iMIS file object with a new file.  It is assumed that the
        new file contains a complete list of the current active members (the
        Provincial Council Membership list of current members only).

        After the merge the internal data structures contain the merged data:
        each iMIS number appears on either the active or inactive list exactly
        once, and previously recorded selection dates are preserved.

        :param new_file_obj: either a fully specified file path (str) or an
            ImisFile instance whose file_path has been set.
        :return: None
        """
        if self.file_path is None:
            raise NoImisFile('A file path must be set on this object before merging.')
        if isinstance(new_file_obj, str):
            # new_file_obj is a file path
            new_file = ImisFile()
            new_file.set_file_path(new_file_obj)
        elif isinstance(new_file_obj, ImisFile):
            # Use the caller's object directly.  The old code left `new_file`
            # as a fresh, empty ImisFile here, so the passed object was
            # silently ignored.
            new_file = new_file_obj
        else:
            raise ValueError('file_path must be a string or ImisFile type.')
        # If we haven't read in the files then read them in
        if len(self.inactive_member_list) == 0 and len(self.active_member_list) == 0:
            self.read()
        if len(new_file.inactive_member_list) == 0 and len(new_file.active_member_list) == 0:
            new_file.read()
        # Mark all of the old (self) active members as inactive
        for member in self.active_member_list:
            member.active = False
            self.inactive_member_list.append(member)
        self.active_member_list = []
        # Reactivate every member found on the new file's active list; any
        # dates the member's iMIS number was selected are in the old data.
        new_file.active_member_list.sort()
        for new_member in new_file.active_member_list:
            try:
                pos = self.inactive_member_list.index(new_member)
                old_member = self.inactive_member_list.pop(pos)
            except ValueError:
                # Never seen before: keep the incoming record itself.  The old
                # code set old_member to None here and then crashed on
                # `old_member.active = True`.
                old_member = new_member
            # Update the member to active and refresh the name if it changed.
            old_member.active = True
            if len(new_member.last_name) > 0 \
                    and new_member.last_name != old_member.last_name:
                old_member.last_name = new_member.last_name
            if len(new_member.first_name) > 0 \
                    and new_member.first_name != old_member.first_name:
                old_member.first_name = new_member.first_name
            self.active_member_list.append(old_member)
        self.inactive_member_list = self.inactive_member_list + new_file.inactive_member_list
        self.inactive_member_list.sort()
|
# Sequence-unpacking examples, ported to Python 3: the original used
# Python 2 print statements and the literal 05, both SyntaxErrors in py3.

p = (4, 5)
x, y = p
print(x)
print(y)

# Unpack a heterogeneous record (05/16 written as plain 5/16 -- leading-zero
# integer literals are invalid in Python 3).
data = ['ACME', 50, 91.1, (2016, 5, 16)]
name, shares, price, date = data
print(name)
print(shares)
print(price)
print(date)

# Nested unpacking pulls the date tuple apart in the same statement.
name, shares, price, (year, month, day) = data
print(name)
print(shares)
print(price)
print(year)
print(month)
print(day)

# Any iterable unpacks, including strings.
s = 'hello'
a, b, c, d, e = s
print(a, b, c, d, e)

# '_' is the conventional throw-away name for unwanted fields.
_, shares, price, _ = data
print(shares)
print(price)
|
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy as np
from numpy import linalg as la
import pandas as pd
from scipy import stats
from scipy.special import logsumexp
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import KFold
import math
from scipy.stats import multivariate_normal as mvnorm
from matplotlib import pyplot as plt
import sys
# In[25]:
def find_gmm(x_data, num_gmm):
    """Initialise a Gaussian mixture model.

    Draws `num_gmm` component means from a Gaussian fitted to the data and
    starts every component covariance at the empirical covariance of the data.

    :param x_data: (n, d) sample matrix
    :param num_gmm: number of mixture components
    :return: list of [mean, covariance] pairs, one per component
    """
    # Empirical moments of the data set.
    mean = np.mean(x_data, axis=0)
    cov = np.cov(x_data.T)
    # One random mean per component, drawn from N(mean, cov).
    gau_mean = np.random.multivariate_normal(mean, cov, num_gmm)
    # Each component gets its own covariance copy so no two components alias
    # the same array object (the original shared one array and also kept an
    # unused loop variable).
    return [[m, cov.copy()] for m in gau_mean]
# In[26]:
#find sigma
def find_sigma(data, phi_list, uk, nk):
    """M-step covariance update for one mixture component.

    :param data: (n, d) sample matrix
    :param phi_list: per-sample responsibilities of this component
    :param uk: (d,) component mean
    :param nk: effective sample count (sum of responsibilities)
    :return: (d, d) responsibility-weighted covariance

    The accumulator is sized from the data instead of the previous
    hard-coded 10x10, so any dimensionality works; results are unchanged
    for 10-dimensional input.
    """
    dim = data.shape[1]
    acc = np.zeros((dim, dim))
    for r in range(data.shape[0]):
        # Weighted outer product of the centred sample.
        diff = (data[r, :] - uk).reshape(dim, 1)
        acc += phi_list[r] * diff.dot(diff.T)
    return acc / nk
# In[27]:
def get_phi(data, gmm, mix_weight, k):
    """E-step responsibility of component k for a single sample.

    Returns pi_k * N(data | mu_k, Sigma_k) normalised by the full mixture
    density at `data`.
    """
    weights = np.array(mix_weight)
    # Density of the sample under every component.
    densities = np.array([mvnorm.pdf(data, mu, sigma, allow_singular=True)
                          for mu, sigma in gmm])
    # Weighted density of component k over the total mixture density.
    return (weights[k] * densities[k]) / weights.dot(densities.T)
# In[28]:
def get_multip(data, num_gmm, mix_weight, gmm):
    """Log of the mixture density at `data`, summed over the first num_gmm components."""
    mixture_density = sum(mix_weight[k] * mvnorm.pdf(data, gmm[k][0], gmm[k][1])
                          for k in range(num_gmm))
    return np.log(mixture_density)
# In[29]:
def get_multip_pred(data, num_gmm, mix_weight, gmm):
    """Mixture density at `data` (same as get_multip but without the log).

    Used at prediction time where the prior-weighted likelihoods of the two
    class models are compared directly.
    """
    return sum(mix_weight[k] * mvnorm.pdf(data, gmm[k][0], gmm[k][1])
               for k in range(num_gmm))
# In[30]:
def _count_pair(y_true, y_pred, true_val, pred_val):
    """Count positions where y_true == true_val and y_pred == pred_val."""
    return sum((y_true == true_val) & (y_pred == pred_val))

def find_TP(y_true, y_pred):
    """True positives: actual 1, predicted 1."""
    return _count_pair(y_true, y_pred, 1, 1)

def find_TN(y_true, y_pred):
    """True negatives: actual 0, predicted 0."""
    return _count_pair(y_true, y_pred, 0, 0)

def find_FP(y_true, y_pred):
    """False positives: actual 0, predicted 1."""
    return _count_pair(y_true, y_pred, 0, 1)

def find_FN(y_true, y_pred):
    """False negatives: actual 1, predicted 0."""
    return _count_pair(y_true, y_pred, 1, 0)
# In[8]:
# Experiment setup for a 3-component mixture: load training data and split it
# by class label.
num_gmm = 3
x_array = np.genfromtxt('hw3-data/Prob3_Xtrain.csv',delimiter = ',')
y_array = np.genfromtxt('hw3-data/Prob3_ytrain.csv',delimiter = ',')
index1 = np.argwhere(y_array == 1)
index0 = np.argwhere(y_array == 0)
index1 = index1.flatten()
x1_data = x_array[index1]
index0 = index0.flatten()
x0_data = x_array[index0]
# In[9]:
iteration = 30
run = 10
def cal_objective_function(data, iteration, gmm, num_gmm, mix_weight):
    """Run `iteration` EM steps, updating `gmm` (means/covariances) and
    `mix_weight` IN PLACE, and return the log-marginal objective after each
    iteration."""
    total_L = []
    for iter in range(iteration):
        # E-step: responsibilities of each component for every sample.
        phi_list = []
        for i in range(num_gmm):
            phi_list.append(np.apply_along_axis(get_phi, 1, data, gmm, mix_weight, i))
        # M-step: update weight, mean and covariance of every component.
        for k in range(num_gmm):
            nk = phi_list[k].sum()
            mix_weight[k] = nk / len(phi_list[k])
            gmm[k][0] = (1/nk)*np.sum(((phi_list[k]*data.T).T), axis = 0)
            gmm[k][1] = find_sigma(data, phi_list[k], gmm[k][0], nk)
        # Objective: sum of per-sample log mixture densities.
        prob_list = np.apply_along_axis(get_multip, 1,data,num_gmm, mix_weight, gmm)
        L = np.sum(prob_list)
        total_L.append(L)
    return total_L
# In[10]:
# Run EM `run` times from random initialisations for each class and keep the
# parameters of the run with the highest final objective.
max0 = -sys.float_info.max
max1 = -sys.float_info.max
final_weight0 = []
final_weight1 = []
final_gmm0 = []
final_gmm1 = []
for times in range(run):
    print('run', times)
    gmm0 = find_gmm(x0_data, num_gmm)
    mix_weight0 = [1 / num_gmm for i in range (num_gmm)]
    plt.figure(1)
    Loss0 = cal_objective_function(x0_data, iteration, gmm0, num_gmm, mix_weight0)
    # Keep the best run (highest final log-marginal objective).
    if Loss0[-1] > max0:
        max0 = Loss0[-1]
        final_gmm0 = gmm0
        final_weight0 = mix_weight0
    plt.plot(range(5, 31), Loss0[4:30], label = 'run %d'%(times))
plt.xticks([int(x) for x in np.linspace(5, 30)])
plt.xlabel('Iterations')
plt.ylabel('Log marginal objective function')
plt.title ('Class 0 data log marginal objective function for 10 runs')
plt.legend()
plt.show()
# In[11]:
# Same best-of-`run` search for the class-1 model.
for times in range(run):
    print("run", times)
    gmm1 = find_gmm(x1_data, num_gmm)
    mix_weight1 = [1 / num_gmm for i in range(num_gmm)]
    plt.figure(2)
    Loss1 = cal_objective_function(x1_data, iteration, gmm1, num_gmm, mix_weight1)
    if Loss1[-1] > max1:
        max1 = Loss1[-1]
        final_gmm1 = gmm1
        final_weight1 = mix_weight1
    plt.plot(range(5, 31), Loss1[4:30], label = 'run %d' %(times))
plt.xticks([int(x) for x in np.linspace(5,30)])
plt.xlabel('Iterations')
plt.ylabel('Log marginal objective function')
plt.title('Class 1 data log marginal objective function for 10 runs')
plt.legend()
plt.show()
# In[12]:
#part b: Using the best run for each class after 30 iterations, predict the testing data using a Bayes classifier
#and show the result in a 2 2 confusion matrix, along with the accuracy percentage. Repeat
#this process for a 1-, 2-, 3- and 4-Gaussian mixture model. Show all results nearby each other,
#and don’t repeat Part (a) for these other cases
print(len(index0))
print(len(index1))
# Class priors come from the training-label frequencies.
prior0 = len(index0) / x_array.shape[0]
prior1 = len(index1) / x_array.shape[0]
print(prior0)
print(prior1)
x_test = np.genfromtxt('hw3-data/Prob3_Xtest.csv',delimiter = ',')
y_test = np.genfromtxt('hw3-data/Prob3_ytest.csv',delimiter = ',')
# Bayes classifier: pick the class with the larger prior-weighted likelihood.
likelihood0 = prior0 * np.apply_along_axis(get_multip_pred,1,x_test,num_gmm,final_weight0,final_gmm0)
likelihood1 = prior1 * np.apply_along_axis(get_multip_pred,1,x_test,num_gmm,final_weight1,final_gmm1)
p = np.greater(likelihood1, likelihood0)
y_pred = p.astype(int)
# print(y_pred)
# Confusion-matrix entries and accuracy on the test set.
TP = find_TP(y_test, y_pred)
FP = find_FP(y_test, y_pred)
TN = find_TN(y_test, y_pred)
FN = find_FN(y_test, y_pred)
print("TP",TP)
print("FP",FP)
print("TN",TN)
print("FN",FN)
acc = (TP + TN) / x_test.shape[0]
print("Accuracy", acc)
# In[9]:
# NOTE(review): this cell and the two below duplicate the num_gmm = 3
# experiment verbatim (including an identical redefinition of
# cal_objective_function), changing only num_gmm -- notebook-export style.
num_gmm = 1
x_array = np.genfromtxt('hw3-data/Prob3_Xtrain.csv',delimiter = ',')
y_array = np.genfromtxt('hw3-data/Prob3_ytrain.csv',delimiter = ',')
index1 = np.argwhere(y_array == 1)
index0 = np.argwhere(y_array == 0)
index1 = index1.flatten()
x1_data = x_array[index1]
index0 = index0.flatten()
x0_data = x_array[index0]
# In[10]:
iteration = 30
run = 10
def cal_objective_function(data, iteration, gmm, num_gmm, mix_weight):
    """Run `iteration` EM steps, updating `gmm` and `mix_weight` in place,
    and return the log-marginal objective after each iteration."""
    total_L = []
    for iter in range(iteration):
        # E-step: responsibilities per component.
        phi_list = []
        for i in range(num_gmm):
            phi_list.append(np.apply_along_axis(get_phi, 1, data, gmm, mix_weight, i))
        # M-step: weights, means, covariances.
        for k in range(num_gmm):
            nk = phi_list[k].sum()
            mix_weight[k] = nk / len(phi_list[k])
            gmm[k][0] = (1/nk)*np.sum(((phi_list[k]*data.T).T), axis = 0)
            gmm[k][1] = find_sigma(data, phi_list[k], gmm[k][0], nk)
        prob_list = np.apply_along_axis(get_multip, 1,data,num_gmm, mix_weight, gmm)
        L = np.sum(prob_list)
        total_L.append(L)
    return total_L
# In[11]:
# Best-of-`run` model selection for each class (plots suppressed here).
max0 = -sys.float_info.max
max1 = -sys.float_info.max
final_weight0 = []
final_weight1 = []
final_gmm0 = []
final_gmm1 = []
for times in range(run):
    print('run', times)
    gmm0 = find_gmm(x0_data, num_gmm)
    mix_weight0 = [1 / num_gmm for i in range (num_gmm)]
    plt.figure(1)
    Loss0 = cal_objective_function(x0_data, iteration, gmm0, num_gmm, mix_weight0)
    if Loss0[-1] > max0:
        max0 = Loss0[-1]
        final_gmm0 = gmm0
        final_weight0 = mix_weight0
    plt.plot(range(5, 31), Loss0[4:30], label = 'run %d'%(times))
plt.xticks([int(x) for x in np.linspace(5, 30)])
plt.xlabel('Iterations')
plt.ylabel('Log marginal objective function')
plt.title ('Class 0 data log marginal objective function for 10 runs')
plt.legend()
#plt.show()
# In[12]:
for times in range(run):
    print("run", times)
    gmm1 = find_gmm(x1_data, num_gmm)
    mix_weight1 = [1 / num_gmm for i in range(num_gmm)]
    plt.figure(2)
    Loss1 = cal_objective_function(x1_data, iteration, gmm1, num_gmm, mix_weight1)
    if Loss1[-1] > max1:
        max1 = Loss1[-1]
        final_gmm1 = gmm1
        final_weight1 = mix_weight1
    plt.plot(range(5, 31), Loss1[4:30], label = 'run %d' %(times))
plt.xticks([int(x) for x in np.linspace(5,30)])
plt.xlabel('Iterations')
plt.ylabel('Log marginal objective function')
plt.title('Class 1 data log marginal objective function for 10 runs')
plt.legend()
# plt.show()
# In[13]:
# Bayes-classifier evaluation on the test set (confusion matrix + accuracy).
print(len(index0))
print(len(index1))
prior0 = len(index0) / x_array.shape[0]
prior1 = len(index1) / x_array.shape[0]
print(prior0)
print(prior1)
x_test = np.genfromtxt('hw3-data/Prob3_Xtest.csv',delimiter = ',')
y_test = np.genfromtxt('hw3-data/Prob3_ytest.csv',delimiter = ',')
likelihood0 = prior0 * np.apply_along_axis(get_multip_pred,1,x_test,num_gmm,final_weight0,final_gmm0)
likelihood1 = prior1 * np.apply_along_axis(get_multip_pred,1,x_test,num_gmm,final_weight1,final_gmm1)
p = np.greater(likelihood1, likelihood0)
y_pred = p.astype(int)
# print(y_pred)
TP = find_TP(y_test, y_pred)
FP = find_FP(y_test, y_pred)
TN = find_TN(y_test, y_pred)
FN = find_FN(y_test, y_pred)
print("TP",TP)
print("FP",FP)
print("TN",TN)
print("FN",FN)
acc = (TP + TN) / x_test.shape[0]
print("Accuracy", acc)
# In[14]:
# Repeat of the experiment with a 2-component mixture (duplicated cell).
num_gmm = 2
x_array = np.genfromtxt('hw3-data/Prob3_Xtrain.csv',delimiter = ',')
y_array = np.genfromtxt('hw3-data/Prob3_ytrain.csv',delimiter = ',')
index1 = np.argwhere(y_array == 1)
index0 = np.argwhere(y_array == 0)
index1 = index1.flatten()
x1_data = x_array[index1]
index0 = index0.flatten()
x0_data = x_array[index0]
# In[15]:
iteration = 30
run = 10
def cal_objective_function(data, iteration, gmm, num_gmm, mix_weight):
    """Run `iteration` EM steps, updating `gmm` and `mix_weight` in place,
    and return the log-marginal objective after each iteration."""
    total_L = []
    for iter in range(iteration):
        # E-step: responsibilities per component.
        phi_list = []
        for i in range(num_gmm):
            phi_list.append(np.apply_along_axis(get_phi, 1, data, gmm, mix_weight, i))
        # M-step: weights, means, covariances.
        for k in range(num_gmm):
            nk = phi_list[k].sum()
            mix_weight[k] = nk / len(phi_list[k])
            gmm[k][0] = (1/nk)*np.sum(((phi_list[k]*data.T).T), axis = 0)
            gmm[k][1] = find_sigma(data, phi_list[k], gmm[k][0], nk)
        prob_list = np.apply_along_axis(get_multip, 1,data,num_gmm, mix_weight, gmm)
        L = np.sum(prob_list)
        total_L.append(L)
    return total_L
# In[16]:
# Best-of-`run` model selection for each class (plots suppressed here).
max0 = -sys.float_info.max
max1 = -sys.float_info.max
final_weight0 = []
final_weight1 = []
final_gmm0 = []
final_gmm1 = []
for times in range(run):
    print('run', times)
    gmm0 = find_gmm(x0_data, num_gmm)
    mix_weight0 = [1 / num_gmm for i in range (num_gmm)]
    plt.figure(1)
    Loss0 = cal_objective_function(x0_data, iteration, gmm0, num_gmm, mix_weight0)
    if Loss0[-1] > max0:
        max0 = Loss0[-1]
        final_gmm0 = gmm0
        final_weight0 = mix_weight0
    plt.plot(range(5, 31), Loss0[4:30], label = 'run %d'%(times))
plt.xticks([int(x) for x in np.linspace(5, 30)])
plt.xlabel('Iterations')
plt.ylabel('Log marginal objective function')
plt.title ('Class 0 data log marginal objective function for 10 runs')
plt.legend()
#plt.show()
# In[17]:
for times in range(run):
    print("run", times)
    gmm1 = find_gmm(x1_data, num_gmm)
    mix_weight1 = [1 / num_gmm for i in range(num_gmm)]
    plt.figure(2)
    Loss1 = cal_objective_function(x1_data, iteration, gmm1, num_gmm, mix_weight1)
    if Loss1[-1] > max1:
        max1 = Loss1[-1]
        final_gmm1 = gmm1
        final_weight1 = mix_weight1
    plt.plot(range(5, 31), Loss1[4:30], label = 'run %d' %(times))
plt.xticks([int(x) for x in np.linspace(5,30)])
plt.xlabel('Iterations')
plt.ylabel('Log marginal objective function')
plt.title('Class 1 data log marginal objective function for 10 runs')
plt.legend()
#plt.show()
# In[18]:
# Bayes-classifier evaluation on the test set (confusion matrix + accuracy).
print(len(index0))
print(len(index1))
prior0 = len(index0) / x_array.shape[0]
prior1 = len(index1) / x_array.shape[0]
print(prior0)
print(prior1)
x_test = np.genfromtxt('hw3-data/Prob3_Xtest.csv',delimiter = ',')
y_test = np.genfromtxt('hw3-data/Prob3_ytest.csv',delimiter = ',')
likelihood0 = prior0 * np.apply_along_axis(get_multip_pred,1,x_test,num_gmm,final_weight0,final_gmm0)
likelihood1 = prior1 * np.apply_along_axis(get_multip_pred,1,x_test,num_gmm,final_weight1,final_gmm1)
p = np.greater(likelihood1, likelihood0)
y_pred = p.astype(int)
# print(y_pred)
TP = find_TP(y_test, y_pred)
FP = find_FP(y_test, y_pred)
TN = find_TN(y_test, y_pred)
FN = find_FN(y_test, y_pred)
print("TP",TP)
print("FP",FP)
print("TN",TN)
print("FN",FN)
acc = (TP + TN) / x_test.shape[0]
print("Accuracy", acc)
# In[31]:
# Repeat of the experiment with a 4-component mixture (duplicated cell).
num_gmm = 4
x_array = np.genfromtxt('hw3-data/Prob3_Xtrain.csv',delimiter = ',')
y_array = np.genfromtxt('hw3-data/Prob3_ytrain.csv',delimiter = ',')
index1 = np.argwhere(y_array == 1)
index0 = np.argwhere(y_array == 0)
index1 = index1.flatten()
x1_data = x_array[index1]
index0 = index0.flatten()
x0_data = x_array[index0]
# In[32]:
iteration = 30
run = 10
def cal_objective_function(data, iteration, gmm, num_gmm, mix_weight):
    """Run `iteration` EM steps, updating `gmm` and `mix_weight` in place,
    and return the log-marginal objective after each iteration."""
    total_L = []
    for iter in range(iteration):
        # E-step: responsibilities per component.
        phi_list = []
        for i in range(num_gmm):
            phi_list.append(np.apply_along_axis(get_phi, 1, data, gmm, mix_weight, i))
        # M-step: weights, means, covariances.
        for k in range(num_gmm):
            nk = phi_list[k].sum()
            mix_weight[k] = nk / len(phi_list[k])
            gmm[k][0] = (1/nk)*np.sum(((phi_list[k]*data.T).T), axis = 0)
            gmm[k][1] = find_sigma(data, phi_list[k], gmm[k][0], nk)
        prob_list = np.apply_along_axis(get_multip, 1,data,num_gmm, mix_weight, gmm)
        L = np.sum(prob_list)
        total_L.append(L)
    return total_L
# In[33]:
# Best-of-`run` model selection for each class (plots suppressed here).
max0 = -sys.float_info.max
max1 = -sys.float_info.max
final_weight0 = []
final_weight1 = []
final_gmm0 = []
final_gmm1 = []
for times in range(run):
    print('run', times)
    gmm0 = find_gmm(x0_data, num_gmm)
    mix_weight0 = [1 / num_gmm for i in range (num_gmm)]
    plt.figure(1)
    Loss0 = cal_objective_function(x0_data, iteration, gmm0, num_gmm, mix_weight0)
    if Loss0[-1] > max0:
        max0 = Loss0[-1]
        final_gmm0 = gmm0
        final_weight0 = mix_weight0
    plt.plot(range(5, 31), Loss0[4:30], label = 'run %d'%(times))
plt.xticks([int(x) for x in np.linspace(5, 30)])
plt.xlabel('Iterations')
plt.ylabel('Log marginal objective function')
plt.title ('Class 0 data log marginal objective function for 10 runs')
plt.legend()
#plt.show()
# In[34]:
for times in range(run):
    print("run", times)
    gmm1 = find_gmm(x1_data, num_gmm)
    mix_weight1 = [1 / num_gmm for i in range(num_gmm)]
    plt.figure(2)
    Loss1 = cal_objective_function(x1_data, iteration, gmm1, num_gmm, mix_weight1)
    if Loss1[-1] > max1:
        max1 = Loss1[-1]
        final_gmm1 = gmm1
        final_weight1 = mix_weight1
    plt.plot(range(5, 31), Loss1[4:30], label = 'run %d' %(times))
plt.xticks([int(x) for x in np.linspace(5,30)])
plt.xlabel('Iterations')
plt.ylabel('Log marginal objective function')
plt.title('Class 1 data log marginal objective function for 10 runs')
plt.legend()
#plt.show()
# In[35]:
# Bayes-classifier evaluation on the test set (confusion matrix; the final
# accuracy print is commented out in the original).
print(len(index0))
print(len(index1))
prior0 = len(index0) / x_array.shape[0]
prior1 = len(index1) / x_array.shape[0]
print(prior0)
print(prior1)
x_test = np.genfromtxt('hw3-data/Prob3_Xtest.csv',delimiter = ',')
y_test = np.genfromtxt('hw3-data/Prob3_ytest.csv',delimiter = ',')
likelihood0 = prior0 * np.apply_along_axis(get_multip_pred,1,x_test,num_gmm,final_weight0,final_gmm0)
likelihood1 = prior1 * np.apply_along_axis(get_multip_pred,1,x_test,num_gmm,final_weight1,final_gmm1)
p = np.greater(likelihood1, likelihood0)
y_pred = p.astype(int)
# print(y_pred)
TP = find_TP(y_test, y_pred)
FP = find_FP(y_test, y_pred)
TN = find_TN(y_test, y_pred)
FN = find_FN(y_test, y_pred)
print("TP",TP)
print("FP",FP)
print("TN",TN)
print("FN",FN)
acc = (TP + TN) / x_test.shape[0]
#print("Accuracy", acc)
# In[ ]:
|
from models import RequestItem
from django.http import HttpResponse
class StoreRequestMiddleware(object):
    """Middleware that saves every request to the database and records the
    response status code on the stored row.

    The stored row is attached to the request object itself rather than the
    middleware instance: one middleware instance serves many (possibly
    concurrent) requests, so keeping the row on `self` let parallel requests
    overwrite each other's record before process_response ran.
    """

    def process_request(self, request):
        # Persist the request immediately and stash the row on the request so
        # process_response can find the matching record for THIS request.
        request._stored_request_item = RequestItem.objects.create(
            method=request.method,
            path=request.path,
            host=request.get_host(),
            get_content=request.GET,
            post_content=request.POST,
            session=request.session,
            meta_info=request.META)

    def process_response(self, request, response):
        item = getattr(request, '_stored_request_item', None)
        if item is not None and isinstance(response, HttpResponse):
            item.response = response.status_code
            item.save()
        return response
|
# -*- coding: utf-8 -*-
#num=int(input())
#if (num % 4 ==0) and not(num % 100 == 0):
# if (num % 4 == 0) or (num % 400 == 0):
# print(num,"is a leap year.")
#else:
# print(num,"is not a leap year.")
#year=int(input())
#if year % 4 == 0:
# if year % 400 ==0:
# print(year,"is a leap year.")
#elif year % 400 ==0:
#
# Read a year and report whether it is a Gregorian leap year:
# divisible by 400, or divisible by 4 but not by 100.
year=int(input())
if (year % 400==0)or (year % 4 ==0 and year % 100 != 0):
    print(year,"is a leap year.")
else:
    print(year,"is not a leap year.")
# 排序函数
# 重要的是reverse属性
# sorted() demos: key functions and the reverse flag.
a = [1, 99, 6, 8, 44, -58, -11, -33]
# Order by magnitude, ignoring sign.
print(sorted(a, key=abs))
# Descending numeric order.
print(sorted(a, reverse=True))
b = ['Lisa', 'Bob', 'Adam', 'Bart']
# Default ordering is case-sensitive lexicographic.
alphabetical = sorted(b)
print(alphabetical)
# Case-insensitive ordering via str.lower as the key.
case_insensitive = sorted(b, key=str.lower)
print(case_insensitive)
# A one-element tuple still needs the trailing comma.
x = (1,)
print(list(x)[0])
|
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class MixturePrior(object):
    """Scale mixture of two zero-mean Gaussians:
    pi * N(0, sigma2) + (1 - pi) * N(0, sigma1), built with TFP."""

    def __init__(self, pi, sigma1, sigma2):
        # Store every hyper-parameter as float32 so all components of the
        # distribution share one dtype.
        self.mu, self.pi, self.sigma1, self.sigma2 = (np.float32(v) for v in (0.0, pi, sigma1, sigma2))
        self.dist = tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(
                probs=[1-self.pi, self.pi]),
            components_distribution=tfd.Normal(
                # Use the stored float32 mean -- the original hard-coded the
                # int literals [0, 0] here and never used self.mu, which can
                # trip TFP's dtype check against the float32 scales.
                loc=[self.mu, self.mu],
                scale=[self.sigma1, self.sigma2]))

    def sample(self):
        """Draw one sample from the mixture prior."""
        return self.dist.sample()

    def log_prob(self, x):
        """Log-density of x under the prior (x is cast to float32 first)."""
        x = tf.cast(x, tf.float32)
        return self.dist.log_prob(x)
|
# For each divisor d read from stdin, print d * (sum of squares of the
# multiples of d below 600):  sum_{i=0}^{600//d - 1} (i*d)^2 * d.
while True:
    try:
        d = int(input())
    except (EOFError, ValueError):
        # Stop on end of input or a non-numeric line.  The original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        break
    print(sum([(i*d)**2*d for i in range(600//d)]))
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
from tkinter import *
import os
import shutil
from tkinter import messagebox
# Root Tk window, created at import time so the callback functions below can use it.
root=Tk()
def compare_file(file1, file2):
    """Compare two text files line by line.

    :param file1: path of the first file
    :param file2: path of the second file
    :return: list of 1-based line numbers where the files differ

    Files of unequal length differ on the extra lines: the shorter file is
    padded with '' (the original only iterated file1, so extra trailing lines
    in file2 went unnoticed).  File handles are closed even on error.
    """
    from itertools import zip_longest
    diff = []
    with open(file1, 'r') as f1, open(file2, 'r') as f2:
        for count, (line1, line2) in enumerate(zip_longest(f1, f2, fillvalue=''), start=1):
            if line1 != line2:
                diff.append(count)
    return diff
def allrenew():
    """Synchronise every file pair listed in the listbox.

    Each entry has the form 'name, path1, path2'; when the two files differ,
    the newer one (by mtime) is copied over the older one.
    """
    entries = listfile.get(0, listfile.size())
    for entry in entries:
        parts = str(entry).split(",")
        print(str(entry))
        if len(parts) != 3:
            messagebox.showerror(title="error", message="格式错误")
            # Skip the malformed row instead of indexing past the end
            # (the original fell through and raised IndexError).
            continue
        filepath1 = parts[1].strip()
        filepath2 = parts[2].strip()
        if os.path.isfile(filepath1) and os.path.isfile(filepath2):
            # compare_file returns a LIST of differing line numbers; an empty
            # list means identical.  The original tested `differ == 0`, which
            # is never true for a list, so identical files were still copied.
            if not compare_file(filepath1, filepath2):
                continue
            time1 = os.path.getmtime(filepath1)
            time2 = os.path.getmtime(filepath2)
            if time1 > time2:
                shutil.copyfile(filepath1, filepath2)
            else:
                shutil.copyfile(filepath2, filepath1)
def renew(i):
    """Synchronise a single listbox entry of the form 'name, path1, path2'
    (see allrenew): the newer file (by mtime) overwrites the older one
    when the contents differ."""
    parts = str(i).split(",")
    print(str(i))
    if len(parts) != 3:
        messagebox.showerror(title="error", message="格式错误")
        # Malformed entry: bail out instead of indexing past the end
        # (the original fell through and raised IndexError).
        return
    filepath1 = parts[1].strip()
    filepath2 = parts[2].strip()
    if os.path.isfile(filepath1) and os.path.isfile(filepath2):
        # Empty diff list -> files identical -> nothing to do.  The original
        # compared the list against 0, which never matches, so identical
        # files were copied anyway.
        if not compare_file(filepath1, filepath2):
            return
        if os.path.getmtime(filepath1) > os.path.getmtime(filepath2):
            shutil.copyfile(filepath1, filepath2)
        else:
            shutil.copyfile(filepath2, filepath1)
def input():
    """Add the entry-field text to the listbox and write it to log.txt.

    NOTE: the name shadows the builtin input(); it is kept because the
    "添加" button below is bound to this name.  Only the newest entry is
    written here -- close() rewrites the full log on exit.
    """
    entry_text = var.get().strip()
    listfile.insert(0, entry_text)
    # The with-block closes the handle; the original leaked an open file
    # (and shadowed the builtin name `str` with its local variable).
    with open("log.txt", mode='w') as log_file:
        log_file.writelines(entry_text)
def close():
    """Persist the listbox contents to log.txt (one entry per line) and quit."""
    # BUG FIX: the handle was never closed; also, mode 'w' already
    # truncates, so the explicit truncate() call was redundant.
    with open("log.txt", mode='w') as log:
        for item in listfile.get(0, listfile.size()):
            log.writelines(str(item).strip() + "\n")
    root.destroy()
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    root.geometry("717x426")
    # Persist the list before the window is destroyed.
    root.protocol("WM_DELETE_WINDOW", close)
    listfile = Listbox(root, width=100, height=15)
    # BUG FIX: a missing log.txt used to crash the app on first launch.
    try:
        with open("log.txt", mode='r') as file:
            for t in file:
                listfile.insert(0, t)
    except FileNotFoundError:
        pass  # first run: no history yet
    var = StringVar()
    var.set("")
    entry = Entry(root, textvariable=var, width=100)
    entry.pack()
    A = Button(root,
               text="添加",
               command=input)
    B = Button(root,
               text="删除",
               command=lambda x=listfile: x.delete(ACTIVE))
    C = Button(root,
               text="更新所有",
               command=allrenew)
    D = Button(root, text="更新", command=lambda: renew(listfile.get(listfile.curselection())))
    sb = Scrollbar(root)
    sb.pack(side=RIGHT, fill=Y)
    sb.config(command=listfile.yview)
    listfile.pack()
    A.pack()
    B.pack()
    C.pack()
    D.pack()
    mainloop()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
|
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.select import Select
import time

# Open a local HTML test page in Chrome.
driver=webdriver.Chrome()
driver.get("file:///C:/Users/%E5%9C%9F%E8%B1%86/Desktop/%E6%96%B0%E5%BB%BA%E6%96%87%E6%9C%AC%E6%96%87%E6%A1%A3%20(8).html")
'''mouse=driver.find_element_by_link_text("设置")
ActionChains(driver).move_to_element(mouse).perform()
driver.find_element_by_link_text("搜索设置").click()
driver.implicitly_wait(4)
#s=driver.find_element_by_xpath("//*[id='nr']/option[1]").click()
m=driver.find_element_by_xpath("//*[@id='nr']")
#添加悬停,否则定位不到
#ActionChains(driver).move_to_element(mouse).perform()
Select(m).select_by_index(1)'''
# Toggle every checkbox on the page, then report whether #c1 ended up selected.
m=driver.find_elements_by_xpath("//*[@type='checkbox']")
for i in m:
    i.click()
driver.implicitly_wait(2)
r=driver.find_element_by_id('c1').is_selected()
print(r)
#m=driver.switch_to_alert()
#m.accept()
# NOTE(review): click_and_hold expects a WebElement (or None); passing the
# string "target" will raise at runtime — confirm the intended element.
ActionChains(driver).click_and_hold("target").perform()
# NOTE(review): this chain is never .perform()'d, so the offset move is a no-op.
ActionChains(driver).move_by_offset(2,0)
|
#!/usr/bin/env python
"""
This returns whether or not a PDB file contains all of the require
backbone heavy atoms: ' CA ', ' CB ', ' C ', ' N ', ' O '
"""
import sys
# The 20 standard amino-acid residue names (PDB three-letter codes).
# A frozenset gives O(1) membership tests in main()'s per-line loop and
# documents that the collection is immutable.
res_types = frozenset([
    'ALA', 'ARG', 'ASN', 'ASP', 'CYS',
    'GLU', 'GLN', 'GLY', 'HIS', 'ILE',
    'LEU', 'LYS', 'MET', 'PHE', 'PRO',
    'SER', 'THR', 'TRP', 'TYR', 'VAL',
])
# Tracks which required heavy atoms have been seen so far; keys use the
# fixed-width PDB atom-name field (columns 13-16, i.e. line[12:16]).
# NOTE(review): line[12:16] is always 4 characters, but some keys below
# appear to be 3 characters (' C ', ' N ', ' O ') — if whitespace was lost
# they should read ' C  ', ' N  ', ' O  '; confirm against a real PDB file.
atoms_found = {' CA ':False,
               ' CB ':False,
               ' C ':False,
               ' N ':False,
               ' O ':False}
def main():
    """Read a PDB file from stdin and terminate with exit status 0 iff
    every required heavy atom type was seen in at least one standard
    amino-acid residue; exit status 1 otherwise."""
    for line in sys.stdin:
        if line.startswith('ATOM '):
            # Fixed-column PDB fields: atom name cols 13-16, residue 18-20.
            atom_type = line[12:16]
            res_type = line[17:20]
            if res_type in res_types:
                atoms_found[atom_type] = True
    # all() replaces the original manual flag loop.
    if all(atoms_found.values()):
        sys.exit(0)  # normal termination indicates all atoms were found
    else:
        # sys.exit is used instead of the builtin exit(), which is only
        # guaranteed to exist when the site module is loaded.
        sys.exit(1)  # this PDB file is missing required heavy atoms


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
import urllib2, os, subprocess, shutil, time, re
from sys import argv, exit
from distutils.version import LooseVersion
script, log_file = argv
class install(object):
    """Download, mount, and install the Dropbox.app DMG (Python 2 script).

    Each step returns a human-readable status string that the caller
    appends to a log file via the_log().
    """
    def __init__(self):
        self.app_name = "Dropbox"
        self.the_app = self.app_name + ".app"
        self.info_file = "/Applications/%s/Contents/Info.plist" % self.the_app
        self.latest_vers = "2.6.7"
        self.url = "https://www.dropbox.com/download?plat=mac&full=1"
        self.file_name = "/tmp/dropbox.dmg"
        self.dmg_path = "/tmp/dropbox.dmg"
        self.mnt_cmd = ['/usr/bin/hdiutil', 'attach', '-nobrowse', self.dmg_path]
        self.src = "/Volumes/Dropbox Installer/Dropbox.app"
        self.dst = "/Applications/Dropbox.app"
        self.mnt_path = "/Volumes/Dropbox Installer/"
        self.unmnt_cmd = ['/usr/bin/hdiutil', 'detach', self.mnt_path]
    def check_inst(self):
        # The app is "installed" iff its bundle directory exists.
        return os.path.isdir(self.dst)
    def check_version(self):
        # Scan Info.plist for CFBundleVersion; the version string lives in
        # the <string> element on the following line.
        # NOTE(review): returns None (and the caller's tuple-unpack fails)
        # when the key is never found; ``max(0, i+1)`` is a no-op for i >= 0
        # and can still IndexError when the key is on the last line.
        a = open(self.info_file, "r")
        lines = a.readlines()
        r = re.compile(r'CFBundleVersion')
        for i in range(len(lines)):
            if r.search(lines[i]):
                c = lines[max(0, i+1)]
                d = c.split('<string>')
                e = ''.join(d[1])
                f = e.split('</string>')
                g = f[0]
                return LooseVersion(g) >= LooseVersion(self.latest_vers), g
    def download(self):
        f = urllib2.urlopen(self.url) # open the url
        local_file = open(self.file_name, "w") # open the file to write to
        local_file.write(f.read()) # write the download to the local file
        local_file.close() # close the file
        a = os.path.isfile(self.file_name)
        if a == True:
            return "%s was downloaded.\n" % self.app_name
        elif a == False:
            return "Something went wrong and %s wasn't downloaded, exiting." % self.app_name
            # NOTE(review): unreachable — the return above exits first.
            exit(1)
        else:
            return "error, exiting."
            # NOTE(review): unreachable, and isfile() only returns a bool
            # so this branch can never be taken anyway.
            exit(1)
    def mount(self):
        # hdiutil attach runs asynchronously; check() polls for the mount.
        subprocess.Popen(self.mnt_cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
        return self.check()
    def check(self):
        # Poll up to 60 seconds for the DMG volume to appear.
        a = False
        b = 0
        while (a == False) and (b < 60):
            time.sleep(1)
            a = os.path.isdir(self.mnt_path)
            print "..."
            b += 1
        if a == False:
            return "Something went wrong and %s still hasn't mounted." % self.app_name
            # NOTE(review): unreachable after return.
            exit(1)
        elif a == True:
            return "%s was mounted.\n" % self.app_name
        else:
            return "error, exiting."
            exit(1)
    def copy(self):
        shutil.copytree(self.src, self.dst)
        a = os.path.isdir(self.dst)
        if a == True:
            return "%s was installed.\n" % self.app_name
        elif a == False:
            return "%s wasn't installed, exiting." % self.app_name
            # NOTE(review): unreachable after return.
            exit(1)
        else:
            return "error, exiting."
            exit(1)
    def cleanup(self):
        # Detach the DMG, wait for the volume to disappear, then delete
        # the downloaded image.  Returns (unmount_msg, success_msg) on the
        # happy path, a single error string otherwise.
        subprocess.Popen(self.unmnt_cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
        a = os.path.isdir(self.mnt_path)
        b = 0
        while (a == True) and (b < 30):
            time.sleep(1)
            a = os.path.isdir(self.mnt_path)
            print "..."
            b += 1
        if a == False:
            z = "Unmounted %s.\n" % self.app_name
        elif a == True:
            return "Couldn't unmount %s for some reason, exiting." % self.app_name
            exit(1)
        else:
            return "error, exiting."
            exit(1)
        os.unlink(self.dmg_path)
        aa = os.path.isfile(self.dmg_path)
        bb = 0
        while (aa == True) and (bb < 30):
            time.sleep(1)
            # NOTE(review): re-checks the mount path, not the DMG file —
            # looks like it should be os.path.isfile(self.dmg_path).
            aa = os.path.isdir(self.mnt_path)
            print "..."
            bb += 1
        # NOTE(review): ``bb`` is the loop counter, so ``bb == False`` only
        # means "zero iterations"; the deletion result is in ``aa`` — this
        # condition looks like it should test ``aa``.
        if bb == False:
            y = "[Sucess], %s has been installed.\n" % self.app_name
        elif bb == True:
            return "[Error], couldn't delete the %s DMG, exiting." % self.app_name
            exit(1)
        else:
            return "error, exiting."
            exit(1)
        return z, y
    def the_log(self, log_file, g):
        # Append every string in ``g`` to the log file.
        a = open(log_file, "a")
        for i in g:
            a.write(i)
        a.close()
a = install()
# BUG FIX: check_inst() is an instance method; the original bare call
# raised NameError before anything ran.
cc = a.check_inst()
if cc:
    aa, bb = a.check_version()
    if aa:
        g = "[Sucess], up-to-date\n%s\n" % bb
        a.the_log(log_file, g)
    else:
        # Installed but out of date: fetch and reinstall.
        b = a.download()
        c = a.mount()
        d = a.copy()
        e, f = a.cleanup()
        g = [b, c, d, e, f]
        a.the_log(log_file, g)
else:
    # App is not installed at all: run the full install.  This resolves
    # the TODO the original left ("if it's not there install it").
    b = a.download()
    c = a.mount()
    d = a.copy()
    e, f = a.cleanup()
    g = [b, c, d, e, f]
    a.the_log(log_file, g)
# test
|
# str.capitalize() returns a NEW string; this loop discards the result, so
# ``names`` is unchanged afterwards (kept as a deliberate counter-example).
names = ["andy", "sue", "pete"]
for name in names:
    name.capitalize()

# The idiomatic way: build a new list with a comprehension.
names = [name.capitalize() for name in ("andy", "sue", "pete")]
assert names == ["Andy", "Sue", "Pete"]

# Filtering: keep only the names that do not start with "S".
some_names = [name for name in names if name[0] != "S"]
assert some_names == ["Andy", "Pete"]

# Lists are heterogeneous: append the sum 0+4+8+12+16 == 40.
some_names.append(sum([num for num in range(0, 20, 4)]))
assert some_names == ["Andy", "Pete", 40]

# Dict comprehension: enumerate is 0-based, so shift keys up by one.
names_with_id = {key + 1: value for key, value in enumerate(["Andy", "Sue", "Pete"])}
assert names_with_id == {1: "Andy", 2: "Sue", 3: "Pete"}

# Concatenating two ranges keeps duplicates.
numbers = list(range(10)) + list(range(20))
assert numbers == [
    0,
    1,
    2,
    3,
    4,
    5,
    6,
    7,
    8,
    9,
    0,
    1,
    2,
    3,
    4,
    5,
    6,
    7,
    8,
    9,
    10,
    11,
    12,
    13,
    14,
    15,
    16,
    17,
    18,
    19,
]

# Set comprehension deduplicates: the even numbers from both ranges.
two_multiples = {number for number in numbers if number % 2 == 0}
assert two_multiples == {0, 2, 4, 6, 8, 10, 12, 14, 16, 18}
|
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""

from bs4 import BeautifulSoup
soup = BeautifulSoup(html_doc, 'html.parser')
title_tag = soup.title
# Use the .parent attribute to get an element's parent node.
# The <head> tag is the parent of the <title> tag:
print(title_tag.parent)
print(title_tag.string.parent)
# The parent of a top-level node such as <html> is the BeautifulSoup object:
html_tag = soup.html
print(type(html_tag.parent))
# The .parents attribute iterates over all ancestors; here we walk from an
# <a> tag all the way up to (and past) the document root.
link = soup.a
for parent in link.parents:
    if parent is None:
        print(parent)
    else:
        print(parent.name)
|
#!/usr/bin/env python3
# Created by: Liam Csiffary
# Created on: May 21, 2021
# This program calculates the factorial of the users number
# main function
def main():
    """Prompt for a non-negative integer and print its factorial.

    Invalid (non-integer) input and negative numbers each produce a
    dedicated error message; all output strings match the original.
    """
    import math

    user_num = input("what is the number: ")
    # make sure the user's input can be an integer
    try:
        user_num = int(user_num)
    except ValueError:
        print("Not valid input")
        return
    if user_num < 0:
        print("{} is not positive".format(user_num))
        return
    # math.factorial replaces the hand-rolled while loop and also covers
    # 0! == 1, so the original's special-case branch for 0 is unnecessary
    # ("{:,} factorial is {:,}".format(0, 1) prints "0 factorial is 1").
    print("{:,} factorial is {:,}".format(user_num, math.factorial(user_num)))


if __name__ == "__main__":
    main()
|
def get_middle(s):
    """Return the middle character of ``s`` — the middle two characters
    when the length is even, and '' for the empty string."""
    n = len(s)
    # For odd n this is s[n//2]; for even n it is the central pair.
    return s[(n - 1) // 2:n // 2 + 1]
|
from gym.envs.registration import register
# Env registration
# ==========================
# Register the custom object-detection environment with Gym so it can be
# created via gym.make('ObjectDetection-v0').
register(
    'ObjectDetection-v0',
    entry_point='rl_od.envs.rl_od:rl_od'
)
|
def main():
    """Two-player rock–paper–scissors (K=rock, P=paper, S=scissors)."""
    p1 = input("Pelaaja 1, syötä valintasi (K/P/S): ")
    p2 = input("Pelaaja 2, syötä valintasi (K/P/S): ")
    # Maps each symbol to the symbol that beats it.
    beaten_by = {"K": "P", "P": "S", "S": "K"}
    if p1 == p2 and p1 in beaten_by:
        print("Tuli tasapeli.")
    elif beaten_by.get(p1) == p2:
        print("Pelaaja 2 voitti!")
    else:
        # Exactly as in the original: every remaining case — including
        # invalid input — counts as a win for player 1.
        print("Pelaaja 1 voitti!")


main()
|
"""Base for all node resource services.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import collections
import errno
import glob
import io
import logging
import os
import socket
import struct
import tempfile
import time
import six
from treadmill import dirwatch
from treadmill import exc
from treadmill import fs
from treadmill import logcontext as lc
from treadmill import plugin_manager
from treadmill import utils
from treadmill import watchdog
from treadmill import yamlwrapper as yaml
_LOGGER = logging.getLogger(__name__)

#: Name of the directory holding the resources requests
RSRC_DIR = 'resources'
#: Name of request payload file
REQ_FILE = 'request.yml'
#: Name of reply payload file
REP_FILE = 'reply.yml'
#: Default Resource Service timeout, in seconds (15 minutes)
DEFAULT_TIMEOUT = 15 * 60
def wait_for_file(filename, timeout=None):
    """Wait at least ``timeout`` seconds for a file to appear or be modified.

    :param ``int`` timeout:
        Minimum amount of seconds to wait for the file.  ``None`` means
        ``DEFAULT_TIMEOUT``; ``0`` means a non-blocking existence probe.
    :returns ``bool``:
        ``True`` if there was an event, ``False`` otherwise (timeout).
    """
    if timeout is None:
        timeout = DEFAULT_TIMEOUT
    elif timeout == 0:
        # Non-blocking: just report whether the file exists right now.
        return os.path.exists(filename)

    # Watch the containing directory — the file itself may not exist yet.
    filedir = os.path.dirname(filename)
    # TODO: Fine tune the watcher mask for efficiency.
    watcher = dirwatch.DirWatcher(filedir)

    now = time.time()
    end_time = now + timeout
    while not os.path.exists(filename):
        # Block until something changes in the directory (or the remaining
        # budget elapses), then re-check for the target file.
        if watcher.wait_for_events(timeout=max(0, end_time - now)):
            watcher.process_events()

        now = time.time()
        if now > end_time:
            return False

    return True
class ResourceServiceError(exc.TreadmillError):
    """Base Resource Service error.

    All errors raised by the resource service layer derive from this.
    """
    __slots__ = ()
class ResourceServiceRequestError(ResourceServiceError):
    """Resource Service Request error.

    Carries the request payload that triggered the failure so callers can
    report or retry it.
    """
    __slots__ = (
        'request',
    )

    def __init__(self, message, request):
        super(ResourceServiceRequestError, self).__init__(message)
        # Original request payload associated with the failure.
        self.request = request
class ResourceServiceTimeoutError(ResourceServiceError, socket.timeout):
    """Resource Service timeout.

    Also subclasses ``socket.timeout`` so callers may handle it as an
    ordinary socket timeout.
    """
    __slots__ = ()
class ResourceServiceClient(object):
    """Client class for all Treadmill services.

    Requests are materialized on disk under the client directory:

    /apps/<container>/rsrc/req-<svc_name>/
        request.yml
        reply.yml
        svc_req_id
    """

    _REQ_UID_FILE = 'svc_req_id'

    __slots__ = (
        '_serviceinst',
        '_clientdir',
    )

    def __init__(self, serviceinst, clientdir):
        self._serviceinst = serviceinst
        fs.mkdir_safe(clientdir)
        self._clientdir = os.path.realpath(clientdir)

    def put(self, rsrc_id, rsrc_data):
        """Request creation/update of a resource.

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        :param `str` rsrc_data:
            (New) Parameters for the requested resource.
        """
        req_dir = self._req_dirname(rsrc_id)
        fs.mkdir_safe(req_dir)

        with io.open(os.path.join(req_dir, REQ_FILE), 'w') as f:
            if os.name == 'posix':
                os.fchmod(f.fileno(), 0o644)
            yaml.dump(rsrc_data,
                      explicit_start=True, explicit_end=True,
                      default_flow_style=False,
                      stream=f)

        req_uuid_file = os.path.join(req_dir, self._REQ_UID_FILE)
        try:
            with io.open(req_uuid_file) as f:
                svc_req_uuid = f.read().strip()
        except IOError as err:
            if err.errno == errno.ENOENT:
                svc_req_uuid = None
            else:
                raise

        with lc.LogContext(_LOGGER, rsrc_id):
            if svc_req_uuid is None:
                try:
                    # New request
                    svc_req_uuid = self._serviceinst.clt_new_request(rsrc_id,
                                                                     req_dir)
                    # Write down the UUID
                    with io.open(req_uuid_file, 'w') as f:
                        f.write(svc_req_uuid)
                        # CONSISTENCY FIX: guard fchmod like the request
                        # file above so non-POSIX platforms do not break.
                        if os.name == 'posix':
                            os.fchmod(f.fileno(), 0o644)
                except OSError:
                    # Error registration failed, delete the request.
                    _LOGGER.exception('Unable to submit request')
                    fs.rmtree_safe(req_dir)
            else:
                self._serviceinst.clt_update_request(svc_req_uuid)

    def delete(self, rsrc_id):
        """Delete an existing resource.

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        """
        with lc.LogContext(_LOGGER, rsrc_id,
                           adapter_cls=lc.ContainerAdapter) as log:
            req_dir = self._req_dirname(rsrc_id)
            try:
                with io.open(os.path.join(req_dir, self._REQ_UID_FILE)) as f:
                    svc_req_uuid = f.read().strip()
            except IOError as err:
                if err.errno == errno.ENOENT:
                    log.warning('Resource %r does not exist', rsrc_id)
                    return
                raise
            self._serviceinst.clt_del_request(svc_req_uuid)
            # Keep the request dir around (renamed) for post-mortem.
            os.rename(
                req_dir,
                self._bck_dirname(svc_req_uuid)
            )

    def get(self, rsrc_id):
        """Get the result of a resource request.

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        :returns:
            The reply payload, or ``None`` if it is not available yet.
        :raises ``ResourceServiceRequestError``:
            If the request resulted in error.
        """
        try:
            res = self.wait(rsrc_id, timeout=0)
        except ResourceServiceTimeoutError:
            res = None
        return res

    def wait(self, rsrc_id, timeout=None):
        """Wait for a requested resource to be ready.

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        :raises ``ResourceServiceRequestError``:
            If the request resulted in error.
        :raises ``ResourceServiceTimeoutError``:
            If the request was not available before timeout.
        """
        req_dir = self._req_dirname(rsrc_id)
        rep_file = os.path.join(req_dir, REP_FILE)

        if not wait_for_file(rep_file, timeout):
            raise ResourceServiceTimeoutError(
                'Resource %r not available in time' % rsrc_id
            )

        try:
            with io.open(rep_file) as f:
                reply = yaml.load(stream=f)
        except (IOError, OSError) as err:
            if err.errno == errno.ENOENT:
                raise ResourceServiceTimeoutError(
                    'Resource %r not available in time' % rsrc_id
                )
            # BUG FIX: other I/O errors used to fall through to the code
            # below with ``reply`` undefined, masking them as a NameError.
            raise

        if isinstance(reply, dict) and '_error' in reply:
            raise ResourceServiceRequestError(reply['_error']['why'],
                                              reply['_error']['input'])

        return reply

    def status(self, timeout=30):
        """Query the status of the resource service.
        """
        return self._serviceinst.status(timeout=timeout)

    def _req_dirname(self, rsrc_id):
        """Request directory name for a given resource id.

        :param `str` rsrc_id:
            Unique identifier for the requested resource.
        """
        req_dir_name = 'req-{name}-{rsrc_id}'.format(
            name=self._serviceinst.name,
            rsrc_id=rsrc_id
        )
        req_dir = os.path.join(self._clientdir, req_dir_name)
        return req_dir

    def _bck_dirname(self, req_uuid):
        """Return a unique backup directory name.
        """
        bck_dir_name = 'bck{ts}-{name}-{req_uuid}'.format(
            name=self._serviceinst.name,
            req_uuid=req_uuid,
            ts=int(time.time()),
        )
        bck_dir = os.path.join(self._clientdir, bck_dir_name)
        return bck_dir
@six.add_metaclass(abc.ABCMeta)
class ResourceService(object):
    """Server class for all Treadmill services.

    /service_dir/resources/<containerid>-<uid>/ ->
        /apps/<containerid>/rsrc/req-<svc_name>/

    /apps/<container>/rsrc/<svc_name>/
        request.yml
        reply.yml
        svc_req_id
    """

    __slots__ = (
        '_is_dead',
        '_dir',
        '_rsrc_dir',
        '_service_impl',
        '_service_class',
        '_service_name',
    )

    # Value written to an eventfd-style pipe to signal pending I/O.
    _IO_EVENT_PENDING = struct.pack('@Q', 1)

    def __init__(self, service_dir, impl):
        fs.mkdir_safe(service_dir)
        self._dir = os.path.realpath(service_dir)
        self._rsrc_dir = os.path.join(self._dir, RSRC_DIR)
        fs.mkdir_safe(self._rsrc_dir)
        self._is_dead = False
        self._service_impl = impl
        self._service_class = None
        # Figure out the service's name: last component of the plugin path,
        # or the implementation class name when given a class directly.
        if isinstance(self._service_impl, six.string_types):
            svc_name = self._service_impl.rsplit('.', 1)[-1]
        else:
            svc_name = self._service_impl.__name__
        self._service_name = svc_name

    @property
    def name(self):
        """Name of the service."""
        return self._service_name

    def make_client(self, client_dir):
        """Create a client using `clientdir` as request dir location.
        """
        return ResourceServiceClient(self, client_dir)

    @abc.abstractmethod
    def status(self, timeout=30):
        """Query the status of the resource service.

        :param ``float`` timeout:
            Wait at least timeout seconds for the service to reply.
        :raises ``ResourceServiceTimeoutError``:
            If the requested service does not come up before timeout.
        :raises ``socket.error``:
            If there is a communication error with the service.
        """
        pass

    def get(self, req_id):
        """Read the reply of a given request.
        """
        rep_file = os.path.join(self._rsrc_dir, req_id, REP_FILE)
        with io.open(rep_file) as f:
            reply = yaml.load(stream=f)

        # A reply carrying an '_error' key means the request failed.
        if isinstance(reply, dict) and '_error' in reply:
            raise ResourceServiceRequestError(reply['_error']['why'],
                                              reply['_error']['input'])

        return reply

    @abc.abstractmethod
    def _run(self, impl, watchdog_lease):
        """Implementation specifc run.
        """

    def run(self, watchdogs_dir, *impl_args, **impl_kwargs):
        """Run the service.

        The run procedure will first initialize the service's implementation,
        the setup the service's watchdog, and start the service resource
        resynchronization procedure.

        This procedure is in 4 phases to handle both fresh starts and restarts.

        $ Call the implementation's :function:`initialize` function which
        allows the implementation to query and import the backend resource's
        state.
        $ Setup the service request watcher.
        $ Import all existing requests (passing them to the
        :function:`on_created` implementation's handler.
        $ Call the implementation's :function:`synchronize` function which
        expunges anything allocated against the backend resource that doesn't
        have a matching request anymore.

        The implementation is expected to implement two handlers:

        * :function:`on_created` that handles new resource requests or update
        to existing resource request (implementation is expected to be
        idem-potent.
        * :function:`on_deleted` that handlers delation of resource requests.
        It should properly handle the case where the backend resource is
        already gone.

        :param ``str`` watchdogs_dir:
            Path to the watchdogs directory.
        :param ``tuple`` impl_args:
            Arguments passed to the implementation's constructor.
        :param ``dict`` impl_kwargs:
            Keywords arguments passed to the implementation's constructor.
        """
        # Load the implementation
        if self._service_class is None:
            self._service_class = self._load_impl()
        impl = self._service_class(*impl_args, **impl_kwargs)

        # Setup the watchdog
        watchdogs = watchdog.Watchdog(os.path.realpath(watchdogs_dir))
        watchdog_lease = watchdogs.create(
            name='svc-{svc_name}'.format(svc_name=self.name),
            timeout='{hb:d}s'.format(hb=impl.WATCHDOG_HEARTBEAT_SEC),
            content='Service %r failed' % self.name
        )

        self._run(impl, watchdog_lease)

        _LOGGER.info('Shuting down %r service', self.name)
        # Remove the service heartbeat
        watchdog_lease.remove()

    def _load_impl(self):
        """Load the implementation class of the service.
        """
        if isinstance(self._service_impl, six.string_types):
            impl_class = plugin_manager.load('treadmill.services',
                                             self._service_impl)
        else:
            impl_class = self._service_impl

        assert issubclass(impl_class, BaseResourceServiceImpl), \
            'Invalid implementation %r' % impl_class

        return impl_class

    def clt_new_request(self, req_id, req_data_dir):
        """Add a request data dir as `req_id` to the service.

        This should only be called by the client instance.
        """
        svc_req_lnk = os.path.join(self._rsrc_dir, req_id)
        _LOGGER.info('Registering %r: %r -> %r',
                     req_id, svc_req_lnk, req_data_dir)
        # NOTE(boysson): We use a temporary file + rename behavior to override
        #                any potential old symlinks.
        tmpsymlink = tempfile.mktemp(dir=self._rsrc_dir,
                                     prefix='.tmp' + req_id)
        os.symlink(req_data_dir, tmpsymlink)
        os.rename(tmpsymlink, svc_req_lnk)
        return req_id

    def clt_del_request(self, req_id):
        """Remove an existing request.

        This should only be called by the client instance.
        """
        svc_req_lnk = os.path.join(self._rsrc_dir, req_id)
        _LOGGER.info('Unegistering %r: %r', req_id, svc_req_lnk)
        fs.rm_safe(svc_req_lnk)

        return req_id

    @abc.abstractmethod
    def clt_update_request(self, req_id):
        """Update an existing request.

        This should only be called by the client instance.
        """
        pass

    def _check_requests(self):
        """Check each existing request and remove stale ones.
        """
        svcs = collections.deque()
        for svc in glob.glob(os.path.join(self._rsrc_dir, '*')):
            try:
                # Requests are symlinks: stat() fails with ENOENT when the
                # link target (the client request dir) is gone.
                os.stat(svc)
                svcs.append(svc)

            except OSError as err:
                if err.errno == errno.ENOENT:
                    _LOGGER.warning('Deleting stale request: %r', svc)
                    fs.rm_safe(svc)
                else:
                    raise

        return svcs

    def _on_created(self, impl, filepath):
        """Private handler for request creation events.
        """
        # Avoid triggering on changes to the service directory itself.
        if filepath == self._rsrc_dir:
            return

        req_id = os.path.basename(filepath)

        # Avoid triggerring on temporary files
        if req_id[0] == '.':
            return

        req_file = os.path.join(filepath, REQ_FILE)
        rep_file = os.path.join(filepath, REP_FILE)

        try:
            with io.open(req_file) as f:
                req_data = yaml.load(stream=f)

        except IOError as err:
            if (err.errno == errno.ENOENT or
                    err.errno == errno.ENOTDIR):
                _LOGGER.exception('Removing invalid request: %r', req_id)
                try:
                    fs.rm_safe(filepath)
                except OSError as rm_err:
                    if rm_err.errno == errno.EISDIR:
                        fs.rmtree_safe(filepath)
                    else:
                        raise
                return
            raise

        # TODO: We should also validate the req_id format
        with lc.LogContext(_LOGGER, req_id,
                           adapter_cls=lc.ContainerAdapter) as log:

            log.debug('created %r: %r', req_id, req_data)

            try:
                # TODO: We should also validate the req_id format
                utils.validate(req_data, impl.PAYLOAD_SCHEMA)
                res = impl.on_create_request(req_id, req_data)

            except exc.InvalidInputError as err:
                log.error('Invalid request data: %r: %s', req_data, err)
                res = {'_error': {'input': req_data, 'why': str(err)}}

            except Exception as err:  # pylint: disable=W0703
                log.exception('Unable to process request: %r %r:',
                              req_id, req_data)
                res = {'_error': {'input': req_data, 'why': str(err)}}

        if res is None:
            # Request was not actioned
            return False

        fs.write_safe(
            rep_file,
            lambda f: yaml.dump(
                res, explicit_start=True, explicit_end=True,
                default_flow_style=False, stream=f
            ),
            mode='w',
            permission=0o644
        )

        # Return True if there were no error
        return not bool(res.get('_error', False))

    def _on_deleted(self, impl, filepath):
        """Private handler for request deletion events.
        """
        req_id = os.path.basename(filepath)

        # Avoid triggerring on temporary files
        if req_id[0] == '.':
            return

        # TODO: We should also validate the req_id format
        with lc.LogContext(_LOGGER, req_id,
                           adapter_cls=lc.ContainerAdapter) as log:
            log.debug('deleted %r', req_id)
            res = impl.on_delete_request(req_id)

        return res
@six.add_metaclass(abc.ABCMeta)
class BaseResourceServiceImpl(object):
    """Base interface of Resource Service implementations.
    """

    __slots__ = (
        '_service_dir',
        '_service_rsrc_dir',
    )

    # Maximum number of requests processed per event-loop cycle.
    MAX_REQUEST_PER_CYCLE = 5

    # Schema used to validate incoming request payloads.
    PAYLOAD_SCHEMA = ()

    # How often (in seconds) the service must heartbeat its watchdog.
    WATCHDOG_HEARTBEAT_SEC = 60

    def __init__(self):
        self._service_dir = None
        self._service_rsrc_dir = None

    @abc.abstractmethod
    def initialize(self, service_dir):
        """Service initialization."""
        self._service_dir = service_dir
        self._service_rsrc_dir = os.path.join(service_dir, RSRC_DIR)

    @abc.abstractmethod
    def synchronize(self):
        """Assert that the internal state of the service matches the backend
        state.
        """
        return

    @abc.abstractmethod
    def on_create_request(self, rsrc_id, rsrc_data):
        """Call back invoked when a new resource request is received.

        Args:
            rsrc_id ``str``: Unique resource identifier

            rsrc_data ``dict``: Resource request metadata

        Returns:
            ``dict``: Result communicated back to the requestor, ``None``,
            ``False`` or ``{}`` if no changes to the service were made.
        """
        pass

    @abc.abstractmethod
    def on_delete_request(self, rsrc_id):
        """Call back invoked when a resource is deleted.

        Arguments::
            rsrc_id ``str``: Unique resource identifier
        """
        pass

    @abc.abstractmethod
    def retry_request(self, rsrc_id):
        """Force re-evaluation of a request.

        Arguments::
            rsrc_id ``str``: Unique resource identifier
        """
        pass
|
import turtle

# Screen setup: the turtle is drawn with a custom image, which must be
# registered with the screen before it can be used as a shape.
screen = turtle.Screen()
image = "rock.gif"
screen.register_shape(image)
turtle.shape(image)
screen.bgcolor("lightblue")

move_speed = 10  # pixels per keypress
turn_speed = 10  # degrees per keypress


def forward():
    """Move the turtle forward by one step."""
    turtle.forward(move_speed)


def backward():
    """Move the turtle backward by one step."""
    turtle.backward(move_speed)


def left():
    """Rotate the turtle counter-clockwise."""
    turtle.left(turn_speed)


def right():
    """Rotate the turtle clockwise."""
    turtle.right(turn_speed)


turtle.penup()
turtle.speed(0)  # fastest animation
turtle.home()

screen.onkey(forward, "Up")
screen.onkey(backward, "Down")
screen.onkey(left, "Left")
screen.onkey(right, "Right")
screen.listen()

# BUG FIX: the module-level function is ``mainloop`` (all lowercase);
# ``turtle.mainLoop()`` raised AttributeError and the window never ran.
turtle.mainloop()
|
from charm.schemes.grpsig.groupsig_bgls04 import ShortSig as BGLS04
from charm.schemes.grpsig.groupsig_bgls04_var import ShortSig as BGLS04_Var
from charm.toolbox.pairinggroup import PairingGroup
import unittest
debug = False
class BGLS04Test(unittest.TestCase):
    """Sign/verify round-trip test for the BGLS04 short group signature."""

    def testBGLS04(self):
        groupObj = PairingGroup('MNT224')
        n = 3    # how many users in the group
        user = 1 # which user's key to sign a message with
        sigTest = BGLS04(groupObj)

        (gpk, gmsk, gsk) = sigTest.keygen(n)

        message = 'Hello World this is a message!'
        if debug: print("\n\nSign the following M: '%s'" % (message))

        signature = sigTest.sign(gpk, gsk[user], message)

        result = sigTest.verify(gpk, message, signature)
        #if result:
        #    print("Verify signers identity...")
        #    index = sigTest.open(gpk, gmsk, message, signature)
        #    i = 0
        #    while i < n:
        #        if gsk[i][0] == index:
        #            print('Found index of signer: %d' % i)
        #            print('A = %s' % index)
        #        i += 1
        assert result, "Signature Failed"
        if debug: print('Complete!')
class BGLS04_VarTest(unittest.TestCase):
    """Same round-trip test against the BGLS04 variant implementation."""

    def testBGLS04_Var(self):
        groupObj = PairingGroup('MNT224')
        n = 3    # how many users in the group
        user = 1 # which user's key to sign a message with
        sigTest = BGLS04_Var(groupObj)

        (gpk, gmsk, gsk) = sigTest.keygen(n)

        message = 'Hello World this is a message!'
        if debug: print("\n\nSign the following M: '%s'" % (message))

        signature = sigTest.sign(gpk, gsk[user], message)

        result = sigTest.verify(gpk, message, signature)
        #if result:
        #    print("Verify signers identity...")
        #    index = sigTest.open(gpk, gmsk, message, signature)
        #    i = 0
        #    while i < n:
        #        if gsk[i][0] == index:
        #            print('Found index of signer: %d' % i)
        #            print('A = %s' % index)
        #        i += 1
        assert result, "Signature Failed"
        if debug: print('Successful Verification!')


if __name__ == "__main__":
    unittest.main()
|
import cv2
import numpy as np
class VideoHelper(object):
    """
    This class will help us to duel with operations related with videos
    such as open/close, read/write video and also we can use this helper
    to get attributes of the video
    """

    def __init__(self, config):
        # video in
        self.video_in = cv2.VideoCapture()
        self.video_in.open(config.VID_NAME)

        # video attributes, queried from the opened capture
        self.frame_width = int(self.video_in.get(cv2.CAP_PROP_FRAME_WIDTH))    # 3: cv2.CAP_PROP_FRAME_WIDTH
        self.frame_height = int(self.video_in.get(cv2.CAP_PROP_FRAME_HEIGHT)) # 4: cv2.CAP_PROP_FRAME_HEIGHT
        self.frame_fps = int(self.video_in.get(cv2.CAP_PROP_FPS))             # 5: cv2.CAP_PROP_FPS

        # video output (full frame + resized blob view)
        fourcc = cv2.VideoWriter_fourcc(*'DIVX')
        #fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        self.video_out = cv2.VideoWriter(config.VID_SAVING_NAME, fourcc, self.frame_fps, (self.frame_width, self.frame_height))
        self.video_blob_out = cv2.VideoWriter(config.VID_SAVING_BLOB_NAME, fourcc, self.frame_fps, (config.BACK_RESIZE_WIDTH, config.BACK_RESIZE_HEIGHT))

        # cut the video at this num (0 means "read to the end")
        self.finish_frame_num = config.FINISH_CUT_FRAME  # in case the video is too long

    def not_finished(self, cur_frame):
        """
        This function is used to check whether we finish reading video file or not
        Args:
            param1: cur_frame: # of current frame
        Return:
            bool type to check whether we can stop reading the video file or not
        """
        if self.video_in.isOpened():
            # FINISH_CUT_FRAME == 0 disables the frame-count cut-off.
            if self.finish_frame_num == 0:
                return True
            if cur_frame < self.finish_frame_num:
                return True
            else:
                return False
        else:
            print("Video is NOT opened!")
            return False

    def get_frame(self):
        """
        This function is used to get current frame from the video file
        Return:
            frame: raw frame
            frame_show: use this to show the result
        """
        ret, frame = self.video_in.read()
        # BUG FIX (idiom): ``ret != True`` → ``not ret``; ret is a bool.
        # NOTE(review): exiting the whole process from a helper is drastic —
        # callers cannot do any cleanup; consider raising StopIteration-style
        # signalling instead.
        if not ret:
            print("That's all!")
            exit()
        frame_show = frame
        return frame, frame_show

    def write_video(self, img):
        """Append one frame to the main output video."""
        self.video_out.write(img)

    def end(self):
        """Release the input capture and both output writers."""
        self.video_in.release()
        self.video_out.release()
        self.video_blob_out.release()
|
import cv2  # NOTE(review): unused here; likely needed by a transformer added later
from streamlit_webrtc import VideoTransformerBase, webrtc_streamer

# Minimal streamlit-webrtc demo: renders a WebRTC video widget with the
# default (pass-through) pipeline; the key must be unique per page.
webrtc_streamer(key="example")
|
"""
TensorFlow integration.
Importing this module registers the TensorFlow backend with `phiml.math`.
Without this, TensorFlow tensors cannot be handled by `phiml.math` functions.
To make TensorFlow the default backend, import `phi.tf.flow`.
"""
from phiml.backend.tensorflow import TENSORFLOW  # noqa: F401 — import registers the TF backend

# Export every public (non-underscore) name defined in this module.
__all__ = [key for key in globals().keys() if not key.startswith('_')]
|
import time
import math
import datetime
from influxdb import InfluxDBClient
from threading import Event, Thread, Timer
from multiprocessing import Queue
import serial
import socket
import sys
from dbSetting import *
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# Bind the socket to the port
server_address = ('localhost', 10000)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)

ser = serial.Serial('/dev/ttyAMA1',57600,timeout=1)
serialLock = False  # crude mutual-exclusion flag for the shared serial port
dataQueue = Queue(300)  # bounded buffer between the reader and the upload thread
class uploadDataThread(Thread):
    """Background thread that drains ``queue`` and writes each batch of
    points to InfluxDB, logging (and dropping) batches that fail."""

    def __init__(self, ifuser, ifpass, ifdb, ifhost, queue):
        Thread.__init__(self)
        self.ifuser = ifuser
        self.ifpass = ifpass
        self.ifdb = ifdb
        self.ifhost = ifhost
        # ``ifport`` comes from the ``dbSetting`` star-import.
        self.ifport = ifport
        self.queue = queue

    def run(self):
        print("[Upload Thread] Starting")
        # CONSISTENCY FIX: build the client from the values stored on
        # ``self`` — the original silently ignored them and re-read the
        # module-level globals.
        self.ifclient = InfluxDBClient(self.ifhost, self.ifport,
                                       self.ifuser, self.ifpass, self.ifdb,
                                       timeout=2, retries=3)
        while 1:
            val = self.queue.get()
            try:
                self.ifclient.write_points(val)
            except Exception as e:
                # Best effort: log and drop the failed batch.
                print(e)


uploadDataThread(ifuser, ifpass, ifdb, ifhost, dataQueue).start()
def call_repeatedly(interval, func, *args):
    """Invoke ``func(*args)`` every ``interval`` seconds on a background
    thread, aligned to wall-clock multiples of ``interval``.

    Returns a zero-argument callable that stops the loop.
    """
    stopped = Event()
    print("[call_repeatedly] Starting")
    def loop():
        # ``interval - time.time() % interval`` sleeps exactly until the
        # next wall-clock multiple of ``interval``, so the schedule does
        # not drift; Event.wait() returns True once stop is requested.
        while not stopped.wait(interval - time.time() % interval): # the first call is in `interval` secs
            func(*args)
    Thread(target=loop).start()
    return stopped.set
def readData():
    """Poll the device once over the serial port and queue the parsed
    reading for the InfluxDB upload thread.

    Called once per second via call_repeatedly(); shares the serial port
    with the UDP relay loop through the ``serialLock`` flag.
    """
    global serialLock
    # Busy-wait until the UDP relay loop releases the serial port.
    while serialLock:
        time.sleep(0.01)
    try:
        time.sleep(0.5)
        serialLock = True
        ser.flushInput()
        # '^' is the poll command understood by the device.
        ser.write(b'^')
        line = ser.readline()
        serialLock = False
        line = line.decode("utf-8").strip('\r\n')
        print(line)
        # The device answers with one comma-separated record; field order
        # below matches the device's documented output order — confirm
        # against the device manual if fields are ever added.
        data = line.split(',')
        body = [
            {
                "measurement": "MAC",
                "time": datetime.datetime.utcnow(),
                "fields": {
                    "BITE": int(data[0]),
                    "Version": data[1],
                    "SerialNumber": data[2],
                    "TEC Control": int(data[3])/1000,
                    "RF Control": int(data[4])/10,
                    "DDS Frequency Center Current": int(data[5])/100,
                    "CellHeaterCurrent": int(data[6]),
                    "DCSignal": int(data[7]),
                    "Temperature": int(data[8])/1000,
                    "Digital Tuning": int(data[9]),
                    "Analog Tuning On/Off": int(data[10]),
                    "Analog Tuning": int(data[11])
                }
            }
        ]
        print(body)
        dataQueue.put(body)
    except Exception as e:
        # Best effort: a malformed line or serial hiccup skips this sample.
        print(e)
        pass
# Start the 1 Hz background polling of the device.
cancel_future_calls = call_repeatedly(1, readData)

try:
    ser.flushInput()
    # UDP relay loop: forward each received datagram to the serial port
    # and send the device's one-line answer back to the sender.
    while 1:
        try:
            print('\nwaiting to receive message')
            data, address = sock.recvfrom(4096)
            print('<===received {} bytes from {}'.format(len(data), address))
            print(data)
            # Wait for readData() to release the serial port.
            while serialLock:
                time.sleep(0.01)
            serialLock = True
            ser.write(data)
            line = ser.readline()
            print("==>Send:%s"%line)
            sent = sock.sendto(line, address)
            serialLock = False
            pass
        except Exception as e:
            # Always release the lock so the poller is not starved.
            print(e)
            serialLock = False
            pass
except KeyboardInterrupt:
    sys.exit()
|
# -*- coding: utf-8 -*-

# One Statement per Line
from datetime import datetime

# Write today's date, then read it back both as text and as raw bytes.
with open('test.txt', 'w') as f:
    f.write('Today is ')
    f.write(datetime.now().strftime('%Y-%m-%d'))

with open('test.txt', 'r') as f:
    s = f.read()
    print('open for read...')
    print(s)

with open('test.txt', 'rb') as f:
    s = f.read()
    print('open as binary for read...')
    print(s)

# A single line should not exceed the number of 79 characters
# NOTE(review): this copy step assumes test1.txt already exists in the
# working directory; it raises FileNotFoundError otherwise.
with open('test1.txt') as file_1, \
        open('test2.txt', 'w') as file_2:
    file_2.write(file_1.read())
|
from bottle import request
from .app import app
from .app.auth import sign_in
@app.post("/api/signin")
def post_sign_in(db):
email = request.POST.get("email")
password = request.POST.get("password")
return sign_in(db, email, password)
|
# number conversion
# zio800
# decimal to binary
def nc10_2(num10):
    """Print and return the binary rendering of non-negative int ``num10``."""
    if num10 == 0:
        # BUG FIX: the original loop produced no digits for 0 and then
        # crashed on int('').
        print('binary = ', 0)
        return 0
    num2 = []
    while num10 != 0:
        num2.append(str(num10 % 2))
        num10 //= 2
    num2.reverse()
    ans = int(''.join(num2))
    print('binary = ', ans)
    return ans  # returned as well, so callers need not capture stdout
# decimal to octal
def nc10_8(num10):
    """Print and return the octal rendering of non-negative int ``num10``."""
    if num10 == 0:
        # BUG FIX: the original produced no digits for 0 and crashed on int('').
        print('octal = ', 0)
        return 0
    num8 = []
    # BUG FIX: the original tested ``num10 / 2 != 0`` — a leftover from the
    # binary version; a plain nonzero check is what was meant.
    while num10 != 0:
        num8.append(str(num10 % 8))
        num10 //= 8
    num8.reverse()
    ans = int(''.join(num8))
    print('octal = ', ans)
    return ans
# decimal to hexadecimal
def nc10_16(num10):
    """Print and return the uppercase-hex rendering of non-negative ``num10``."""
    # Maps the two-character decimal digits 10..15 onto the letters A..F.
    hexd = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F'}
    if num10 == 0:
        # BUG FIX: the original produced an empty string for input 0.
        print('hexadecimal = ', '0')
        return '0'
    num16 = []
    while num10 != 0:
        num16.append(str(num10 % 16))
        num10 //= 16
    num16.reverse()
    num16 = [hexd.get(digit, digit) for digit in num16]
    ans = ''.join(num16)
    print('hexadecimal = ', ans)
    return ans
# binary to decimal
def nc2_10(num2):
    """Print and return the decimal value of the binary digits ``num2``.

    ``num2`` may be an int (e.g. 1101) or a string of '0'/'1' characters.
    """
    digits = [d for d in str(num2)]
    digits.reverse()
    # Least-significant digit first: weight each by 2**position.
    ans = sum(int(d) * (2 ** i) for i, d in enumerate(digits))
    print('decimal = ', ans)
    return ans
# octal to decimal
def nc8_10(num8):
    """Print and return the decimal value of the octal digits ``num8``.

    ``num8`` may be an int (e.g. 322) or a string of octal digits.
    """
    digits = [d for d in str(num8)]
    digits.reverse()
    # Least-significant digit first: weight each by 8**position.
    ans = sum(int(d) * (8 ** i) for i, d in enumerate(digits))
    print('decimal = ', ans)
    return ans
# hexadecimal to decimal
def nc16_10(num16):
    """Print and return the decimal value of uppercase-hex string ``num16``.

    Only uppercase A-F letters are recognised, matching the original.
    """
    # Inverse of the encoder's table: letters back to two-digit decimals.
    hexd = {'A': '10', 'B': '11', 'C': '12', 'D': '13', 'E': '14', 'F': '15'}
    digits = [hexd.get(d, d) for d in str(num16)]
    digits.reverse()
    # Least-significant digit first: weight each by 16**position.
    ans = sum(int(d) * (16 ** i) for i, d in enumerate(digits))
    print('decimal = ', ans)
    return ans
# decimal to base 2, 8, 16
def dec_convert(num, base):
    """Print and return non-negative ``num`` rendered in ``base`` (2, 8 or 16)."""
    hexd = {'10': 'A', '11': 'B', '12': 'C', '13': 'D', '14': 'E', '15': 'F'}
    if num == 0:
        # BUG FIX: the original produced an empty string for input 0.
        print('base', base, 'number = ', '0')
        return '0'
    con = []
    while num != 0:
        con.append(str(num % base))
        num //= base
    con.reverse()
    if base == 16:
        # Map the two-character decimal digits 10..15 onto A..F.
        con = [hexd.get(digit, digit) for digit in con]
    ans = ''.join(con)
    print('base', base, 'number = ', ans)
    return ans
# base 2, 8, 16 to decimal
def boh_convert(num, base):
    """Print and return the decimal value of ``num`` expressed in ``base``.

    ``num`` may be an int or a digit string; hex letters must be uppercase.
    """
    hexd = {'A': '10', 'B': '11', 'C': '12', 'D': '13', 'E': '14', 'F': '15'}
    digits = [d for d in str(num)]
    digits.reverse()
    if base == 16:
        # Translate A..F back into two-digit decimal strings before weighting.
        digits = [hexd.get(d, d) for d in digits]
    ans = sum(int(d) * (base ** i) for i, d in enumerate(digits))
    print('decimal = ', ans)
    return ans
# calculate — smoke-run each converter with a sample value
nc10_2(13)            # 13  -> 1101
nc10_8(210)           # 210 -> 322
nc10_16(501)          # 501 -> 1F5
nc2_10(1101)          # 1101 (base 2)  -> 13
nc8_10(322)           # 322  (base 8)  -> 210
nc16_10('1F5')        # 1F5  (base 16) -> 501
dec_convert(13, 2)    # generalized encoder, same values as above
dec_convert(210, 8)
dec_convert(501, 16)
boh_convert(1101, 2)  # generalized decoder
boh_convert(322, 8)
boh_convert('1F5', 16)
|
from flask_restful import Resource, reqparse
from models.user import Usermodel
from flask_jwt import jwt_required
import json
import logging
log = logging.getLogger(__name__)  # module-level logger for this resource module
class User(Resource):
    """Registration endpoint: POST creates a user unless the name is taken."""

    # Both fields are mandatory; reqparse rejects the request otherwise.
    parser = reqparse.RequestParser()
    parser.add_argument('username', required=True, type=str, help='This field can not be Empty')
    parser.add_argument('password', required=True, type=str, help='This field can not be Empty')

    def post(self):
        """Create a new user; duplicate usernames are rejected with HTTP 400."""
        data = User.parser.parse_args()
        log.info("{}".format(data))
        if Usermodel.find_by_name(data['username']):
            log.warning("User with name {} is already exists".format(data['username']))
            # BUG FIX: the duplicate case previously fell through with an
            # implicit HTTP 200; flask_restful treats a (body, status) tuple
            # as an error response.
            return {"message":"User with name {} is already exists".format(data['username'])}, 400
        user = Usermodel(**data)
        user.save()
        return user.json()
class Getusers(Resource):
    """Read-only endpoint exposing the full user collection."""

    def get(self):
        """Return every user record from the backing model."""
        users = Usermodel.find()
        log.info("%s ", json.dumps(users))
        return users
|
import argparse
import re
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import sys
# Parsed command-line arguments; populated by readCommandline().
args = dict()
# benchmark-name -> {category: value} results table; filled by main().
data = dict()
origDir = os.getcwd()
#plt.style.use('ggplot')
## plt.style.use('grayscale')
## plt.style.use('fivethirtyeight')
#print plt.style.available
# Regexes for the interesting lines of a '<name>-pin.log' file.
numInst = re.compile('Number of Instructions: (\d+)')
numFTL = re.compile('Number of FTL Instructions: (\d+)')
typeCheckInfo = re.compile('TypeCheck\[(.+)\]= (\d+)')
fileNameInfo = re.compile('(.+)-pin\.log')
numAccesses = re.compile('Total Accesses: (\d+)')
numHits = re.compile('Total Hits: (\d+)')
numMisses = re.compile('Total Misses: (\d+)')
hitRate = re.compile('Hit Rate: ([0-9\.]+)')
memInfo = re.compile('(.+) (READ|WRITE)')
# Distinct memory addresses (64-byte aligned) seen in the current log file;
# reset per file by main().
valuesSeen = dict()
def initializeData(dictionary):
    """Seed the per-benchmark instruction counters at zero."""
    dictionary["ftl_count"] = 0
    dictionary["inst_count"] = 0
def parseFilename(filename):
    """Return the benchmark name from a '<name>-pin.log' filename, else None."""
    result = fileNameInfo.match(filename)
    if result:
        return result.group(1)
    # BUG FIX: the original had an unreachable ``sys.exit(1)`` after
    # ``return None`` — callers only ever received None, so the dead exit
    # is dropped. print() form works under both Python 2 and 3.
    print("oops")
    return None
def parseData(line, dictionary):
    """Scan one log line, recording touched memory lines and cache statistics."""
    # Memory-trace line: remember the distinct 64-byte cache lines touched.
    result = memInfo.match(line)
    if result:
        value = int(result.group(1), 16)
        value = value & (~63)  # mask off the offset within a 64-byte line
        valuesSeen[value] = 1
        return None
    # Cache-statistics lines — CONSISTENCY: the original repeated the
    # match-and-store pattern inline four times; delegate to the existing
    # parseAndAdd helper, which does exactly that.
    parseAndAdd(numAccesses, line, dictionary, 'accesses')
    parseAndAdd(numHits, line, dictionary, 'hits')
    parseAndAdd(numMisses, line, dictionary, 'misses')
    parseAndAdd(hitRate, line, dictionary, 'hit_rate')
def parseAndAdd(expression, line, dictionary, name):
    """Store ``expression``'s first capture group under ``name`` on a match."""
    match = expression.match(line)
    if match is not None:
        dictionary[name] = match.group(1)
def addElementToDictionary(dictionary, key, value):
    """Bind ``key`` to ``value``, overwriting any previous entry."""
    dictionary.update({key: value})
def readCommandline():
    """Parse sys.argv into the module-level ``args``."""
    global args
    cli = argparse.ArgumentParser(prog='Plot generator')
    cli.add_argument('folder', help='example')
    cli.add_argument('output', help='output name')
    cli.add_argument('-colors', dest='colors', help='example')
    args = cli.parse_args()
def printFormattedData():
    """Write the collected ``data`` table to ``args.output`` as ' | '-separated rows."""
    # NOTE(review): machine-specific path kept to preserve behavior — the
    # output file lands relative to this directory; confirm before reusing.
    os.chdir('/home/tshull226/Documents/school/CS598DHP/project/git/benchmarks')
    columnOrder = ['accesses', 'hits', 'misses', 'hit_rate', 'memory_touched']
    # BUG FIX: the original never closed the file handle; ``with`` closes
    # and flushes it even if a row raises.
    with open(args.output, 'w') as f:
        f.write('benchname | accesses | hits | misses | hit rate | uniquemem\n')
        for filename in sorted(data.keys()):
            f.write(filename)
            for category in columnOrder:
                f.write(' | ')
                f.write(str(data[filename][category]))
            f.write("\n")
def main():
    """Parse every *.log file in args.folder and emit the formatted report."""
    global data, valuesSeen
    readCommandline()
    os.chdir(args.folder)
    for filename in glob.glob("*.log"):
        # Single-argument print() calls work under both Python 2 and 3,
        # replacing the original Python-2-only print statements.
        print(filename)
        valuesSeen = dict()  # reset the per-file address set
        name = parseFilename(filename)
        addElementToDictionary(data, name, dict())
        # initializeData(data[filename])
        with open(filename) as f:
            for line in f:
                parseData(line, data[name])
        # 64 bytes per distinct cache line touched.
        numBytes = len(valuesSeen) * 64
        addElementToDictionary(data[name], 'memory_touched', numBytes)
    print(data)
    printFormattedData()
# Script entry point.
if __name__ == '__main__':
    main()
    # print data
|
from apps.users.models import UserProfile, UserToken
from apps.jobs.models import Jobs
from apps.users import handler as user_handler
import serializers
from django.http import Http404, HttpResponse
from ipware.ip import get_real_ip, get_ip
from libs.sparrow_handler import Sparrow
from rest_framework.views import APIView
from rest_framework import status
from rest_framework import parsers
from rest_framework import renderers
from rest_framework.permissions import AllowAny
# from rest_framework.response import Response
from rest_framework.authtoken.models import Token
from phonenumber_field.phonenumber import PhoneNumber as intlphone
import logging
import simplejson as json
# Init Logger — shared module-level logger for these views.
logger = logging.getLogger(__name__)
# # Below shoudl be commented out because
# # we do not return list of all users
# class UsersList(APIView):
# def get(self, request, format=None):
# users = UserProfile.objects.all()
# serialized_users = serializers.UserSerializer(users, many=True)
# responsedata = dict(detail=serialized_users.data, success=True)
# return HttpResponse(
# json.dumps(responsedata),
# content_type="application/json",
# status=status.HTTP_200_OK)
class UsersDetail(APIView):
    """
    Userdetail resource
    """
    def get_object(self, pk):
        """Look up a UserProfile by its public reference; 404 if absent."""
        try:
            return UserProfile.objects.get(userref=pk)
        except UserProfile.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """
        Returns detail of a user
        ---
        response_serializer: serializers.UserSerializer
        """
        user = self.get_object(pk)
        serialized_user = serializers.UserSerializer(user)
        # Responses are hand-built JSON HttpResponses rather than DRF
        # Response objects, matching the rest of this module.
        responsedata = dict(detail=serialized_user.data, success=True)
        return HttpResponse(
            json.dumps(responsedata),
            content_type="application/json",
            status=status.HTTP_200_OK)
class UserSignup(APIView):
    """
    User signup resource
    """
    permission_classes = (AllowAny,)

    def post(self, request, format=None):
        """
        Allows a user to signup
        ---
        request_serializer: serializers.UserSignupValidationSerializer
        response_serializer: serializers.SignupResponseSerializer
        """
        user = request.user
        ## Error if user is already authenticated
        if user.is_authenticated():
            responsedata = dict(success=False)
            return HttpResponse(
                json.dumps(responsedata),
                content_type="application/json",
                status=status.HTTP_400_BAD_REQUEST)
        # Creating an mutable dict from the request.DATA
        # so we can add address details to it
        data = request.DATA.copy()
        ## Creating a address object
        serialized_user = serializers.UserSignupValidationSerializer(data=data)
        if serialized_user.is_valid():
            # Pack the free-form address fields into a single JSON field.
            data['address'] = json.dumps(dict(
                city=data['city'],
                streetaddress=data['streetaddress']))
            serialized_user = serializers.UserSignupSerializer(data=data)
            if serialized_user.is_valid():
                user = serialized_user.save()
                # Creating a user transaction token for the user
                UserToken.objects.create(user=user)
                # Using User handler to send out verification
                # code to the user on the phone
                um = user_handler.UserManager()
                msgstatus = um.sendVerfTextApp(user.id)
                logging.warn(msgstatus)
                logging.warn("user {0} is created from app".format(user.phone))
                # Creating a auth token for the user
                # and return the same as response
                token = Token.objects.get(user=user)
                tokendata = dict(token=token.key)
                responsedata = dict(detail=tokendata, success=True)
                return HttpResponse(
                    json.dumps(responsedata),
                    content_type="application/json",
                    status=status.HTTP_201_CREATED)
        # Reached when either serializer rejects the payload.
        # NOTE(review): unlike the other endpoints this error payload omits
        # success=False — confirm whether clients rely on that.
        responsedata = dict(detail=serialized_user.errors)
        return HttpResponse(
            json.dumps(responsedata),
            content_type="application/json",
            status=status.HTTP_400_BAD_REQUEST)
class ObtainAuthToken(APIView):
    """
    User login resource
    """
    throttle_classes = ()
    permission_classes = ()
    parser_classes = (
        parsers.FormParser,
        parsers.MultiPartParser,
        parsers.JSONParser,)
    renderer_classes = (renderers.JSONRenderer,)
    serializer_class = serializers.AuthTokenSerializer
    model = Token

    def post(self, request):
        """
        Allows a user to login
        ---
        request_serializer: serializers.AuthTokenSerializer
        response_serializer: serializers.SigninResponseSerializer
        """
        serializer = self.serializer_class(data=request.POST)
        if serializer.is_valid():
            user = serializer.object['user']
            # Reuse an existing token rather than rotating it on every login.
            token, created = Token.objects.get_or_create(user=user)
            tokendata = dict(token=token.key)
            responsedata = dict(detail=tokendata, success=True)
            return HttpResponse(
                json.dumps(responsedata),
                content_type="application/json")
        responsedata = dict(detail=serializer.errors, success=False)
        return HttpResponse(
            json.dumps(responsedata),
            content_type="application/json",
            status=status.HTTP_400_BAD_REQUEST)

# Module-level view callable for the URL conf.
obtain_auth_token = ObtainAuthToken.as_view()
class JobDetail(APIView):
    """
    Information on a single job
    """
    def get_object(self, pk, user):
        """Fetch a job by reference, scoped by the caller's role."""
        try:
            ## For staffs show all
            if user.user_type == 0:
                return Jobs.objects.get(jobref=pk)
            ## For handymen, show data only if they were assigned to it
            elif user.user_type == 1:
                return Jobs.objects.get(jobref=pk, handyman_id=user.id)
            ## For Customers, show data only if they created it
            elif user.user_type == 2:
                return Jobs.objects.get(jobref=pk, customer_id=user.id)
            else:
                raise Http404
        except Jobs.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        """
        Returns detail of a job
        ---
        response_serializer: serializers.JobResponseSerializer
        """
        user = request.user
        job = self.get_object(pk, user)
        serialized_user = serializers.JobResponseSerializer(job)
        data = serialized_user.data
        # Status '1' presumably means a handyman has been assigned, so their
        # contact details can be exposed — TODO confirm the status encoding.
        if job.status == '1':
            data['contact_number'] = str(job.handyman.all()[0].phone.as_e164)
            data['creation_date'] = str(data['creation_date'])
            data['hm_image'] = str(job.handyman.all()[0].get_profile_pic())
            # NOTE(review): the date stringification runs only for status '1'
            # here, while JobsDetail applies it to every row — confirm scope.
            data['completion_date'] = str(data['completion_date'])
        logger.warn(data)
        responsedata = dict(detail=data, success=True)
        return HttpResponse(
            json.dumps(responsedata),
            content_type="application/json")
class JobsDetail(APIView):
    """
    Information on all the jobs
    """
    def get(self, request, format=None):
        """List jobs visible to the caller, optionally filtered by ?jobstatus=."""
        user = request.user
        if request.GET:
            jobstatus = request.GET['jobstatus']
            logging.warn(jobstatus)
            logging.warn(user.user_type)
            ## If THM Staffs retrieve all
            if user.user_type == 0:
                jobs = Jobs.objects.filter(status=jobstatus)
                logging.warn(jobs)
            ## If it's a handymen, only show requests which they were assigned to
            elif user.user_type == 1:
                jobs = Jobs.objects.filter(handyman_id=user.id, status=jobstatus)
            ## If it's a customer only show requests that they created
            elif user.user_type == 2:
                jobs = Jobs.objects.filter(customer_id=user.id, status=jobstatus)
            else:
                responsedata = dict(status=status.HTTP_400_BAD_REQUEST, success=False)
                return HttpResponse(json.dumps(responsedata), content_type="application/json")
        else:
            # No status filter: same role-based scoping over all jobs.
            if user.user_type == 0:
                jobs = Jobs.objects.filter()
            ## If it's a handymen, only show requests which they were assigned to
            elif user.user_type == 1:
                jobs = Jobs.objects.filter(handyman_id=user.id)
            ## If it's a customer only show requests that they created
            elif user.user_type == 2:
                jobs = Jobs.objects.filter(customer_id=user.id)
            else:
                responsedata = dict(status=status.HTTP_400_BAD_REQUEST, success=False)
                return HttpResponse(json.dumps(responsedata), content_type="application/json")
        serialized_jobs = serializers.JobResponseSerializer(jobs, many=True)
        data = serialized_jobs.data
        # Dates are stringified so the payload is json.dumps-serializable.
        for datum in data:
            datum['creation_date'] = str(datum['creation_date'])
            datum['completion_date'] = str(datum['completion_date'])
        responsedata = dict(detail=data, success=True)
        return HttpResponse(
            json.dumps(responsedata),
            content_type="application/json")

    def post(self, request, format=None):
        """
        Allows a user to create a job
        ---
        request_serializer: serializers.JobSerializer
        response_serializer: serializers.JobAPIResponseSerializer
        """
        user = request.user
        ## We only allow a customer or THM staffs to create job requests
        if user.user_type == 1:
            responsedata = dict(status=status.HTTP_400_BAD_REQUEST, success=False)
            return HttpResponse(json.dumps(responsedata), content_type="application/json")
        data = request.DATA.copy()
        serialized_job = serializers.JobSerializer(data=data)
        if serialized_job.is_valid():
            data['customer'] = user.id
            serialized_job = serializers.NewJobSerializer(data=data)
            if serialized_job.is_valid():
                job = serialized_job.save()
                # jobtype 1 == plumber: notify the customer and the admin line.
                if job.jobtype == 1:
                    vas = Sparrow()
                    msg = "Request for a plumber received and is queued for processing, a plumber would be put in touch with you soon!"
                    msgstatus = vas.sendDirectMessage(msg, user.phone)
                    adminmsg = "Request for a plumber received from {0}".format(user.phone.as_national)
                    adminmsgstatus = vas.sendDirectMessage(adminmsg, intlphone.from_string('+9779802036633'))
                    logger.warn(msgstatus)
                    logger.warn(adminmsgstatus)
                # jobtype 2 == electrician: same notifications, different text.
                if job.jobtype == 2:
                    vas = Sparrow()
                    msg = "Request for an electrician received and is queued for processing, an electrician would be put in touch with you soon!"
                    msgstatus = vas.sendDirectMessage(msg, user.phone)
                    adminmsg = "Request for an electrician received from {0}".format(user.phone.as_national)
                    adminmsgstatus = vas.sendDirectMessage(adminmsg, intlphone.from_string('+9779802036633'))
                    logger.warn(msgstatus)
                    logger.warn(adminmsgstatus)
                logging.warn("job {0} is created".format(job.id))
                responsedata = dict (status=status.HTTP_201_CREATED, success=True)
                return HttpResponse(json.dumps(responsedata), content_type="application/json")
        # Either serializer rejected the payload.
        responsedata=dict(data=serialized_job.errors, status=status.HTTP_400_BAD_REQUEST, success=False)
        return HttpResponse(json.dumps(responsedata),content_type="application/json")
class VerifyPhone(APIView):
    """
    Phone verification resource
    """
    def get(self, request, format=None):
        """
        Sends the user a text message containing the verification code
        """
        user = request.user
        client_internal_ip = get_real_ip(request)
        client_public_ip = get_ip(request)
        if user.phone_status is False:
            um = user_handler.UserManager()
            # A fresh one-time token backs the verification code.
            UserToken.objects.create(user=user)
            ### Update User Events
            eventhandler = user_handler.UserEventManager()
            extrainfo = dict(
                client_public_ip=client_public_ip,
                client_internal_ip=client_internal_ip)
            eventhandler.setevent(request.user, 3, extrainfo)
            ### Send user a SMS stating that his phone has been verified
            um.sendVerfTextApp(user.id)
            logger.debug("Verification code sent to the {0}".format(user.phone))
            responsedata = dict(success=True)
            return HttpResponse(
                json.dumps(responsedata),
                content_type="application/json")
        responsedata = dict(detail="Phone already verified", success=False)
        return HttpResponse(
            json.dumps(responsedata),
            content_type="application/json",
            status=status.HTTP_400_BAD_REQUEST,)

    def post(self, request):
        """
        Allows a user to verify his phone
        ---
        request_serializer: serializers.PhoneVerifySerializer
        response_serializer: serializers.JobAPIResponseSerializer
        """
        # The serializer checks the submitted code against the user's token.
        serializer = serializers.PhoneVerifySerializer(
            data=request.POST, context={'request': request})
        if serializer.is_valid():
            user = request.user
            user.phone_status = True
            user.save()
            responsedata = dict(success=True)
            return HttpResponse(
                json.dumps(responsedata),
                content_type="application/json")
        responsedata = dict(detail=serializer.errors, success=False)
        return HttpResponse(
            json.dumps(responsedata),
            content_type="application/json",
            status=status.HTTP_400_BAD_REQUEST,)
class CheckPhoneStatus(APIView):
    """
    Phone status resource
    """
    def get(self, request, format=None):
        """
        Returns the status of the phone verification
        """
        payload = dict(success=request.user.phone_status)
        return HttpResponse(
            json.dumps(payload),
            content_type="application/json")
|
"""
合同视图模块
"""
# pylint: disable=invalid-name, too-few-public-methods
from datetime import datetime
from flask import render_template, redirect, url_for, flash, make_response, request
from flask_login import login_required, current_user
from flask_moment import Moment
import pytz
from sqlalchemy import and_, or_, not_
from .. import mydb
from .forms import LabelDict, PayapplyForm, Paydetail, AddPermissionForm, \
Permissiondetail, ContractApplyForm, ContractCVForm, ContractLawForm, ContractAccForm, \
ContractDPTForm, ContractViewForm, ViewPDF
from . import work
from ..models import Payments, Approvers, User, Permissions, Departments, \
Operations, Contracts, Crossvalids, Lawyers
import pdfkit
#from io import BytesIO
@work.route('/contractapply', methods=['GET', 'POST'])
@login_required
def contractapply():
    """Create a contract-approval application for the current user.

    Rules enforced here:
    1. (over the department head's limit AND applicant is not the head) or
       (within the limit AND applicant IS the head) == cannot apply.
    2. Whether cross-review is required depends on the operation type and is
       encoded in the ``procedure`` bit field (large purchases skip it).
    3. Over-limit applications by the head escalate to the chairman — that
       judgement is handled by the listing views.
    """
    # Form instance.
    contractapply_app = ContractApplyForm()
    # Normalize to China Standard Time.
    pytz.country_timezones('cn')
    tz = pytz.timezone('Asia/Shanghai')
    if contractapply_app.validate_on_submit():
        operation = Operations.query.filter_by(idoperations=contractapply_app.contracttype_apply_input.data).first()# pylint: disable=C0301
        opcode = operation.opapprvcode
        # SECURITY/IDIOM FIX: the original built these attribute lookups with
        # eval(); getattr() is equivalent and cannot execute arbitrary code.
        limit_column = getattr(Permissions, opcode)
        dptoplimit = Permissions.query.order_by(limit_column.desc()).filter( \
            and_(Permissions.companyid == contractapply_app.company_apply_input.data, \
            Permissions.positionid == contractapply_app.applydpt_apply_input.data, \
            Permissions.termstart < datetime.now(tz), Permissions.termend > datetime.now(tz), \
            Permissions.approved == True, Permissions.valid == True)).first()
        incharge = Permissions.query.filter(and_( \
            Permissions.companyid == contractapply_app.company_apply_input.data, \
            Permissions.positionid == contractapply_app.applydpt_apply_input.data, \
            Permissions.termstart < datetime.now(tz), \
            Permissions.termend > datetime.now(tz), Permissions.approved == True, \
            Permissions.valid == True)).all()
        inchargelist = [user.puid for user in incharge]
        if dptoplimit is not None:
            # Over the department head's limit and the applicant is not the head.
            if getattr(dptoplimit, opcode) < float(contractapply_app.amount_apply_input.data) and \
                    current_user.uid not in inchargelist:
                flash('您无法申请超限额合同,请让本部门负责人提交申请。')
            # Within the limit but the applicant IS the head — also rejected.
            elif getattr(dptoplimit, opcode) >= float(contractapply_app.amount_apply_input.data) and\
                    current_user.uid in inchargelist:
                flash('您作为本部门负责人,不可自行提交申请。')
            else:
                # Raw-material (5) or fixed-asset (8) purchase contracts skip
                # cross-review (procedure bit 0b1); everything else starts at
                # the cross-review stage (bit 0b100).  The original built a
                # third, immediately-overwritten Contracts() object here;
                # only the procedure flag actually varies.
                if int(contractapply_app.contracttype_apply_input.data) in [5, 8]:
                    procedure = 0b1
                else:
                    procedure = 0b100
                newcontract = Contracts(companyid=contractapply_app.company_apply_input.data,
                                        applieruid=current_user.uid,
                                        applydpt=contractapply_app.applydpt_apply_input.data,
                                        applytime=datetime.now(tz),
                                        opcode=operation.opcode,
                                        content=contractapply_app.content_apply_input.data,
                                        amount=contractapply_app.amount_apply_input.data,
                                        procedure=procedure)
                mydb.session.add(newcontract)# pylint: disable=no-member
                mydb.session.commit()# pylint: disable=no-member
                #flash('提交成功。')
                return redirect(url_for('work.allcontractlist'))
        else:
            flash('本部门尚未设置部门负责人,暂不可提交申请。')
    return render_template('work/contractapply.html', contractapply_display=contractapply_app)
@work.route('/allcontractlist')
@login_required
def allcontractlist():
    """Paginated list of every contract the current user is allowed to see."""
    # Visibility conditions: collect the [companyid, departmentid] pairs the
    # current user may view, from each of their roles.
    cdlist = []
    incharges = Permissions.query.filter(Permissions.puid == current_user.uid).all()
    for incharge in incharges:
        # positionid 2 presumably denotes the chairman, who can see
        # departments 4-12 — TODO confirm against the Departments table.
        if incharge.positionid == 2:
            for i in range(4, 13):
                cdlist.append([incharge.companyid, i])
        cdlist.append([incharge.companyid, incharge.positionid])
    # Legal consultants see every department of their companies.
    lawyers = Lawyers.query.filter(Lawyers.consultantuid == current_user.uid).all()
    for lawyer in lawyers:
        for i in range(4, 13):
            cdlist.append([lawyer.companyid, i])
    # Cross-reviewers see the departments they review.
    crossvalids = Crossvalids.query.filter(Crossvalids.crossuid == current_user.uid).all()
    for crossvalid in crossvalids:
        cdlist.append([crossvalid.companyid, crossvalid.crossdpt])
    # NOTE(review): this "stampers" query is identical to the lawyers query
    # above and only duplicates entries — confirm whether a different model
    # was intended.
    stampers = Lawyers.query.filter(Lawyers.consultantuid == current_user.uid).all()
    for stamper in stampers:
        for i in range(4, 13):
            cdlist.append([stamper.companyid, i])
    # Combined filter: any visible (company, department) pair, or own filings.
    cdconditions = (and_(Contracts.companyid == cd[0], Contracts.applydpt == cd[1]) for cd in cdlist) # pylint: disable=C0301
    #allcontract = Contracts.query.order_by(Contracts.applytime.desc()).all()
    page = request.args.get('page', 1, type=int)
    contract_pagination = Contracts.query.filter(or_(*cdconditions, Contracts.applieruid == current_user.uid)).order_by(Contracts.applytime.desc()).paginate(page, per_page=20, error_out=False) # pylint: disable=C0301
    contracts = contract_pagination.items
    label_dict = LabelDict()
    return render_template('work/allcontractlist.html',
                           #allcontract=allcontract,
                           label_dict=label_dict,
                           contract_pagination=contract_pagination, contracts_disp=contracts)
@work.route('/contractview/<contractid>', methods=['GET', 'POST'])
@login_required
def contractview(contractid):
    """Contract approval detail view; can also render the record as a PDF."""
    contractviewform_app = ContractViewForm()
    label_dict = LabelDict()
    contract = Contracts.query.filter_by(idcontracts=contractid).first()
    if contract is not None:
        # BUG FIX: the original looked up ``operation`` (dereferencing
        # contract.opcode) BEFORE the None check, so an unknown id crashed
        # with AttributeError instead of rendering the 404 page.
        operation = Operations.query.filter_by(opcode=contract.opcode).first()# pylint: disable=C0301
        # Visibility conditions: [companyid, departmentid] pairs the current
        # user may view (mirrors allcontractlist).
        cdlist = []
        incharges = Permissions.query.filter(Permissions.puid == current_user.uid).all()
        for incharge in incharges:
            if incharge.positionid == 2:
                for i in range(4, 13):
                    cdlist.append([incharge.companyid, i])
            cdlist.append([incharge.companyid, incharge.positionid])
        lawyers = Lawyers.query.filter(Lawyers.consultantuid == current_user.uid).all()
        for lawyer in lawyers:
            for i in range(4, 13):
                cdlist.append([lawyer.companyid, i])
        crossvalids = Crossvalids.query.filter(Crossvalids.crossuid == current_user.uid).all()
        for crossvalid in crossvalids:
            cdlist.append([crossvalid.companyid, crossvalid.crossdpt])
        stampers = Lawyers.query.filter(Lawyers.consultantuid == current_user.uid).all()
        for stamper in stampers:
            for i in range(4, 13):
                cdlist.append([stamper.companyid, i])
        # Bounce back to the list page when the user lacks view rights.
        if not([contract.companyid, contract.applydpt] in cdlist or contract.applieruid == current_user.uid):
            flash("您没有查看本申请的权限。")
            return redirect(url_for('work.allcontractlist'))
        # Populate the read-only form fields.
        contractviewform_app.company_view_input.data = label_dict.all_company_dict[contract.companyid] # pylint: disable=C0301
        contractviewform_app.applydpt_view_input.data = label_dict.all_dpt_dict[contract.applydpt]
        contractviewform_app.contracttype_view_input.data = operation.opname
        contractviewform_app.content_view_input.data = contract.content
        contractviewform_app.amount_view_input.data = contract.amount
        contractviewform_app.applier_view_input.data = label_dict.all_users_dict[contract.applieruid] # pylint: disable=C0301
        contractviewform_app.applytime_view_input.data = contract.applytime
        # Cross-review section; the low bits of ``procedure`` track whether
        # cross-review was skipped (bit 0b1) and finance-confirmed (bit 0b10).
        if contract.crossuid is None:
            contractviewform_app.cvuser_view_input.data = ""
        else:
            contractviewform_app.cvuser_view_input.data = label_dict.all_users_dict[contract.crossuid] # pylint: disable=C0301
        if contract.procedure|0b1111111110 == 0b1111111111:
            contractviewform_app.cvcontent_view_input.data = "[系统提示]具有前置流程的申请无需交叉复核。前置流程完整性暂须财务复核确认。" # pylint: disable=C0301
        elif contract.procedure|0b1111111100 == 0b1111111111:
            contractviewform_app.cvcontent_view_input.data = "[系统提示]具有前置流程的申请无需交叉复核。前置流程完整性已经财务复核确认。" # pylint: disable=C0301
        elif contract.crosscontent is None:
            contractviewform_app.cvcontent_view_input.data = ""
        else:
            contractviewform_app.cvcontent_view_input.data = contract.crosscontent
        if contract.crossopinion is None:
            contractviewform_app.cvopinion_view_input.data = ""
        else:
            contractviewform_app.cvopinion_view_input.data = {True:"有异议",False:"无异议"}[contract.crossopinion] # pylint: disable=C0301
        if contract.crosstime is None:
            contractviewform_app.cvtime_view_input.data = ""
        else:
            contractviewform_app.cvtime_view_input.data = contract.crosstime
        # Lawyer review section.
        if contract.lawyeruid is None:
            contractviewform_app.lawyer_view_input.data = ""
        else:
            contractviewform_app.lawyer_view_input.data = label_dict.all_users_dict[contract.lawyeruid]
        if contract.lawyercontent is None:
            contractviewform_app.lawcontent_view_input.data = ""
        else:
            contractviewform_app.lawcontent_view_input.data = contract.lawyercontent
        if contract.lawyeropinion is None:
            contractviewform_app.lawopinion_view_input.data = ""
        else:
            contractviewform_app.lawopinion_view_input.data = {True:"有异议",False:"无异议"}[contract.lawyeropinion]
        if contract.lawyertime is None:
            contractviewform_app.lawyertime_view_input.data = ""
        else:
            contractviewform_app.lawyertime_view_input.data = contract.lawyertime
        # Finance review section.
        if contract.accuid is None:
            contractviewform_app.acc_view_input.data = ""
        else:
            contractviewform_app.acc_view_input.data = label_dict.all_users_dict[contract.accuid]
        if contract.acccontent is None:
            contractviewform_app.acccontent_view_input.data = ""
        else:
            contractviewform_app.acccontent_view_input.data = contract.acccontent
        if contract.accopinion is None:
            contractviewform_app.accopinion_view_input.data = ""
        else:
            contractviewform_app.accopinion_view_input.data = {True:"有异议",False:"无异议"}[contract.accopinion]
        if contract.acctime is None:
            contractviewform_app.acctime_view_input.data = ""
        else:
            contractviewform_app.acctime_view_input.data = contract.acctime
        # Final approver section.
        if contract.authid is None:
            contractviewform_app.auth_view_input.data = ""
        else:
            contractviewform_app.auth_view_input.data = "[授权书] " + str(contract.authid) + " 号"
        if contract.approveruid is None:
            contractviewform_app.approver_view_input.data = ""
        else:
            contractviewform_app.approver_view_input.data = label_dict.all_users_dict[contract.approveruid]
        if contract.apprvopinion is None:
            contractviewform_app.apprvopinion_view_input.data = ""
        else:
            contractviewform_app.apprvopinion_view_input.data = {True:"同意",False:"不同意"}[contract.apprvopinion]
        contractviewform_app.apprvtime_view_input.data = contract.apprvtime
    else:
        return render_template('404.html'), 404
    viewpdf = ViewPDF()
    if viewpdf.is_submitted():
        htmlfile = render_template('work/contractpdf.html', pdfdata=contractviewform_app, label_dict=label_dict, contractid=contractid)
        pdffile = pdfkit.from_string(htmlfile,False)
        response = make_response(pdffile)
        # BUG FIX: Content-Type was misspelled 'aplication/pdf'.
        response.headers['Content-Type'] = 'application/pdf'
        response.headers['Content-Disposition'] = 'inline; filename=contract_'+contractid+'.pdf'
        # The browser only opens the PDF inline when given a file path;
        # otherwise it downloads. TODO: cache the generated PDF somewhere
        # and delete it automatically after the session closes.
        return response
    return render_template('work/contractview.html', contractviewform_disp=contractviewform_app,
                           viewpdf=viewpdf, label_dict=label_dict)
@work.route('/contractoncv/<contractid>', methods=['GET', 'POST'])
@login_required
def contractoncv(contractid):
    """Cross-review page: show a contract application to an authorized cross
    reviewer and record the reviewer's opinion.

    ``contract.procedure`` is a 10-bit flag field; bit 1 (lowest) means
    "cross review not required", bit 4 means "cross review completed".
    """
    # Pin all timestamps to China Standard Time.
    pytz.country_timezones('cn')
    tz = pytz.timezone('Asia/Shanghai')
    label_dict = LabelDict()
    contract = Contracts.query.filter_by(idcontracts=contractid).first()
    if contract is not None:
        operation = Operations.query.filter_by(opcode=contract.opcode).first()# pylint: disable=C0301
        # Bit 1 of the procedure flags set: this contract skips cross review.
        if contract.procedure|0b1111111110 == 0b1111111111:
            flash("本合同无需交叉复核。")
            return redirect(url_for('work.addrules'))
        # Bit 4 set: cross review already completed.
        elif contract.procedure|0b1111110111 == 0b1111111111:
            flash("本合同已经过交叉复核。")
            return redirect(url_for('work.addrules'))
        # Segregation of duties: the reviewer must hold cross-review rights
        # for the applying department and must NOT be a department head /
        # chairman of this company nor the applicant.
        # Department-head or chairman permission of the current user:
        inchargetest = Permissions.query.filter(or_(\
            and_(Permissions.puid == current_user.uid, \
            Permissions.companyid == contract.companyid, \
            Permissions.positionid == 2, Permissions.termstart <= datetime.now(tz), \
            Permissions.termend >= datetime.now(tz), Permissions.valid == True),\
            and_(Permissions.puid == current_user.uid, \
            Permissions.companyid == contract.companyid, \
            Permissions.positionid == contract.applydpt, Permissions.termstart <= datetime.now(tz),\
            Permissions.termend >= datetime.now(tz), Permissions.valid == True))).first()
        # Cross reviewer registered for the applying department:
        crosstest = Crossvalids.query.filter(and_(Crossvalids.companyid == contract.companyid,\
            Crossvalids.crossdpt == contract.applydpt, \
            Crossvalids.crossuid == current_user.uid)).first()
        if crosstest is None:
            flash("您无此部门业务交叉复核权限。")
            return redirect(url_for('work.addrules'))
        elif (inchargetest is not None) or (contract.applieruid == current_user.uid):
            flash("您有与交叉复核不相容的权限或角色。")
            return redirect(url_for('work.addrules'))
        # Populate the read-only form fields from the contract record.
        contractcvform_app = ContractCVForm()
        contractcvform_app.company_cv_input.data = label_dict.all_company_dict[contract.companyid]
        contractcvform_app.applydpt_cv_input.data = label_dict.all_dpt_dict[contract.applydpt]
        contractcvform_app.contracttype_cv_input.data = operation.opname
        contractcvform_app.content_cv_input.data = contract.content
        contractcvform_app.amount_cv_input.data = contract.amount
        contractcvform_app.applier_cv_input.data = label_dict.all_users_dict[contract.applieruid]
        contractcvform_app.applytime_cv_input.data = contract.applytime
        # Legal review trail (may not have happened yet).
        if contract.lawyeruid is None:
            contractcvform_app.lawyer_cv_input.data = ""
        else:
            contractcvform_app.lawyer_cv_input.data = label_dict.all_users_dict[contract.lawyeruid]
        contractcvform_app.lawcontent_cv_input.data = contract.lawyercontent
        if contract.lawyeropinion is None:
            contractcvform_app.lawopinion_cv_input.data = ""
        else:
            contractcvform_app.lawopinion_cv_input.data = {True:"有异议",False:"无异议"}[contract.lawyeropinion] # pylint: disable=C0301
        contractcvform_app.lawyertime_cv_input.data = contract.lawyertime
        # Accounting review trail (may not have happened yet).
        if contract.accuid is None:
            contractcvform_app.acc_cv_input.data = ""
        else:
            contractcvform_app.acc_cv_input.data = label_dict.all_users_dict[contract.accuid]
        contractcvform_app.acccontent_cv_input.data = contract.acccontent
        if contract.accopinion is None:
            contractcvform_app.accopinion_cv_input.data = ""
        else:
            contractcvform_app.accopinion_cv_input.data = {True:"有异议",False:"无异议"}[contract.accopinion]
        contractcvform_app.acctime_cv_input.data = contract.acctime
    else:
        return render_template('404.html'), 404
    if contractcvform_app.validate_on_submit():
        # Guard against double submission between GET and POST.
        if contract.procedure|0b1111110111 == 0b1111111111:
            flash("本合同已经过交叉复核,请不要重复提交。")
            return redirect(url_for('work.addrules'))
        # "0" = no objection recorded as False, "1" = objection as True.
        if contractcvform_app.cvopinion_cv_input.data == "0":
            contract.crossopinion = False
        elif contractcvform_app.cvopinion_cv_input.data == "1":
            contract.crossopinion = True
        # Set bit 4: cross review done.
        contract.procedure = contract.procedure|0b1000
        contract.crossuid = current_user.uid
        contract.crosscontent = contractcvform_app.cvcontent_cv_input.data
        contract.crosstime = datetime.now(tz)
        mydb.session.add(contract)# pylint: disable=no-member
        mydb.session.commit()# pylint: disable=no-member
        return redirect(url_for('work.contractreview'))
    return render_template('work/contractoncv.html', contractcvform_disp=contractcvform_app,
        contract=contract, label_dict=label_dict)
@work.route('/contractonlaw/<contractid>', methods=['GET', 'POST'])
@login_required
def contractonlaw(contractid):
    """Legal review page: show a contract application to the company's legal
    reviewer and record the legal opinion.

    Bit 5 of the ``contract.procedure`` flag field means "legal review
    completed".
    """
    # Pin all timestamps to China Standard Time.
    pytz.country_timezones('cn')
    tz = pytz.timezone('Asia/Shanghai')
    label_dict = LabelDict()
    contract = Contracts.query.filter_by(idcontracts=contractid).first()
    if contract is not None:
        operation = Operations.query.filter_by(opcode=contract.opcode).first()# pylint: disable=C0301
        # Bit 5 set: legal review already completed.
        if contract.procedure|0b1111101111 == 0b1111111111:
            flash("本合同已经过法务复核。")
            return redirect(url_for('work.addrules'))
        # Segregation of duties: the reviewer must hold legal-review rights
        # and must NOT be a department head / chairman nor the applicant.
        # Department-head or chairman permission of the current user:
        inchargetest = Permissions.query.filter(or_(\
            and_(Permissions.puid == current_user.uid, \
            Permissions.companyid == contract.companyid, \
            Permissions.positionid == 2, Permissions.termstart <= datetime.now(tz), \
            Permissions.termend >= datetime.now(tz), Permissions.valid == True),\
            and_(Permissions.puid == current_user.uid, \
            Permissions.companyid == contract.companyid, \
            Permissions.positionid == contract.applydpt, Permissions.termstart <= datetime.now(tz),\
            Permissions.termend >= datetime.now(tz), Permissions.valid == True))).first()
        # Legal reviewer (Lawyers row with consultant == 1) for the company:
        lawtest = Lawyers.query.filter(and_(Lawyers.companyid == contract.companyid,\
            Lawyers.consultant == 1, \
            Lawyers.consultantuid == current_user.uid)).first()
        if lawtest is None:
            flash("您无法务复核权限。")
            return redirect(url_for('work.addrules'))
        elif (inchargetest is not None) or (contract.applieruid == current_user.uid):
            flash("您有与法务复核不相容的权限或角色。")
            return redirect(url_for('work.addrules'))
        # Populate the read-only form fields from the contract record.
        contractlawform_app = ContractLawForm()
        contractlawform_app.company_law_input.data = label_dict.all_company_dict[contract.companyid]
        contractlawform_app.applydpt_law_input.data = label_dict.all_dpt_dict[contract.applydpt]
        contractlawform_app.contracttype_law_input.data = operation.opname
        contractlawform_app.content_law_input.data = contract.content
        contractlawform_app.amount_law_input.data = contract.amount
        contractlawform_app.applier_law_input.data = label_dict.all_users_dict[contract.applieruid]
        contractlawform_app.applytime_law_input.data = contract.applytime
        # Cross-review trail.
        if contract.crossuid is None:
            contractlawform_app.cvuser_law_input.data = ""
        else:
            contractlawform_app.cvuser_law_input.data = label_dict.all_users_dict[contract.crossuid]
        # Bit 1 set: application has prerequisite workflows and skips cross
        # review; bits 1+2 set: prerequisite completeness already confirmed
        # by the accounting review.
        if contract.procedure|0b1111111110 == 0b1111111111:
            contractlawform_app.cvcontent_law_input.data = "[系统提示]具有前置流程的申请无需交叉复核。前置流程完整性暂须财务复核确认。"
        elif contract.procedure|0b1111111100 == 0b1111111111:
            contractlawform_app.cvcontent_law_input.data = "[系统提示]具有前置流程的申请无需交叉复核。前置流程完整性已经财务复核确认。"
        else:
            contractlawform_app.cvcontent_law_input.data = contract.crosscontent
        if contract.crossopinion is None:
            contractlawform_app.cvopinion_law_input.data = ""
        else:
            contractlawform_app.cvopinion_law_input.data = {True:"有异议",False:"无异议"}[contract.crossopinion]
        contractlawform_app.cvtime_law_input.data = contract.crosstime
        # Accounting review trail (may not have happened yet).
        if contract.accuid is None:
            contractlawform_app.acc_law_input.data = ""
        else:
            contractlawform_app.acc_law_input.data = label_dict.all_users_dict[contract.accuid]
        contractlawform_app.acccontent_law_input.data = contract.acccontent
        if contract.accopinion is None:
            contractlawform_app.accopinion_law_input.data = ""
        else:
            contractlawform_app.accopinion_law_input.data = {True:"有异议",False:"无异议"}[contract.accopinion]
        contractlawform_app.acctime_law_input.data = contract.acctime
    else:
        return render_template('404.html'), 404
    if contractlawform_app.validate_on_submit():
        # Guard against double submission between GET and POST.
        if contract.procedure|0b1111101111 == 0b1111111111:
            flash("本合同已经过法务复核,请不要重复提交。")
            return redirect(url_for('work.addrules'))
        # "0" = no objection recorded as False, "1" = objection as True.
        if contractlawform_app.lawopinion_law_input.data == "0":
            contract.lawyeropinion = False
        elif contractlawform_app.lawopinion_law_input.data == "1":
            contract.lawyeropinion = True
        # Set bit 5: legal review done.
        contract.procedure = contract.procedure|0b10000
        contract.lawyeruid = current_user.uid
        contract.lawyercontent = contractlawform_app.lawcontent_law_input.data
        contract.lawyertime = datetime.now(tz)
        mydb.session.add(contract)# pylint: disable=no-member
        mydb.session.commit()# pylint: disable=no-member
        return redirect(url_for('work.contractreview'))
    return render_template('work/contractonlaw.html', contractlawform_disp=contractlawform_app,
        contract=contract, label_dict=label_dict)
@work.route('/contractonacc/<contractid>', methods=['GET', 'POST'])
@login_required
def contractonacc(contractid):
    """Accounting review page: show a contract application to the company's
    accounting reviewer and record the accounting opinion.

    Bit 6 of the ``contract.procedure`` flag field means "accounting review
    completed".
    """
    # Pin all timestamps to China Standard Time.
    pytz.country_timezones('cn')
    tz = pytz.timezone('Asia/Shanghai')
    label_dict = LabelDict()
    contract = Contracts.query.filter_by(idcontracts=contractid).first()
    if contract is not None:
        operation = Operations.query.filter_by(opcode=contract.opcode).first()# pylint: disable=C0301
        # Bit 6 set: accounting review already completed.
        if contract.procedure|0b1111011111 == 0b1111111111:
            flash("本合同已经过财务复核。")
            return redirect(url_for('work.addrules'))
        # Segregation of duties: the reviewer must hold accounting-review
        # rights and must NOT be a department head / chairman nor the
        # applicant.
        # Department-head or chairman permission of the current user:
        inchargetest = Permissions.query.filter(or_(\
            and_(Permissions.puid == current_user.uid, \
            Permissions.companyid == contract.companyid, \
            Permissions.positionid == 2, Permissions.termstart <= datetime.now(tz), \
            Permissions.termend >= datetime.now(tz), Permissions.valid == True),\
            and_(Permissions.puid == current_user.uid, \
            Permissions.companyid == contract.companyid, \
            Permissions.positionid == contract.applydpt, Permissions.termstart <= datetime.now(tz),\
            Permissions.termend >= datetime.now(tz), Permissions.valid == True))).first()
        # Accounting reviewer (stored in the Lawyers table, consultant == 2):
        acctest = Lawyers.query.filter(and_(Lawyers.companyid == contract.companyid,\
            Lawyers.consultant == 2, \
            Lawyers.consultantuid == current_user.uid)).first()
        if acctest is None:
            flash("您无财务复核权限。")
            return redirect(url_for('work.addrules'))
        elif (inchargetest is not None) or (contract.applieruid == current_user.uid):
            flash("您有与财务复核不相容的权限或角色。")
            return redirect(url_for('work.addrules'))
        # Populate the read-only form fields from the contract record.
        contractaccform_app = ContractAccForm()
        contractaccform_app.company_acc_input.data = label_dict.all_company_dict[contract.companyid]
        contractaccform_app.applydpt_acc_input.data = label_dict.all_dpt_dict[contract.applydpt]
        contractaccform_app.contracttype_acc_input.data = operation.opname
        contractaccform_app.content_acc_input.data = contract.content
        contractaccform_app.amount_acc_input.data = contract.amount
        contractaccform_app.applier_acc_input.data = label_dict.all_users_dict[contract.applieruid]
        contractaccform_app.applytime_acc_input.data = contract.applytime
        # Cross-review trail.
        if contract.crossuid is None:
            contractaccform_app.cvuser_acc_input.data = ""
        else:
            contractaccform_app.cvuser_acc_input.data = label_dict.all_users_dict[contract.crossuid]
        # Bit 1 set: application has prerequisite workflows and skips cross
        # review; bits 1+2 set: prerequisite completeness already confirmed
        # by the accounting review.
        if contract.procedure|0b1111111110 == 0b1111111111:
            contractaccform_app.cvcontent_acc_input.data = "[系统提示]具有前置流程的申请无需交叉复核。前置流程完整性暂须财务复核确认。"
        elif contract.procedure|0b1111111100 == 0b1111111111:
            contractaccform_app.cvcontent_acc_input.data = "[系统提示]具有前置流程的申请无需交叉复核。前置流程完整性已经财务复核确认。"
        else:
            contractaccform_app.cvcontent_acc_input.data = contract.crosscontent
        if contract.crossopinion is None:
            contractaccform_app.cvopinion_acc_input.data = ""
        else:
            contractaccform_app.cvopinion_acc_input.data = {True:"有异议",False:"无异议"}[contract.crossopinion]
        contractaccform_app.cvtime_acc_input.data = contract.crosstime
        # Legal review trail (may not have happened yet).
        if contract.lawyeruid is None:
            contractaccform_app.lawyer_acc_input.data = ""
        else:
            contractaccform_app.lawyer_acc_input.data = label_dict.all_users_dict[contract.lawyeruid]
        contractaccform_app.lawcontent_acc_input.data = contract.lawyercontent
        if contract.lawyeropinion is None:
            contractaccform_app.lawopinion_acc_input.data = ""
        else:
            contractaccform_app.lawopinion_acc_input.data = {True:"有异议",False:"无异议"}[contract.lawyeropinion]
        contractaccform_app.lawyertime_acc_input.data = contract.lawyertime
    else:
        return render_template('404.html'), 404
    if contractaccform_app.validate_on_submit():
        # Guard against double submission between GET and POST.
        if contract.procedure|0b1111011111 == 0b1111111111:
            flash("本合同已经过财务复核,请不要重复提交。")
            return redirect(url_for('work.addrules'))
        # "0" = no objection recorded as False, "1" = objection as True.
        if contractaccform_app.accopinion_acc_input.data == "0":
            contract.accopinion = False
        elif contractaccform_app.accopinion_acc_input.data == "1":
            contract.accopinion = True
        # Set bit 6 (accounting done) and, for now, bit 2 as well: the
        # prerequisite-workflow confirmation is delegated to accounting.
        contract.procedure = contract.procedure|0b100010
        contract.accuid = current_user.uid
        contract.acccontent = contractaccform_app.acccontent_acc_input.data
        contract.acctime = datetime.now(tz)
        mydb.session.add(contract)# pylint: disable=no-member
        mydb.session.commit()# pylint: disable=no-member
        return redirect(url_for('work.contractreview'))
    return render_template('work/contractonacc.html', contractaccform_disp=contractaccform_app,
        contract=contract, label_dict=label_dict)
@work.route('/contractondpt/<contractid>', methods=['GET', 'POST'])
@login_required
def contractondpt(contractid):
    """First-level (department head) approval page.

    Shows the full review trail of a contract application and, on POST,
    records the approval decision.  Two fixes relative to the original:
    the contract is checked for existence BEFORE it is dereferenced
    (previously an unknown id raised AttributeError while building the
    permission queries), and the amount-limit column comparison is resolved
    with ``getattr`` instead of ``eval`` on a concatenated string.
    """
    # Pin all timestamps to China Standard Time.
    pytz.country_timezones('cn')
    tz = pytz.timezone('Asia/Shanghai')
    label_dict = LabelDict()
    contract = Contracts.query.filter_by(idcontracts=contractid).first()
    if contract is None:
        # Unknown contract id: 404 instead of crashing on attribute access.
        return render_template('404.html'), 404
    operation = Operations.query.filter_by(opcode=contract.opcode).first()# pylint: disable=C0301
    contractdptform_app = ContractDPTForm()
    now = datetime.now(tz)
    # Column holding this operation's approval amount limit, resolved by
    # attribute name (safe replacement for the original eval()).
    apprvlimit = getattr(Permissions, operation.opapprvcode)
    # Any currently valid permission the user holds in this company.
    # NOTE(review): despite the name, this query is not restricted to the
    # chairman position -- confirm intent.
    ispresident = Permissions.query.filter(and_(Permissions.companyid==contract.companyid,
                                                Permissions.termstart <= now,
                                                Permissions.termend >= now,
                                                Permissions.valid == True,
                                                Permissions.puid==current_user.uid)).first()
    # The user's department-head permission with a sufficient approval limit.
    isincharge = Permissions.query.filter(and_(Permissions.companyid==contract.companyid,
                                               Permissions.positionid==contract.applydpt,
                                               Permissions.termstart <= now,
                                               Permissions.termend >= now,
                                               Permissions.valid == True,
                                               Permissions.puid == current_user.uid,
                                               apprvlimit >= contract.amount)).first()
    # Any department-head permission (any holder) with a sufficient limit.
    dptpermission = Permissions.query.filter(and_(Permissions.companyid==contract.companyid,
                                                  Permissions.positionid==contract.applydpt,
                                                  Permissions.termstart <= now,
                                                  Permissions.termend >= now,
                                                  Permissions.valid == True,
                                                  apprvlimit >= contract.amount)).first()
    # Bit 7 set: already approved at department level.
    if contract.procedure|0b1110111111 == 0b1111111111:
        flash("本合同已经部门负责人审批。")
        return redirect(url_for('work.addrules'))
    # Approval requires the review stage to be complete (either of the two
    # accepted flag combinations).
    if not (contract.procedure&0b0000110011 == 0b0000110011 or
            contract.procedure&0b0000111100 == 0b0000111100):
        flash("本合同尚未流转到批准阶段。")
        return redirect(url_for('work.addrules'))
    # Segregation of duties: the approver must not appear in any other role.
    if current_user.uid in [contract.applieruid, contract.crossuid, contract.lawyeruid, contract.accuid]:
        flash("您有与批准订立合同不相容的权限或角色。")
        return redirect(url_for('work.addrules'))
    if dptpermission is not None:  # a department head with a sufficient limit exists
        if ispresident is not None and isincharge is None:
            flash("本合同应由部门长批准。")
            return redirect(url_for('work.addrules'))
        elif ispresident is None and isincharge is None:
            flash("您无批准本合同的相应权限。")
            return redirect(url_for('work.addrules'))
        elif ispresident is None:
            # NOTE(review): ispresident's filters are a subset of
            # isincharge's, so this branch looks unreachable -- confirm.
            flash("本合同应由董事长批准。")
            return redirect(url_for('work.addrules'))
    # Populate the read-only form fields from the contract record.
    contractdptform_app.company_dpt_input.data = label_dict.all_company_dict[contract.companyid]
    contractdptform_app.applydpt_dpt_input.data = label_dict.all_dpt_dict[contract.applydpt]
    contractdptform_app.contracttype_dpt_input.data = operation.opname
    contractdptform_app.content_dpt_input.data = contract.content
    contractdptform_app.amount_dpt_input.data = contract.amount
    contractdptform_app.applier_dpt_input.data = label_dict.all_users_dict[contract.applieruid]
    contractdptform_app.applytime_dpt_input.data = contract.applytime
    # Cross-review trail.
    if contract.crossuid is None:
        contractdptform_app.cvuser_dpt_input.data = ""
    else:
        contractdptform_app.cvuser_dpt_input.data = label_dict.all_users_dict[contract.crossuid]
    if contract.procedure|0b1111111110 == 0b1111111111:
        contractdptform_app.cvcontent_dpt_input.data = "[系统提示]具有前置流程的申请无需交叉复核。前置流程完整性暂须财务复核确认。"
    elif contract.procedure|0b1111111100 == 0b1111111111:
        contractdptform_app.cvcontent_dpt_input.data = "[系统提示]具有前置流程的申请无需交叉复核。前置流程完整性已经财务复核确认。"
    else:
        contractdptform_app.cvcontent_dpt_input.data = contract.crosscontent
    if contract.crossopinion is None:
        contractdptform_app.cvopinion_dpt_input.data = ""
    else:
        contractdptform_app.cvopinion_dpt_input.data = {True:"有异议",False:"无异议"}[contract.crossopinion]
    contractdptform_app.cvtime_dpt_input.data = contract.crosstime
    # Legal review trail.
    if contract.lawyeruid is None:
        contractdptform_app.lawyer_dpt_input.data = ""
    else:
        contractdptform_app.lawyer_dpt_input.data = label_dict.all_users_dict[contract.lawyeruid]
    contractdptform_app.lawcontent_dpt_input.data = contract.lawyercontent
    if contract.lawyeropinion is None:
        contractdptform_app.lawopinion_dpt_input.data = ""
    else:
        contractdptform_app.lawopinion_dpt_input.data = {True:"有异议",False:"无异议"}[contract.lawyeropinion]
    contractdptform_app.lawyertime_dpt_input.data = contract.lawyertime
    # Accounting review trail.
    if contract.accuid is None:
        contractdptform_app.acc_dpt_input.data = ""
    else:
        contractdptform_app.acc_dpt_input.data = label_dict.all_users_dict[contract.accuid]
    contractdptform_app.acccontent_dpt_input.data = contract.acccontent
    if contract.accopinion is None:
        contractdptform_app.accopinion_dpt_input.data = ""
    else:
        contractdptform_app.accopinion_dpt_input.data = {True:"有异议",False:"无异议"}[contract.accopinion]
    contractdptform_app.acctime_dpt_input.data = contract.acctime
    if contractdptform_app.validate_on_submit():
        # Re-run the guards: state may have changed between GET and POST.
        if contract.procedure|0b1110111111 == 0b1111111111:
            flash("本合同已经部门负责人审批。")
            return redirect(url_for('work.addrules'))
        if not (contract.procedure&0b0000110011 == 0b0000110011 or
                contract.procedure&0b0000111100 == 0b0000111100):
            flash("本合同尚未流转到批准阶段。")
            return redirect(url_for('work.addrules'))
        if current_user.uid in [contract.applieruid, contract.crossuid, contract.lawyeruid, contract.accuid]:
            flash("您有与批准订立合同不相容的权限或角色。")
            return redirect(url_for('work.addrules'))
        if dptpermission is not None:
            if ispresident is not None and isincharge is None:
                flash("本合同应由部门长批准。")
                return redirect(url_for('work.addrules'))
            elif ispresident is None and isincharge is None:
                flash("您无批准本合同的相应权限。")
                return redirect(url_for('work.addrules'))
            elif ispresident is None:
                flash("本合同应由董事长批准。")
                return redirect(url_for('work.addrules'))
        contract.procedure = contract.procedure|0b1000000  # set bit 7: approved at this level
        # Placeholder: a switch to trigger second-level approval belongs here.
        if dptpermission is None and ispresident is not None:
            contract.authid = ispresident.idpermission
        elif isincharge is not None:
            contract.authid = isincharge.idpermission
        contract.approveruid = current_user.uid
        if contractdptform_app.apprvopinion_dpt_input.data == "0":
            contract.apprvopinion = False
        elif contractdptform_app.apprvopinion_dpt_input.data == "1":
            contract.apprvopinion = True
        contract.apprvtime = datetime.now(tz)
        mydb.session.add(contract)# pylint: disable=no-member
        mydb.session.commit()# pylint: disable=no-member
        return redirect(url_for('work.contractreview'))
    return render_template('work/contractondpt.html', contractdptform_disp=contractdptform_app,
        contract=contract, label_dict=label_dict)
@work.route('/contractreview')
@login_required
def contractreview():
    """Render the review work list: contracts pending first-level approval,
    cross review, legal review and accounting review for the current user."""
    # All timestamp comparisons use China Standard Time.
    pytz.country_timezones('cn')
    tz = pytz.timezone('Asia/Shanghai')
    # Companies where the current user holds a valid chairman permission.
    chairman_rows = Permissions.query.filter(Permissions.positionid==2, Permissions.puid==current_user.uid,
        Permissions.termstart <= datetime.now(tz), Permissions.termend >= datetime.now(tz),
        Permissions.valid == True).all()
    president_list = [row.companyid for row in chairman_rows]
    # [company, position] pairs for every valid permission the user holds.
    incharge_rows = Permissions.query.filter(Permissions.puid==current_user.uid,
        Permissions.termstart <= datetime.now(tz), Permissions.termend >= datetime.now(tz),
        Permissions.valid == True).all()
    incharge_list = [[row.companyid, row.positionid] for row in incharge_rows]
    # [company, department] pairs where the user is a cross reviewer.
    cv_rows = Crossvalids.query.filter(Crossvalids.crossuid==current_user.uid).all()
    cv_list = [[row.companyid, row.crossdpt] for row in cv_rows]
    # Companies where the user is the legal reviewer (consultant type 1).
    lawyer_rows = Lawyers.query.filter(and_(Lawyers.consultantuid==current_user.uid, Lawyers.consultant==1)).all()
    lawyer_list = [row.companyid for row in lawyer_rows]
    # Companies where the user is the accounting reviewer (consultant type 2).
    acc_rows = Lawyers.query.filter(and_(Lawyers.consultantuid==current_user.uid, Lawyers.consultant==2)).all()
    acc_list = [row.companyid for row in acc_rows]
    allcontract = Contracts.query.order_by(Contracts.applytime.desc()).all()
    # First-level approval queue: reviews complete (either flag pattern),
    # bit 7 still clear, user holds no incompatible role, and user has
    # department-head or chairman standing for the contract's company.
    dptreview = [c for c in allcontract
                 if (c.procedure&0b0000110011 == 0b0000110011 or c.procedure&0b0000111100 == 0b0000111100)
                 and c.procedure|0b1110111111 == 0b1110111111
                 and c.applieruid != current_user.uid
                 and c.crossuid != current_user.uid
                 and c.lawyeruid != current_user.uid
                 and c.accuid != current_user.uid
                 and (([c.companyid, c.applydpt] in incharge_list) or c.companyid in president_list)]
    # Cross-review queue: cross review required (bit 3) and not yet done
    # (bit 4 clear); user is a cross reviewer for the department and holds
    # no incompatible role.
    cvreview = [c for c in allcontract
                if c.procedure|0b1111111011 == 0b1111111111
                and c.procedure|0b1111110111 == 0b1111110111
                and c.applieruid != current_user.uid
                and ([c.companyid, c.applydpt] in cv_list)
                and c.companyid not in president_list
                and ([c.companyid, c.applydpt] not in incharge_list)]
    # Legal-review queue: bit 5 still clear, user is the legal reviewer and
    # holds no incompatible role.
    lawreview = [c for c in allcontract
                 if c.procedure|0b1111101111 == 0b1111101111
                 and c.applieruid != current_user.uid
                 and (c.companyid in lawyer_list)
                 and (c.companyid not in president_list)
                 and ([c.companyid, c.applydpt] not in incharge_list)]
    # Accounting-review queue: bit 6 still clear, user is the accounting
    # reviewer and holds no incompatible role.
    accreview = [c for c in allcontract
                 if c.procedure|0b1111011111 == 0b1111011111
                 and c.applieruid != current_user.uid
                 and (c.companyid in acc_list)
                 and (c.companyid not in president_list)
                 and ([c.companyid, c.applydpt] not in incharge_list)]
    label_dict = LabelDict()
    return render_template('work/contractreview.html', dptreview=dptreview,
        cvreview=cvreview, lawreview=lawreview, accreview=accreview,
        label_dict=label_dict)
@work.route('/contractrules')
@login_required
def contractrules():
    """Static page explaining the contract application workflow."""
    return render_template('work/contractrules.html')
|
from django.core.exceptions import ObjectDoesNotExist
from django.utils.timezone import localdate
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from ..models import Menu, Option
class OptionSerializer(serializers.ModelSerializer):
    """Serializer for menu options; creation and edit-gating are delegated
    to the custom model managers."""
    class Meta:
        model = Option
        fields = ('id', 'name', 'description', 'price')
    def create(self, validated_data):
        # Delegate to the manager's create_option factory method.
        return Option.objects.create_option(**validated_data)
    def update(self, instance, validated_data):
        # NOTE(review): assumes the calling view injected 'menus_pk' into
        # validated_data; a plain serializer.save() without it would raise
        # KeyError -- verify against the views.
        menus_pk = validated_data.pop('menus_pk')
        # Only apply field changes while the parent menu is still editable;
        # otherwise the instance is returned unchanged and unsaved.
        if Menu.objects.is_editable(pk=menus_pk):
            for key, value in validated_data.items():
                setattr(instance, key, value)
            instance.save()
        return instance
class MenuSerializer(serializers.ModelSerializer):
    """Serializer for menus with their nested options (read-mostly)."""
    # Nested representation of the menu's options; optional on input and
    # explicitly discarded by update() below.
    options = OptionSerializer(required=False, many=True)
    class Meta:
        model = Menu
        fields = ('id', 'name', 'description', 'available_date', 'options')
    def validate_available_date(self, value):
        """
        Validates the available_date field to be greater or equal than today
        """
        today = localdate()
        if value < today:
            raise ValidationError(
                {'detail': f'You cannot set a menu in the past, it must start at least from {today}'})
        return value
    def create(self, validated_data):
        # Calling the custom create_menu method
        return Menu.objects.create_menu(**validated_data)
    def update(self, instance, validated_data):
        # Only the menu's own fields may change here, and only while the
        # menu is still editable; nested options are dropped from the input.
        if instance.is_editable():
            if 'options' in validated_data:
                del validated_data['options']
            if 'available_date' in validated_data:
                # Delegate date-clash validation to the manager.
                Menu.objects.check_at_date(
                    validated_data.get('available_date'), instance.id)
            for key, value in validated_data.items():
                setattr(instance, key, value)
            instance.save()
        return instance
class OptionNestedSerializer(OptionSerializer):
    """Option serializer variant that also exposes the related menu."""
    class Meta(OptionSerializer.Meta):
        fields = OptionSerializer.Meta.fields + ('menu',)
depth = 1 |
from view import View
# Module-level View instance shared by importers of this module.
view = View()
|
import time
import pandas as pd
import numpy as np
# PLEASE USE THE GIVEN FUNCTION NAME, DO NOT CHANGE IT
def read_csv(filepath):
    '''
    Load the two input tables for the mortality-prediction assignment.

    filepath must end with a path separator; 'events.csv' and
    'mortality_events.csv' are read from that directory.
    Returns the (events, mortality) pair of DataFrames consumed by the
    metric functions below.
    '''
    def _load(filename):
        # Simple concatenation keeps the original trailing-slash contract.
        return pd.read_csv(filepath + filename)
    return _load('events.csv'), _load('mortality_events.csv')
def event_count_metrics(events, mortality):
    '''
    Event count metrics: the number of events recorded per patient,
    summarised for the deceased and alive cohorts.

    Returns (min_dead, max_dead, avg_dead, min_alive, max_alive, avg_alive).
    Unlike the original, the input frames are NOT modified (the original
    wrote a 'group' column into the caller's events DataFrame).
    '''
    # Patients present in the mortality table form the "dead" cohort.
    is_dead = events['patient_id'].isin(mortality['patient_id'])
    dead_counts = events.loc[is_dead, 'patient_id'].value_counts()
    alive_counts = events.loc[~is_dead, 'patient_id'].value_counts()
    return (dead_counts.min(), dead_counts.max(), dead_counts.mean(),
            alive_counts.min(), alive_counts.max(), alive_counts.mean())
def encounter_count_metrics(events, mortality):
    '''
    Encounter count metrics: the number of distinct (patient, timestamp)
    visits per patient, summarised for the deceased and alive cohorts.

    Returns (min_dead, max_dead, avg_dead, min_alive, max_alive, avg_alive).
    Unlike the original, the input frames are NOT modified (the original
    wrote a 'group' column into the caller's events DataFrame).
    '''
    is_dead = events['patient_id'].isin(mortality['patient_id'])
    # De-duplicate per cohort; cohorts partition patients, so this matches
    # de-duplicating first and splitting afterwards.
    dead_visits = events.loc[is_dead, ['patient_id', 'timestamp']].drop_duplicates()
    alive_visits = events.loc[~is_dead, ['patient_id', 'timestamp']].drop_duplicates()
    dead_counts = dead_visits['patient_id'].value_counts()
    alive_counts = alive_visits['patient_id'].value_counts()
    return (dead_counts.min(), dead_counts.max(), dead_counts.mean(),
            alive_counts.min(), alive_counts.max(), alive_counts.mean())
def record_length_metrics(events, mortality):
    '''
    Record length metrics: the span in days between a patient's first and
    last event, summarised for the deceased and alive cohorts.

    Returns (min_dead, max_dead, avg_dead, min_alive, max_alive, avg_alive).
    Unlike the original, the input frames are NOT modified, and timestamps
    are truncated with normalize() so they stay datetime64 (the original
    converted to object-dtype date objects, whose difference breaks the
    .dt.days accessor).
    '''
    is_dead = events['patient_id'].isin(mortality['patient_id'])
    # Midnight-truncated event dates, index-aligned with `events`.
    per_event = pd.DataFrame({
        'patient_id': events['patient_id'],
        'date': pd.to_datetime(events['timestamp']).dt.normalize(),
    })
    def _span_days(frame):
        # Per-patient difference in days between last and first event date.
        bounds = frame.groupby('patient_id')['date'].agg(['min', 'max'])
        return (bounds['max'] - bounds['min']).dt.days
    dead_len = _span_days(per_event[is_dead])
    alive_len = _span_days(per_event[~is_dead])
    return (dead_len.min(), dead_len.max(), dead_len.mean(),
            alive_len.min(), alive_len.max(), alive_len.mean())
def main():
    '''
    DO NOT MODIFY THIS FUNCTION.
    '''
    # Grading harness: loads the training data and times each metric pass.
    # You may change the following path variable in coding but switch it back when submission.
    train_path = '../data/train/'
    # DO NOT CHANGE ANYTHING BELOW THIS ----------------------------
    events, mortality = read_csv(train_path)
    #Compute the event count metrics
    start_time = time.time()
    event_count = event_count_metrics(events, mortality)
    end_time = time.time()
    print(("Time to compute event count metrics: " + str(end_time - start_time) + "s"))
    print(event_count)
    #Compute the encounter count metrics
    start_time = time.time()
    encounter_count = encounter_count_metrics(events, mortality)
    end_time = time.time()
    print(("Time to compute encounter count metrics: " + str(end_time - start_time) + "s"))
    print(encounter_count)
    #Compute record length metrics
    start_time = time.time()
    record_length = record_length_metrics(events, mortality)
    end_time = time.time()
    print(("Time to compute record length metrics: " + str(end_time - start_time) + "s"))
    print(record_length)
if __name__ == "__main__":
main()
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Fetch-and-parse helper shared by the scraping loop below.
def get_page_content(request_url):
    """Download *request_url* and return a parsed BeautifulSoup tree."""
    request_headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}
    # A browser-like user-agent avoids trivial bot blocking; 10s timeout.
    response = requests.get(request_url, headers=request_headers, timeout=10)
    # Parse the response body with Python's built-in HTML parser.
    return BeautifulSoup(response.text, 'html.parser', from_encoding='utf-8')
# Parse the complaint records from one listing page.
def analysis(soup):
    """Extract complaint rows from a parsed listing page.

    Finds the <div class="tslb_b"> table, reads every non-header <tr>, and
    returns a DataFrame with one record per complaint row.  Rebuilt with a
    row-dict list + single DataFrame construction because
    DataFrame.append() was removed in pandas 2.0; also stops shadowing the
    builtins ``id`` and ``type``.
    """
    columns = ['id', 'brand', 'car_model', 'type', 'desc', 'problem', 'datetime', 'status']
    records = []
    # The complete complaint table lives in <div class="tslb_b">.
    table = soup.find('div', class_='tslb_b')
    for row in table.find_all('tr'):
        cells = row.find_all('td')
        # Header rows contain <th> only and yield no <td> cells.
        if len(cells) > 0:
            values = [cell.text for cell in cells[:8]]
            print(*values)
            records.append(dict(zip(columns, values)))
    return pd.DataFrame(records, columns=columns)
result = pd.DataFrame(columns=['id', 'brand', 'car_model', 'type', 'desc', 'problem', 'datetime', 'status'])
# Base listing URL; the page number and '.shtml' suffix are appended per request.
first_url = 'http://www.12365auto.com/zlts/0-0-0-0-0-0_0-0-0-0-0-0-0-'
page_num = 10
for page in range(1, page_num + 1):
    # Build the URL for this page and collect its complaint table.
    request_url = first_url + str(page) + '.shtml'
    soup = get_page_content(request_url)
    page_df = analysis(soup)
    # DataFrame.append() was removed in pandas 2.0; concat (same default
    # index behaviour, ignore_index=False) is the supported replacement.
    result = pd.concat([result, page_df])
print(result)
result.to_excel('Homework_Lesson02.xlsx')
|
import keras.models
def biard_net(in_shape, n_classes):
    """Build a small fully-convolutional classifier.

    Three 64-filter 5x5 ReLU conv layers followed by an n_classes-filter
    5x5 conv with softmax activation.  Only the first layer declares
    input_shape: Keras ignores the argument on subsequent layers, so the
    redundant copies were dropped.
    """
    return keras.models.Sequential([
        keras.layers.Conv2D(filters=64, kernel_size=(5, 5), padding="same",
                            input_shape=in_shape, activation='relu'),
        keras.layers.Conv2D(filters=64, kernel_size=(5, 5), padding="same", activation='relu'),
        keras.layers.Conv2D(filters=64, kernel_size=(5, 5), padding="same", activation='relu'),
        keras.layers.Conv2D(filters=n_classes, kernel_size=(5, 5), padding="same", activation='softmax'),
    ])
|
from django.contrib import admin
from django.contrib.auth.models import User
from django.contrib.auth.admin import UserAdmin
from .models import *
class SearchIssue(admin.ModelAdmin):
    # Admin options for Issue: enable the search box over the title field.
    search_fields = ["title"]
class SearchNewsEvents(admin.ModelAdmin):
    # Admin options for NewsEvents: enable the search box over the title field.
    search_fields = ["title"]
class SearchPeople(admin.ModelAdmin):
    # Admin options for People: enable the search box over the name field.
    search_fields = ["name"]
# Register the site models with the admin: the three searchable models get
# their custom ModelAdmin, the rest use the default admin options.
admin.site.register(Issue,SearchIssue)
admin.site.register(NewsEvents,SearchNewsEvents)
admin.site.register(People,SearchPeople)
admin.site.register(Requests)
admin.site.register(SiteFace)
admin.site.register(SiteFaceEn)
admin.site.register(Structure)
admin.site.register(Vacanc)
|
#!/usr/bin/env python
from twisted.application import service
from consider import server
# Build the root service and attach it to a Twisted Application object so
# this file can be launched with `twistd -y`.
master = server.MasterService()
application = service.Application("consider-server")
master.setServiceParent(application)
|
#for qaudratic equation of these form ax^2 + bx +c=0
#import complex math module
import cmath
#using the integer and input text
# Read the three integer coefficients of ax^2 + bx + c = 0.
a = int(input("a ="))
b = int(input("b = "))
c = int(input("c = "))
# Discriminant b^2 - 4ac decides whether the roots are real or complex.
discriminant = (b ** 2) - (4 * a * c)
# Quadratic formula; cmath.sqrt handles a negative discriminant by
# producing complex roots instead of raising.
root_minus = (-b - cmath.sqrt(discriminant)) / (2 * a)
root_plus = (-b + cmath.sqrt(discriminant)) / (2 * a)
print(discriminant)
print("X1 :", root_minus)
print("X2 :", root_plus)
|
# Generated by Django 2.0.7 on 2018-09-14 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema change: removes the TraverMsg model's fields,
    denormalizes the city->province link into a plain name field, and adds
    an image URL to ScenicMsg.  Do not edit the operations by hand."""
    dependencies = [
        ('TraverMsg', '0002_auto_20180914_1419'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='travermsg',
            name='end_city',
        ),
        migrations.RemoveField(
            model_name='travermsg',
            name='start_city',
        ),
        migrations.RemoveField(
            model_name='travermsg',
            name='user_id',
        ),
        migrations.RemoveField(
            model_name='citymsg',
            name='city_content',
        ),
        migrations.RemoveField(
            model_name='citymsg',
            name='province',
        ),
        migrations.RemoveField(
            model_name='provincemsg',
            name='province_content',
        ),
        migrations.AddField(
            model_name='citymsg',
            name='province_name',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='所属省份'),
        ),
        migrations.AddField(
            model_name='scenicmsg',
            name='img_url',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='图片url'),
        ),
        migrations.AlterField(
            model_name='scenicmsg',
            name='city_name',
            field=models.CharField(blank=True, max_length=100, null=True, verbose_name='所属城市'),
        ),
        migrations.DeleteModel(
            name='TraverMsg',
        ),
    ]
|
from __future__ import absolute_import
from numbers import Number
from plotly import exceptions, optional_imports
from plotly.figure_factory import utils
from plotly.graph_objs import graph_objs
from plotly.tools import make_subplots
# Heavy numeric dependencies are loaded lazily/optionally; presumably each
# name is None when the package is unavailable -- verify against
# plotly.optional_imports.get_module.
pd = optional_imports.get_module('pandas')
np = optional_imports.get_module('numpy')
scipy_stats = optional_imports.get_module('scipy.stats')
def calc_stats(data):
    """
    Calculate statistics for use in violin plot.

    :param data: 1-d sequence of numeric values
    :return: dict with sample min/max, quartiles (q1, q2, q3) and the
        whisker endpoints (d1, d2) clamped to actual data points
    """
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float requests the same float64 dtype.
    x = np.asarray(data, dtype=float)
    vals_min = np.min(x)
    vals_max = np.max(x)
    # The 'interpolation' keyword was renamed to 'method' in NumPy 1.22.
    q2 = np.percentile(x, 50, method='linear')
    q1 = np.percentile(x, 25, method='lower')
    q3 = np.percentile(x, 75, method='higher')
    iqr = q3 - q1
    whisker_dist = 1.5 * iqr
    # in order to prevent drawing whiskers outside the interval
    # of data one defines the whisker positions as:
    d1 = np.min(x[x >= (q1 - whisker_dist)])
    d2 = np.max(x[x <= (q3 + whisker_dist)])
    return {
        'min': vals_min,
        'max': vals_max,
        'q1': q1,
        'q2': q2,
        'q3': q3,
        'd1': d1,
        'd2': d2
    }
def make_half_violin(x, y, fillcolor='#1f77b4', linecolor='rgb(0, 0, 0)'):
    """
    Build one half of a violin: the pdf outline as a filled Scatter trace.
    """
    # One hover label per (pdf(y), y) sample point.
    hover_labels = []
    for i in range(len(x)):
        hover_labels.append(
            '(pdf(y), y)=({:0.2f}, {:0.2f})'.format(x[i], y[i]))
    return graph_objs.Scatter(
        x=x,
        y=y,
        mode='lines',
        name='',
        text=hover_labels,
        fill='tozerox',
        fillcolor=fillcolor,
        line=graph_objs.Line(width=0.5, color=linecolor, shape='spline'),
        hoverinfo='text',
        opacity=0.5
    )
def make_non_outlier_interval(s, d1, d2):
    """
    Thin vertical segment marking the non-outlier interval [d1, d2] at x = s.
    """
    interval_line = graph_objs.Line(width=1.5, color='rgb(0,0,0)')
    return graph_objs.Scatter(
        x=[s, s],
        y=[d1, d2],
        name='',
        mode='lines',
        line=interval_line
    )
def make_quartiles(s, q1, q3):
    """
    Thick vertical segment from q1 to q3 (the interquartile range) at x = s.
    """
    labels = ['lower-quartile: {:0.2f}'.format(q1),
              'upper-quartile: {:0.2f}'.format(q3)]
    return graph_objs.Scatter(
        x=[s, s],
        y=[q1, q3],
        text=labels,
        mode='lines',
        line=graph_objs.Line(width=4, color='rgb(0,0,0)'),
        hoverinfo='text'
    )
def make_diff(m1, m2):
    """
    Vertical segment joining the two medians at x = 0.
    """
    labels = ['median 1: {:0.2f}'.format(m1),
              'median 2: {:0.2f}'.format(m2)]
    return graph_objs.Scatter(
        x=[0, 0],
        y=[m1, m2],
        text=labels,
        mode='lines',
        line=graph_objs.Line(width=2, color='rgb(0,0,0)'),
        hoverinfo='text'
    )
def make_delta(m1, m2):
    """
    White marker halfway between the medians whose hover text is |m1 - m2|.
    """
    midpoint = (m1 + m2) / 2.0
    delta_label = 'delta: {:0.2f}'.format(abs(m1 - m2))
    return graph_objs.Scatter(
        x=[0],
        y=[midpoint],
        text=[delta_label],
        mode='markers',
        marker=dict(symbol='square', color='rgb(255,255,255)'),
        hoverinfo='text'
    )
def make_median(s, q2):
    """
    White marker at (s, q2) whose hover text shows the median value.
    """
    return graph_objs.Scatter(
        x=[s],
        y=[q2],
        text=['median: {:0.2f}'.format(q2)],
        mode='markers',
        marker=dict(symbol='square', color='rgb(255,255,255)'),
        hoverinfo='text'
    )
def make_XAxis(xaxis_title, xaxis_range):
    """
    X axis for one violin subplot: fixed range, all decorations hidden.
    """
    return graph_objs.XAxis(
        title=xaxis_title,
        range=xaxis_range,
        showgrid=False,
        zeroline=False,
        showline=False,
        mirror=False,
        ticks='',
        showticklabels=False
    )
def make_YAxis(yaxis_title):
    """
    Shared y axis: autoranged, with ticks, labels and grid visible.
    """
    return graph_objs.YAxis(
        title=yaxis_title,
        showticklabels=True,
        autorange=True,
        ticklen=4,
        showline=True,
        zeroline=False,
        showgrid=True,
        mirror=False
    )
def violinplot(vals, colors=None):
    """
    Build the traces for one split violin.

    Refer to FigureFactory.create_violin() for the public docstring.

    :param vals: pair (left_sample, right_sample) of 1-d numeric sequences
    :param colors: two fill colors; defaults to blue / orange
    :return: (plot_data, plot_xrange) -- the Scatter traces and the x range
    """
    if colors is None:
        colors = ["#1F77B4", "#FF7F0E"]
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the equivalent (float64) dtype.
    sides = [np.asarray(vals[0], float), np.asarray(vals[1], float)]
    # summary statistics
    stats = [calc_stats(side) for side in sides]
    # kernel density estimation of pdf
    pdfs = [scipy_stats.gaussian_kde(side) for side in sides]
    # grid over the data interval
    xxs = [np.linspace(stat['min'], stat['max'], 100) for stat in stats]
    # evaluate the pdf at the grid xx
    yys = [pdf(xxs[i]) for i, pdf in enumerate(pdfs)]
    max_pdf = np.max([np.max(yy) for yy in yys])
    min_pdf = np.min([np.min(yy) for yy in yys])
    # Offset at which quartile/median markers are drawn on each side.
    s_pos = max_pdf / 4.0
    # TODO consider min_pdf here
    plot_xrange = [min_pdf - 0.1, max_pdf + 0.1]
    # Left half is mirrored by negating its pdf values / x offsets.
    plot_data = [make_half_violin(-yys[0], xxs[0], fillcolor=colors[0]),
                 make_half_violin(yys[1], xxs[1], fillcolor=colors[1]),
                 make_non_outlier_interval(-s_pos, stats[0]['d1'], stats[0]['d2']),
                 make_non_outlier_interval( s_pos, stats[1]['d1'], stats[1]['d2']),
                 make_quartiles(-s_pos, stats[0]['q1'], stats[0]['q3']),
                 make_quartiles( s_pos, stats[1]['q1'], stats[1]['q3']),
                 make_median(-s_pos, stats[0]['q2']),
                 make_median( s_pos, stats[1]['q2']),
                 make_diff(stats[0]['q2'], stats[1]['q2']),
                 make_delta(stats[0]['q2'], stats[1]['q2'])]
    return plot_data, plot_xrange
def violin2(data, labels, colors=None, height=450, width=600, title=None):
    """
    Split violin plot: one two-sided violin per entry of ``data``.

    :param data: sequence of (left_sample, right_sample) pairs
    :param labels: one x-axis label per violin; must match len(data)
    :param colors: pair of fill colors forwarded to violinplot()
    :param height: figure height in pixels
    :param width: figure width in pixels
    :param title: optional figure title
    :raises PlotlyError: when data and labels differ in length
    :return: the assembled plotly figure
    """
    if len(data) != len(labels):
        raise exceptions.PlotlyError("Data and Labels must be the same length")
    # (dead `else` removed: unreachable after the raise above)
    L = len(labels)
    fig = make_subplots(rows=1, cols=L,
                        shared_yaxes=True,
                        horizontal_spacing=0.025,
                        print_grid=False)
    # (unused locals `color_index` and per-iteration `layout` removed)
    for k, gr in enumerate(data):
        plot_data, plot_xrange = violinplot(gr, colors=colors)
        for item in plot_data:
            fig.append_trace(item, 1, k + 1)
        # add violin plot labels
        fig['layout'].update(
            {'xaxis{}'.format(k + 1): make_XAxis(labels[k], plot_xrange)}
        )
    # set the sharey axis style
    fig['layout'].update({'yaxis{}'.format(1): make_YAxis('')})
    fig['layout'].update(
        title=title,
        showlegend=False,
        hovermode='closest',
        autosize=False,
        height=height,
        width=width
    )
    return fig
|
import pytesseract
from PIL import Image,ImageDraw
#pytesseract.pytesseract.tesseract_cmd = 'F:/Tesseract-OCR/tesseract.exe'
#C:\\Users\\Administrator\\Desktop\\企业雷达\\百度信用公司图标\\getCapImg.jpg
#F:\\baiduimg\\0ERuBLlQ_imges.png
#F:\\yzm\\1412260-20180701125834481-1681474414.png
# Load the target image and convert it to 8-bit grayscale ('L' mode).
img = Image.open('F:\\yzm\\txtimg.png')
img = img.convert('L')
#img = img.point(lambda x: 0 if x<100 else x>=100, '1')
#img.show()
# OCR with the simplified-Chinese model; requires the Tesseract binary and
# its 'chi_sim' traineddata to be installed on this machine.
text = pytesseract.image_to_string(img,lang='chi_sim')
print(text)
|
from django.db import models
from catalog.managers import TLEManager
class TLE(models.Model):
    """One stored Two Line Element set (orbital data for a satellite).
    The raw text lines are kept alongside the individually parsed fields;
    field names correspond to the components of the TLE format.
    """
    class Meta:
        verbose_name = "Two Line Element"
        verbose_name_plural = "Two Line Elements"
    # Raw TLE text lines (a TLE line is at most 69 characters).
    first_line = models.CharField(
        max_length=70,
        null=True
    )
    second_line = models.CharField(
        max_length=70,
        null=True
    )
    third_line = models.CharField(
        max_length=70,
        default=""
    )
    # Related catalog records; SET_NULL keeps the TLE row when the
    # referenced entry is deleted.
    satellite_number = models.ForeignKey(
        "CatalogEntry",
        models.SET_NULL,
        blank=True,
        null=True
    )
    classification = models.ForeignKey(
        "OperationalStatus",
        models.SET_NULL,
        blank=True,
        null=True
    )
    # International designator: two-digit year, launch number, launch piece.
    international_designator_year = models.CharField(
        max_length=2
    )
    international_designator_number = models.CharField(
        max_length=3
    )
    international_designator_piece = models.CharField(
        max_length=3
    )
    # Epoch: two-digit year plus fractional day of year.
    epoch_year = models.CharField(
        max_length=2
    )
    epoch_day = models.DecimalField(
        max_digits=11,
        decimal_places=8
    )
    # Mean-motion derivatives and drag term from line 1.
    first_derivative_mean_motion = models.DecimalField(
        max_digits=9,
        decimal_places=8
    )
    second_derivative_mean_motion = models.DecimalField(
        max_digits=9,
        decimal_places=8
    )
    drag = models.DecimalField(
        max_digits=10,
        decimal_places=9
    )
    set_number = models.PositiveIntegerField()
    # Checksum digit parsed from line 1.
    first_checksum = models.PositiveSmallIntegerField()
    # Orbital elements from line 2.
    inclination = models.DecimalField(
        max_digits=7,
        decimal_places=4
    )
    ascending_node = models.DecimalField(
        max_digits=7,
        decimal_places=4
    )
    eccentricity = models.DecimalField(
        max_digits=8,
        decimal_places=7
    )
    perigee_argument = models.DecimalField(
        max_digits=7,
        decimal_places=4
    )
    mean_anomaly = models.DecimalField(
        max_digits=7,
        decimal_places=4
    )
    mean_motion = models.DecimalField(
        max_digits=10,
        decimal_places=8
    )
    revolution_number = models.PositiveIntegerField()
    # Checksum digit parsed from line 2.
    second_checksum = models.PositiveSmallIntegerField()
    # Ingestion timestamp.
    added = models.DateTimeField(
        null=True
    )
    objects = TLEManager()
    def __str__(self):
        # NOTE(review): satellite_number is nullable (SET_NULL above), so
        # this raises AttributeError for rows whose FK was cleared -- confirm
        # whether a None-safe fallback is wanted.
        return '%s:%i' % (self.satellite_number.international_designator , self.set_number)
|
#!/usr/bin/env python
"""
billy.py
Compare models using CROW features w/ those using bilinear features
!! Need to clean up the ordering
"""
import os
import sys
import bcolz
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.svm import LinearSVC
from sklearn.preprocessing import normalize
import keras
from keras.layers import *
from keras.models import Sequential
from keras import backend as K
if K._BACKEND == 'tensorflow':
    # Keep TensorFlow from grabbing all GPU memory up front: allocate
    # lazily (allow_growth) and restrict visibility to GPU 0.
    def limit_mem():
        cfg = K.tf.ConfigProto()
        cfg.gpu_options.allow_growth = True
        cfg.gpu_options.visible_device_list="0"
        K.set_session(K.tf.Session(config=cfg))
    limit_mem()
# --
# Meta
# meta.tsv: one row per image -- id, relative file name, train/test flag.
meta = pd.read_csv('./data/cub/meta.tsv', header=None, sep='\t')
meta.columns = ('id', 'fname', 'train')
# Class label is the top-level directory component of the file name.
meta['lab'] = meta.fname.apply(lambda x: x.split('/')[0])
train_sel = np.array(meta.train)
train_labs, test_labs = np.array(meta.lab[meta.train]), np.array(meta.lab[~meta.train])
# --
# Linear classifier
# Metadata IO
crow = pd.read_csv('./data/feats-crow-448', sep='\t', header=None)
crow = np.array(crow[crow.columns[1:]])
# L2-normalize CROW features before the linear SVM.
ncrow = normalize(crow)
train_ncrow, test_ncrow = ncrow[train_sel], ncrow[~train_sel]
# Train classifier
svc = LinearSVC().fit(train_ncrow, train_labs)
(svc.predict(test_ncrow) == test_labs).mean() # 0.660
# --
# Load bilinear features
# Data IO
bili = bcolz.open('./data/bilinear.bc')[:]
train_bili, test_bili = bili[np.array(meta.train)], bili[np.array(~meta.train)]
# --
# Use PCA to reduce dimensionality, then train SVM
# Problem is that PCA on this matrix seems to be very expensive (~232K columns)
# so we were just computing on subset of rows
rsel = np.random.choice(train_bili.shape[0], 1000, replace=False)
pca = PCA(n_components=512).fit(train_bili[rsel]) # untested
npca_train_bili = normalize(pca.transform(train_bili))
npca_test_bili = normalize(pca.transform(test_bili))
svc = LinearSVC().fit(npca_train_bili, train_labs)
(svc.predict(npca_test_bili) == test_labs).mean()
# 0.757 (normalized, unwhiten)
# ^^ Basically as good as using full bilinear features
# --
# Train model w/ bilinear features
# NOTE(review): n_classes, y_train and y_test are never defined in this
# script, so it crashes here as-is -- they presumably survived from an
# interactive session. TODO: derive them from `meta` (see header "Need to
# clean up").
model = Sequential()
model.add(Dense(n_classes, input_shape=(train_bili.shape[1],)))
model.add(Dropout(0.5))
model.add(Dense(n_classes, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['acc'])
fitist = model.fit(
    train_bili, y_train,
    verbose=True,
    batch_size=32,
    validation_data=(test_bili, y_test),
    epochs=50,
    callbacks=[
        keras.callbacks.EarlyStopping(patience=10),
        keras.callbacks.ReduceLROnPlateau(patience=5)
    ]
)
# all features
# dropout 0.25 = 0.7477
# dropout 0.50 = ~0.75 @ e11 (got impatient)
# dropout 0.75 = 0.7684
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.goals import check
from pants.backend.go.goals.check import GoCheckFieldSet, GoCheckRequest
from pants.backend.go.target_types import GoModTarget, GoPackageTarget
from pants.backend.go.util_rules import (
assembly,
build_pkg,
build_pkg_target,
first_party_pkg,
go_mod,
import_analysis,
link,
sdk,
third_party_pkg,
)
from pants.core.goals.check import CheckResult, CheckResults
from pants.engine.addresses import Address
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with every rule set `go check` needs, plus PATH."""
    all_rules = [
        *check.rules(),
        *sdk.rules(),
        *assembly.rules(),
        *build_pkg.rules(),
        *build_pkg_target.rules(),
        *import_analysis.rules(),
        *link.rules(),
        *go_mod.rules(),
        *first_party_pkg.rules(),
        *third_party_pkg.rules(),
        *target_type_rules.rules(),
        QueryRule(CheckResults, [GoCheckRequest]),
    ]
    runner = RuleRunner(rules=all_rules, target_types=[GoModTarget, GoPackageTarget])
    runner.set_options([], env_inherit={"PATH"})
    return runner
def test_check(rule_runner: RuleRunner) -> None:
    """One invalid and one valid package: expect a single failing result."""
    files = {
        "go.mod": dedent(
            """\
            module example.com/greeter
            go 1.17
            """
        ),
        "BUILD": "go_mod(name='mod')",
        "bad/f.go": "invalid!!!",
        "bad/BUILD": "go_package()",
        "good/f.go": dedent(
            """\
            package greeter
            import "fmt"
            func Hello() {
                fmt.Println("Hello world!")
            }
            """
        ),
        "good/BUILD": "go_package()",
    }
    rule_runner.write_files(files)
    addresses = [Address("bad"), Address("good")]
    targets = [rule_runner.get_target(addr) for addr in addresses]
    request = GoCheckRequest(GoCheckFieldSet.create(tgt) for tgt in targets)
    results = rule_runner.request(CheckResults, [request]).results
    assert set(results) == {CheckResult(1, "", "")}
|
from sklearn.datasets import make_moons
from sklearn.cluster import KMeans
from matplotlib.pylab import plt
# Two interleaving half-moons: non-convex clusters.
x, y = make_moons(200, noise=.05, random_state=0)
print(x, y)
# Cluster into 2 groups and color the scatter by the predicted labels.
# NOTE: k-means partitions with straight (Voronoi) boundaries, so the
# plot presumably shows the moons split incorrectly -- that is the demo.
labels = KMeans(2, random_state=0).fit_predict(x)
plt.scatter(x[:,0], x[:,1], c=labels, s=50, cmap='viridis')
plt.show()
# deps.py -- Portage dependency resolution functions
# Copyright 2003-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Public API of portage.dep (the two separators are exported as well).
__all__ = [
    'Atom', 'best_match_to_list', 'cpvequal',
    'dep_getcpv', 'dep_getkey', 'dep_getslot',
    'dep_getusedeps', 'dep_opconvert', 'flatten',
    'get_operator', 'isjustname', 'isspecific',
    'isvalidatom', 'match_from_list', 'match_to_list',
    'paren_enclose', 'paren_normalize', 'paren_reduce',
    'remove_slot', 'strip_empty', 'use_reduce',
    '_repo_separator', '_slot_separator',
]
import re, sys
import warnings
from itertools import chain
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.util:cmp_sort_key,writemsg',
)
from portage import _encodings, _unicode_decode, _unicode_encode
from portage.eapi import _get_eapi_attrs
from portage.exception import InvalidAtom, InvalidData, InvalidDependString
from portage.localization import _
from portage.versions import catpkgsplit, catsplit, \
vercmp, ververify, _cp, _cpv, _pkg_str, _unknown_repo
import portage.cache.mappings
# Python 2/3 compatibility aliases: on py3 there is no separate unicode
# type, so both names map to str; on py2 _unicode stays the real unicode.
if sys.hexversion >= 0x3000000:
    basestring = str
    _unicode = str
else:
    _unicode = unicode
# \w is [a-zA-Z0-9_]
# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
# It must not begin with a hyphen or a dot.
_slot_separator = ":"
_slot = r'([\w+][\w+.-]*)'
# loosly match SLOT, which may have an optional ABI part
_slot_loose = r'([\w+./*=-]+)'
# USE dependency block, e.g. [flag1,-flag2]
_use = r'\[.*\]'
# version comparison operators permitted in atoms
_op = r'([=~]|[><]=?)'
_repo_separator = "::"
_repo_name = r'[\w][\w-]*'
_repo_name_re = re.compile('^' + _repo_name + '$', re.UNICODE)
# optional ::repo suffix of an atom (captures the repo name)
_repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?'
# category part for extended (wildcard) atoms
_extended_cat = r'[\w+*][\w+.*-]*'
_slot_re_cache = {}
def _get_slot_re(eapi_attrs):
    """Return (and memoize) the compiled SLOT regexp for the given EAPI."""
    key = eapi_attrs.slot_operator
    cached = _slot_re_cache.get(key)
    if cached is not None:
        return cached
    # With slot-operator support a SLOT may carry an optional /sub-slot part.
    if key:
        pattern = _slot + r'(/' + _slot + r'=?)?'
    else:
        pattern = _slot
    compiled = re.compile('^' + pattern + '$', re.VERBOSE | re.UNICODE)
    _slot_re_cache[key] = compiled
    return compiled
_slot_dep_re_cache = {}
def _get_slot_dep_re(eapi_attrs):
    """Return (and memoize) the compiled regexp for slot dependencies."""
    key = eapi_attrs.slot_operator
    cached = _slot_dep_re_cache.get(key)
    if cached is not None:
        return cached
    # Slot deps may additionally use the '*', '=' and '/sub-slot' operators.
    if key:
        pattern = _slot + r'?(\*|=|/' + _slot + r'=?)?'
    else:
        pattern = _slot
    compiled = re.compile('^' + pattern + '$', re.VERBOSE | re.UNICODE)
    _slot_dep_re_cache[key] = compiled
    return compiled
def _match_slot(atom, pkg):
    """True when pkg satisfies atom's slot (and sub-slot, when given)."""
    if pkg.slot != atom.slot:
        return False
    # Matching slot: accept unless the atom pins a different sub-slot.
    return not atom.sub_slot or atom.sub_slot == pkg.sub_slot
_atom_re_cache = {}
def _get_atom_re(eapi_attrs):
    """Return (and memoize) the compiled atom regexp for the given EAPI."""
    key = eapi_attrs.dots_in_PN
    cached = _atom_re_cache.get(key)
    if cached is not None:
        return cached
    variant = 'dots_allowed_in_PN' if key else 'dots_disallowed_in_PN'
    cp_re = _cp[variant]
    cpv_re = _cpv[variant]
    # An atom is op+cpv, =cpv*, or a plain cp, optionally followed by
    # :slot, ::repo and a [use] block.
    pattern = ('^(?P<without_use>(?:' +
               '(?P<op>' + _op + cpv_re + ')|' +
               '(?P<star>=' + cpv_re + r'\*)|' +
               '(?P<simple>' + cp_re + '))' +
               '(' + _slot_separator + _slot_loose + ')?' +
               _repo + ')(' + _use + ')?$')
    compiled = re.compile(pattern, re.VERBOSE | re.UNICODE)
    _atom_re_cache[key] = compiled
    return compiled
_atom_wildcard_re_cache = {}
def _get_atom_wildcard_re(eapi_attrs):
    """Return (and memoize) the compiled extended-atom (wildcard) regexp."""
    key = eapi_attrs.dots_in_PN
    cached = _atom_wildcard_re_cache.get(key)
    if cached is not None:
        return cached
    pkg_re = r'[\w+*][\w+.*-]*?' if key else r'[\w+*][\w+*-]*?'
    # category/package core, shared by the simple and '=...-*ver*' forms.
    cp_part = '(' + _extended_cat + r')/(' + pkg_re + r')'
    pattern = (r'((?P<simple>' + cp_part + ')' +
               '|(?P<star>=(' + cp_part + r')-(?P<version>\*\w+\*)))' +
               '(:(?P<slot>' + _slot_loose + r'))?(' +
               _repo_separator + r'(?P<repo>' + _repo_name + r'))?$')
    compiled = re.compile(pattern, re.UNICODE)
    _atom_wildcard_re_cache[key] = compiled
    return compiled
_usedep_re_cache = {}
def _get_usedep_re(eapi_attrs):
    """
    Return (and memoize) the compiled regexp matching one USE dependency.

    @param eapi_attrs: The EAPI attributes from _get_eapi_attrs
    @type eapi_attrs: _eapi_attrs
    @rtype: regular expression object
    @return: A regular expression object that matches valid USE deps for the
        given eapi.
    """
    key = eapi_attrs.dots_in_use_flags
    cached = _usedep_re_cache.get(key)
    if cached is not None:
        return cached
    # Some EAPIs additionally permit '.' inside USE flag names.
    if key:
        flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@.-]*'
    else:
        flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@-]*'
    compiled = re.compile(r'^(?P<prefix>[!-]?)(?P<flag>' +
        flag_re + r')(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$')
    _usedep_re_cache[key] = compiled
    return compiled
_useflag_re_cache = {}
def _get_useflag_re(eapi):
    """
    Return (and memoize) the compiled regexp matching one USE flag.

    When eapi is None then validation is not as strict, since we want the
    same to work for multiple EAPIs that may have slightly different rules.
    @param eapi: The EAPI
    @type eapi: String or None
    @rtype: regular expression object
    @return: A regular expression object that matches valid USE flags for the
        given eapi.
    """
    eapi_attrs = _get_eapi_attrs(eapi)
    key = eapi_attrs.dots_in_use_flags
    cached = _useflag_re_cache.get(key)
    if cached is not None:
        return cached
    if key:
        flag_body = r'[A-Za-z0-9][A-Za-z0-9+_@.-]*'
    else:
        flag_body = r'[A-Za-z0-9][A-Za-z0-9+_@-]*'
    compiled = re.compile(r'^' + flag_body + r'$')
    _useflag_re_cache[key] = compiled
    return compiled
def cpvequal(cpv1, cpv2):
    """
    Compare two CPV strings for equality.

    @param cpv1: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
    @type cpv1: String
    @param cpv2: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
    @type cpv2: String
    @rtype: Boolean
    @return:
    1.  True if cpv1 = cpv2
    2.  False Otherwise
    3.  Throws PortageException if cpv1 or cpv2 is not a CPV
    Example Usage:
    >>> from portage.dep import cpvequal
    >>> cpvequal("sys-apps/portage-2.1","sys-apps/portage-2.1")
    >>> True
    """
    def _as_pkg(cpv):
        # Promote plain strings to _pkg_str so cpv_split/version exist.
        try:
            return cpv, cpv.cpv_split
        except AttributeError:
            pkg = _pkg_str(cpv)
            return pkg, pkg.cpv_split
    try:
        cpv1, split1 = _as_pkg(cpv1)
        cpv2, split2 = _as_pkg(cpv2)
    except InvalidData:
        raise portage.exception.PortageException(_("Invalid data '%s, %s', parameter was not a CPV") % (cpv1, cpv2))
    # Different category or package name can never be equal.
    if split1[0] != split2[0] or split1[1] != split2[1]:
        return False
    return vercmp(cpv1.version, cpv2.version) == 0
def strip_empty(myarr):
    """
    Strip all empty elements from an array

    @param myarr: The list of elements
    @type myarr: List
    @rtype: Array
    @return: The array with empty elements removed
    """
    warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
        ('portage.dep.strip_empty',), DeprecationWarning, stacklevel=2)
    # filter(None, ...) keeps exactly the truthy elements.
    return list(filter(None, myarr))
def paren_reduce(mystr):
    """
    Take a string and convert all paren enclosed entities into sublists and
    split the list elements by spaces. All redundant brackets are removed.

    Example usage:
        >>> paren_reduce('foobar foo? ( bar baz )')
        ['foobar', 'foo?', ['bar', 'baz']]

    @param mystr: The string to reduce
    @type mystr: String
    @rtype: Array
    @return: The reduced string in an array
    """
    if portage._internal_warnings:
        warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
            ('portage.dep.paren_reduce',), DeprecationWarning, stacklevel=2)
    mysplit = mystr.split()
    # Stack-based parse: '(' pushes a new list, ')' pops and merges it one
    # level up, removing redundant brackets along the way.
    level = 0
    stack = [[]]
    # need_bracket is set after '||' or a 'use?' conditional: the next
    # token must open a group.
    need_bracket = False
    for token in mysplit:
        if token == "(":
            need_bracket = False
            stack.append([])
            level += 1
        elif token == ")":
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % mystr)
            if level > 0:
                level -= 1
                l = stack.pop()
                # 'single': one element, or an operator/conditional with
                # exactly one argument.
                is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
                def ends_in_any_of_dep(k):
                    return k>=0 and stack[k] and stack[k][-1] == "||"
                def ends_in_operator(k):
                    return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
                def special_append():
                    """
                    Use extend instead of append if possible. This kills all redundant brackets.
                    """
                    if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
                        if len(l) == 1 and isinstance(l[0], list):
                            # l = [[...]]
                            stack[level].extend(l[0])
                        else:
                            stack[level].extend(l)
                    else:
                        stack[level].append(l)
                if l:
                    if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
                        #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
                        stack[level].extend(l)
                    elif not stack[level]:
                        #An '||' in the level above forces us to keep to brackets.
                        special_append()
                    elif len(l) == 1 and ends_in_any_of_dep(level):
                        #Optimize: || ( A ) -> A
                        stack[level].pop()
                        special_append()
                    elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
                        #Optimize: || ( || ( ... ) ) -> || ( ... )
                        #          foo? ( foo? ( ... ) ) -> foo? ( ... )
                        #          || ( foo? ( ... ) ) -> foo? ( ... )
                        stack[level].pop()
                        special_append()
                    else:
                        special_append()
                else:
                    # Empty group: drop the operator/conditional that
                    # governed it, if any.
                    if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
                        stack[level].pop()
            else:
                # ')' with no matching '('.
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % mystr)
        elif token == "||":
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % mystr)
            need_bracket = True
            stack[level].append(token)
        else:
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % mystr)
            if token[-1] == "?":
                # A use-conditional must be followed by '( ... )'.
                need_bracket = True
            stack[level].append(token)
    if level != 0 or need_bracket:
        raise InvalidDependString(
            _("malformed syntax: '%s'") % mystr)
    return stack[0]
class paren_normalize(list):
    """Take a dependency structure as returned by paren_reduce or use_reduce
    and generate an equivalent structure that has no redundant lists."""
    def __init__(self, src):
        if portage._internal_warnings:
            warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
                ('portage.dep.paren_normalize',), DeprecationWarning, stacklevel=2)
        list.__init__(self)
        # Populate self in place by flattening src.
        self._zap_parens(src, self)
    def _zap_parens(self, src, dest, disjunction=False):
        # Recursively copy src into dest, unwrapping single-element sublists
        # ("redundant parens"). `disjunction` is True for the argument list
        # of a '||' / '^^' operator, where sublists must be preserved.
        if not src:
            return dest
        i = iter(src)
        for x in i:
            if isinstance(x, basestring):
                if x in ('||', '^^'):
                    # Normalize the operator's argument list; a one-element
                    # result makes the operator itself redundant.
                    y = self._zap_parens(next(i), [], disjunction=True)
                    if len(y) == 1:
                        dest.append(y[0])
                    else:
                        dest.append(x)
                        dest.append(y)
                elif x.endswith("?"):
                    # A USE conditional keeps its (normalized) argument list.
                    dest.append(x)
                    dest.append(self._zap_parens(next(i), []))
                else:
                    dest.append(x)
            else:
                if disjunction:
                    # Inside '||': keep the sublist unless it collapses to
                    # a single element.
                    x = self._zap_parens(x, [])
                    if len(x) == 1:
                        dest.append(x[0])
                    else:
                        dest.append(x)
                else:
                    # Outside '||': merge the sublist into the parent.
                    self._zap_parens(x, dest)
        return dest
def paren_enclose(mylist, unevaluated_atom=False, opconvert=False):
    """
    Convert a list to a string with sublists enclosed with parens.

    Example usage:
        >>> test = ['foobar','foo',['bar','baz']]
        >>> paren_enclose(test)
        'foobar foo ( bar baz )'

    @param mylist: The list
    @type mylist: List
    @rtype: String
    @return: The paren enclosed string
    """
    def _render(item):
        # Sublists are wrapped in parens; in opconvert form a leading '||'
        # is printed in front of the parenthesized remainder.
        if isinstance(item, list):
            if opconvert and item and item[0] == "||":
                return "%s ( %s )" % (item[0], paren_enclose(item[1:]))
            return "( %s )" % paren_enclose(item)
        if unevaluated_atom:
            item = getattr(item, 'unevaluated_atom', item)
        return item
    return " ".join(_render(x) for x in mylist)
def use_reduce(depstr, uselist=[], masklist=[], matchall=False, excludeall=[], is_src_uri=False, \
    eapi=None, opconvert=False, flat=False, is_valid_flag=None, token_class=None, matchnone=False):
    """
    Takes a dep string and reduces the use? conditionals out, leaving an array
    with subarrays. All redundant brackets are removed.

    @param depstr: depstring
    @type depstr: String
    @param uselist: List of use enabled flags
    @type uselist: List
    @param masklist: List of masked flags (always treated as disabled)
    @type masklist: List
    @param matchall: Treat all conditionals as active. Used by repoman.
    @type matchall: Bool
    @param excludeall: List of flags for which negated conditionals are always treated as inactive.
    @type excludeall: List
    @param is_src_uri: Indicates if depstr represents a SRC_URI
    @type is_src_uri: Bool
    @param eapi: Indicates the EAPI the dep string has to comply to
    @type eapi: String
    @param opconvert: Put every operator as first element into it's argument list
    @type opconvert: Bool
    @param flat: Create a flat list of all tokens
    @type flat: Bool
    @param is_valid_flag: Function that decides if a given use flag might be used in use conditionals
    @type is_valid_flag: Function
    @param token_class: Convert all non operator tokens into this class
    @type token_class: Class
    @param matchnone: Treat all conditionals as inactive. Used by digestgen().
    @type matchnone: Bool
    @rtype: List
    @return: The use reduced depend array
    """
    # NOTE(review): the mutable default arguments (uselist=[], masklist=[],
    # excludeall=[]) are only ever read below, never mutated, so they are
    # safe -- but fragile if this function is ever changed to modify them.
    if isinstance(depstr, list):
        if portage._internal_warnings:
            warnings.warn(_("Passing paren_reduced dep arrays to %s is deprecated. " + \
                "Pass the original dep string instead.") % \
                ('portage.dep.use_reduce',), DeprecationWarning, stacklevel=2)
        depstr = paren_enclose(depstr)
    if opconvert and flat:
        raise ValueError("portage.dep.use_reduce: 'opconvert' and 'flat' are mutually exclusive")
    if matchall and matchnone:
        raise ValueError("portage.dep.use_reduce: 'matchall' and 'matchnone' are mutually exclusive")
    eapi_attrs = _get_eapi_attrs(eapi)
    useflag_re = _get_useflag_re(eapi)
    def is_active(conditional):
        """
        Decides if a given use conditional is active.
        """
        if conditional.startswith("!"):
            flag = conditional[1:-1]
            is_negated = True
        else:
            flag = conditional[:-1]
            is_negated = False
        if is_valid_flag:
            if not is_valid_flag(flag):
                msg = _("USE flag '%s' referenced in " + \
                    "conditional '%s' is not in IUSE") \
                    % (flag, conditional)
                e = InvalidData(msg, category='IUSE.missing')
                raise InvalidDependString(msg, errors=(e,))
        else:
            if useflag_re.match(flag) is None:
                raise InvalidDependString(
                    _("invalid use flag '%s' in conditional '%s'") % (flag, conditional))
        if is_negated and flag in excludeall:
            return False
        if flag in masklist:
            return is_negated
        if matchall:
            return True
        if matchnone:
            return False
        return (flag in uselist and not is_negated) or \
            (flag not in uselist and is_negated)
    def missing_white_space_check(token, pos):
        """
        Used to generate good error messages for invalid tokens.
        """
        for x in (")", "(", "||"):
            if token.startswith(x) or token.endswith(x):
                raise InvalidDependString(
                    _("missing whitespace around '%s' at '%s', token %s") % (x, token, pos+1))
    mysplit = depstr.split()
    #Count the bracket level.
    level = 0
    #We parse into a stack. Every time we hit a '(', a new empty list is appended to the stack.
    #When we hit a ')', the last list in the stack is merged with list one level up.
    stack = [[]]
    #Set need_bracket to True after use conditionals or ||. Other tokens need to ensure
    #that need_bracket is not True.
    need_bracket = False
    #Set need_simple_token to True after a SRC_URI arrow. Other tokens need to ensure
    #that need_simple_token is not True.
    need_simple_token = False
    for pos, token in enumerate(mysplit):
        if token == "(":
            if need_simple_token:
                raise InvalidDependString(
                    _("expected: file name, got: '%s', token %s") % (token, pos+1))
            if len(mysplit) >= pos+2 and mysplit[pos+1] == ")":
                raise InvalidDependString(
                    _("expected: dependency string, got: ')', token %s") % (pos+1,))
            need_bracket = False
            stack.append([])
            level += 1
        elif token == ")":
            if need_bracket:
                raise InvalidDependString(
                    _("expected: '(', got: '%s', token %s") % (token, pos+1))
            if need_simple_token:
                raise InvalidDependString(
                    _("expected: file name, got: '%s', token %s") % (token, pos+1))
            if level > 0:
                level -= 1
                l = stack.pop()
                is_single = len(l) == 1 or \
                    (opconvert and l and l[0] == "||") or \
                    (not opconvert and len(l)==2 and l[0] == "||")
                ignore = False
                if flat:
                    #In 'flat' mode, we simply merge all lists into a single large one.
                    if stack[level] and stack[level][-1][-1] == "?":
                        #The last token before the '(' that matches the current ')'
                        #was a use conditional. The conditional is removed in any case.
                        #Merge the current list if needed.
                        if is_active(stack[level][-1]):
                            stack[level].pop()
                            stack[level].extend(l)
                        else:
                            stack[level].pop()
                    else:
                        stack[level].extend(l)
                    continue
                if stack[level]:
                    if stack[level][-1] == "||" and not l:
                        #Optimize: || ( ) -> .
                        stack[level].pop()
                    elif stack[level][-1][-1] == "?":
                        #The last token before the '(' that matches the current ')'
                        #was a use conditional, remove it and decide if we
                        #have to keep the current list.
                        if not is_active(stack[level][-1]):
                            ignore = True
                        stack[level].pop()
                def ends_in_any_of_dep(k):
                    return k>=0 and stack[k] and stack[k][-1] == "||"
                def starts_with_any_of_dep(k):
                    #'ends_in_any_of_dep' for opconvert
                    return k>=0 and stack[k] and stack[k][0] == "||"
                def last_any_of_operator_level(k):
                    #Returns the level of the last || operator if it is in effect for
                    #the current level. It is not in effect, if there is a level, that
                    #ends in a non-operator. This is almost equivalent to stack[level][-1]=="||",
                    #expect that it skips empty levels.
                    while k>=0:
                        if stack[k]:
                            if stack[k][-1] == "||":
                                return k
                            elif stack[k][-1][-1] != "?":
                                return -1
                        k -= 1
                    return -1
                def special_append():
                    """
                    Use extend instead of append if possible. This kills all redundant brackets.
                    """
                    if is_single:
                        #Either [A], [[...]] or [|| [...]]
                        if l[0] == "||" and ends_in_any_of_dep(level-1):
                            if opconvert:
                                stack[level].extend(l[1:])
                            else:
                                stack[level].extend(l[1])
                        elif len(l) == 1 and isinstance(l[0], list):
                            # l = [[...]]
                            last = last_any_of_operator_level(level-1)
                            if last == -1:
                                if opconvert and isinstance(l[0], list) \
                                    and l[0] and l[0][0] == '||':
                                    stack[level].append(l[0])
                                else:
                                    stack[level].extend(l[0])
                            else:
                                if opconvert and l[0] and l[0][0] == "||":
                                    stack[level].extend(l[0][1:])
                                else:
                                    stack[level].append(l[0])
                        else:
                            stack[level].extend(l)
                    else:
                        if opconvert and stack[level] and stack[level][-1] == '||':
                            stack[level][-1] = ['||'] + l
                        else:
                            stack[level].append(l)
                if l and not ignore:
                    #The current list is not empty and we don't want to ignore it because
                    #of an inactive use conditional.
                    if not ends_in_any_of_dep(level-1) and not ends_in_any_of_dep(level):
                        #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
                        stack[level].extend(l)
                    elif not stack[level]:
                        #An '||' in the level above forces us to keep to brackets.
                        special_append()
                    elif is_single and ends_in_any_of_dep(level):
                        #Optimize: || ( A ) -> A, || ( || ( ... ) ) -> || ( ... )
                        stack[level].pop()
                        special_append()
                    elif ends_in_any_of_dep(level) and ends_in_any_of_dep(level-1):
                        #Optimize: || ( A || ( B C ) ) -> || ( A B C )
                        stack[level].pop()
                        stack[level].extend(l)
                    else:
                        if opconvert and ends_in_any_of_dep(level):
                            #In opconvert mode, we have to move the operator from the level
                            #above into the current list.
                            stack[level].pop()
                            stack[level].append(["||"] + l)
                        else:
                            special_append()
            else:
                raise InvalidDependString(
                    _("no matching '%s' for '%s', token %s") % ("(", ")", pos+1))
        elif token == "||":
            if is_src_uri:
                raise InvalidDependString(
                    _("any-of dependencies are not allowed in SRC_URI: token %s") % (pos+1,))
            if need_bracket:
                raise InvalidDependString(
                    _("expected: '(', got: '%s', token %s") % (token, pos+1))
            need_bracket = True
            stack[level].append(token)
        elif token == "->":
            if need_simple_token:
                raise InvalidDependString(
                    _("expected: file name, got: '%s', token %s") % (token, pos+1))
            if not is_src_uri:
                raise InvalidDependString(
                    _("SRC_URI arrow are only allowed in SRC_URI: token %s") % (pos+1,))
            if not eapi_attrs.src_uri_arrows:
                raise InvalidDependString(
                    _("SRC_URI arrow not allowed in EAPI %s: token %s") % (eapi, pos+1))
            need_simple_token = True
            stack[level].append(token)
        else:
            missing_white_space_check(token, pos)
            if need_bracket:
                raise InvalidDependString(
                    _("expected: '(', got: '%s', token %s") % (token, pos+1))
            if need_simple_token and "/" in token:
                #The last token was a SRC_URI arrow, make sure we have a simple file name.
                raise InvalidDependString(
                    _("expected: file name, got: '%s', token %s") % (token, pos+1))
            if token[-1] == "?":
                need_bracket = True
            else:
                need_simple_token = False
                if token_class and not is_src_uri:
                    #Add a hack for SRC_URI here, to avoid conditional code at the consumer level
                    try:
                        token = token_class(token, eapi=eapi,
                            is_valid_flag=is_valid_flag)
                    except InvalidAtom as e:
                        raise InvalidDependString(
                            _("Invalid atom (%s), token %s") \
                            % (e, pos+1), errors=(e,))
                    except SystemExit:
                        raise
                    except Exception as e:
                        raise InvalidDependString(
                            _("Invalid token '%s', token %s") % (token, pos+1))
                    if not matchall and \
                        hasattr(token, 'evaluate_conditionals'):
                        token = token.evaluate_conditionals(uselist)
            stack[level].append(token)
    if level != 0:
        raise InvalidDependString(
            _("Missing '%s' at end of string") % (")",))
    if need_bracket:
        raise InvalidDependString(
            _("Missing '%s' at end of string") % ("(",))
    if need_simple_token:
        raise InvalidDependString(
            _("Missing file name at end of string"))
    return stack[0]
def dep_opconvert(deplist):
    """
    Recursively walk a nested dependency list and fold every '||'
    operator together with the list that follows it, so the operator
    and its alternatives become one sublist.

    Example usage:
        >>> test = ["blah", "||", ["foo", "bar", "baz"]]
        >>> dep_opconvert(test)
        ['blah', ['||', 'foo', 'bar', 'baz']]

    @param deplist: A list of deps to format
    @type deplist: List
    @rtype: List
    @return: The new list with the new ordering
    """
    if portage._internal_warnings:
        warnings.warn(_("%s is deprecated. Use %s with the opconvert parameter set to True instead.") % \
            ('portage.dep.dep_opconvert', 'portage.dep.use_reduce'), DeprecationWarning, stacklevel=2)

    converted = []
    idx = 0
    while idx != len(deplist):
        node = deplist[idx]
        if isinstance(node, list):
            converted.append(dep_opconvert(node))
        elif node == "||":
            # Fold the operator into the sublist that follows it and
            # skip past that sublist.
            converted.append([node] + dep_opconvert(deplist[idx + 1]))
            idx += 1
        else:
            converted.append(node)
        idx += 1
    return converted
def flatten(mylist):
    """
    Recursively traverse nested lists and return a single list containing
    all non-list elements that are found.

    Example usage:
        >>> flatten([1, [2, 3, [4]]])
        [1, 2, 3, 4]

    @param mylist: A list containing nested lists and non-list elements.
    @type mylist: List
    @rtype: List
    @return: A single list containing only non-list elements.
    """
    if portage._internal_warnings:
        warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
            ('portage.dep.flatten',), DeprecationWarning, stacklevel=2)

    flat = []
    for element in mylist:
        # Lists are flattened recursively; everything else is kept as-is.
        flat.extend(flatten(element) if isinstance(element, list) else (element,))
    return flat
class _use_dep(object):
    """
    Parsed representation of the USE-dependency part of an atom, e.g. the
    "[foo,-bar,baz?,qux=]" suffix of "cat/pkg[foo,-bar,baz?,qux=]".
    """

    __slots__ = ("_eapi_attrs", "conditional", "missing_enabled", "missing_disabled",
        "disabled", "enabled", "tokens", "required")

    class _conditionals_class(object):
        """Groups conditional USE flags by operator kind (foo?, !foo?, foo=, !foo=)."""

        __slots__ = ("enabled", "disabled", "equal", "not_equal")

        def items(self):
            # Yield (group-name, flags) pairs, skipping empty/unset groups.
            for k in self.__slots__:
                v = getattr(self, k, None)
                if v:
                    yield (k, v)

        def values(self):
            # Yield each non-empty flag group.
            for k in self.__slots__:
                v = getattr(self, k, None)
                if v:
                    yield v

    # used in InvalidAtom messages
    _conditional_strings = {
        'enabled' : '%s?',
        'disabled': '!%s?',
        'equal': '%s=',
        'not_equal': '!%s=',
    }

    def __init__(self, use, eapi_attrs, enabled_flags=None, disabled_flags=None, missing_enabled=None,
        missing_disabled=None, conditional=None, required=None):
        """
        @param use: sequence of raw USE-dep tokens (e.g. ["foo", "-bar", "baz?"])
        @param eapi_attrs: EAPI attribute object controlling allowed syntax
        The remaining keyword arguments form a fast path used internally by
        this class's own methods to rebuild an instance from already-parsed
        data without re-running the regular expression.
        """
        self._eapi_attrs = eapi_attrs

        if enabled_flags is not None:
            #A shortcut for the classe's own methods.
            self.tokens = use
            if not isinstance(self.tokens, tuple):
                self.tokens = tuple(self.tokens)

            self.required = frozenset(required)
            self.enabled = frozenset(enabled_flags)
            self.disabled = frozenset(disabled_flags)
            self.missing_enabled = frozenset(missing_enabled)
            self.missing_disabled = frozenset(missing_disabled)
            self.conditional = None

            if conditional:
                self.conditional = self._conditionals_class()
                for k in "enabled", "disabled", "equal", "not_equal":
                    setattr(self.conditional, k, frozenset(conditional.get(k, [])))

            return

        enabled_flags = set()
        disabled_flags = set()
        missing_enabled = set()   # flags with a "(+)" default
        missing_disabled = set()  # flags with a "(-)" default
        no_default = set()        # flags without any default

        conditional = {}
        usedep_re = _get_usedep_re(self._eapi_attrs)

        for x in use:
            m = usedep_re.match(x)
            if m is None:
                raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))

            # operator combines the leading "!"/"-" prefix with the trailing
            # "?"/"=" suffix, giving one of: "", "-", "?", "=", "!=", "!?".
            operator = m.group("prefix") + m.group("suffix")
            flag = m.group("flag")
            default = m.group("default")

            if not operator:
                enabled_flags.add(flag)
            elif operator == "-":
                disabled_flags.add(flag)
            elif operator == "?":
                conditional.setdefault("enabled", set()).add(flag)
            elif operator == "=":
                conditional.setdefault("equal", set()).add(flag)
            elif operator == "!=":
                conditional.setdefault("not_equal", set()).add(flag)
            elif operator == "!?":
                conditional.setdefault("disabled", set()).add(flag)
            else:
                raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))

            if default:
                if default == "(+)":
                    # A flag may not mix a (+) default with a (-) default or
                    # with an earlier occurrence that had no default.
                    if flag in missing_disabled or flag in no_default:
                        raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
                    missing_enabled.add(flag)
                else:
                    if flag in missing_enabled or flag in no_default:
                        raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
                    missing_disabled.add(flag)
            else:
                if flag in missing_enabled or flag in missing_disabled:
                    raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
                no_default.add(flag)

        self.tokens = use
        if not isinstance(self.tokens, tuple):
            self.tokens = tuple(self.tokens)

        self.required = frozenset(no_default)
        self.enabled = frozenset(enabled_flags)
        self.disabled = frozenset(disabled_flags)
        self.missing_enabled = frozenset(missing_enabled)
        self.missing_disabled = frozenset(missing_disabled)
        self.conditional = None

        if conditional:
            self.conditional = self._conditionals_class()
            for k in "enabled", "disabled", "equal", "not_equal":
                setattr(self.conditional, k, frozenset(conditional.get(k, [])))

    def __bool__(self):
        return bool(self.tokens)

    if sys.hexversion < 0x3000000:
        # Python 2 truth-value protocol.
        __nonzero__ = __bool__

    def __str__(self):
        if not self.tokens:
            return ""
        return "[%s]" % (",".join(self.tokens),)

    if sys.hexversion < 0x3000000:
        # On Python 2 __str__ must return encoded bytes; keep the text
        # version available as __unicode__.
        __unicode__ = __str__

        def __str__(self):
            return _unicode_encode(self.__unicode__(),
                encoding=_encodings['content'], errors='backslashreplace')

    def __repr__(self):
        return "portage.dep._use_dep(%s)" % repr(self.tokens)

    def evaluate_conditionals(self, use):
        """
        Create a new instance with conditionals evaluated.

        Conditional evaluation behavior:

            parent state conditional result

             x          x?         x
            -x          x?
             x         !x?
            -x         !x?        -x

             x          x=         x
            -x          x=        -x
             x         !x=        -x
            -x         !x=         x

        Conditional syntax examples:

            Compact Form        Equivalent Expanded Form

            foo[bar?]           bar? ( foo[bar] ) !bar? ( foo )
            foo[!bar?]          bar? ( foo ) !bar? ( foo[-bar] )
            foo[bar=]           bar? ( foo[bar] ) !bar? ( foo[-bar] )
            foo[!bar=]          bar? ( foo[-bar] ) !bar? ( foo[bar] )

        @param use: set of enabled USE flags of the parent package
        """
        enabled_flags = set(self.enabled)
        disabled_flags = set(self.disabled)

        tokens = []
        usedep_re = _get_usedep_re(self._eapi_attrs)

        for x in self.tokens:
            m = usedep_re.match(x)

            operator = m.group("prefix") + m.group("suffix")
            flag = m.group("flag")
            default = m.group("default")
            if default is None:
                default = ""

            if operator == "?":
                if flag in use:
                    enabled_flags.add(flag)
                    tokens.append(flag+default)
            elif operator == "=":
                if flag in use:
                    enabled_flags.add(flag)
                    tokens.append(flag+default)
                else:
                    disabled_flags.add(flag)
                    tokens.append("-"+flag+default)
            elif operator == "!=":
                if flag in use:
                    disabled_flags.add(flag)
                    tokens.append("-"+flag+default)
                else:
                    enabled_flags.add(flag)
                    tokens.append(flag+default)
            elif operator == "!?":
                if flag not in use:
                    disabled_flags.add(flag)
                    tokens.append("-"+flag+default)
            else:
                # Unconditional token ("flag" or "-flag"): kept verbatim.
                tokens.append(x)

        return _use_dep(tokens, self._eapi_attrs, enabled_flags=enabled_flags, disabled_flags=disabled_flags,
            missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, required=self.required)

    def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
        """
        Create a new instance with satisfied use deps removed.

        @param other_use: enabled USE flags of the matched package
        @param is_valid_flag: predicate telling whether a flag is in IUSE
        @param parent_use: enabled USE flags of the depending package;
            required whenever this instance has conditional tokens
        """
        if parent_use is None and self.conditional:
            raise InvalidAtom("violated_conditionals needs 'parent_use'" + \
                " parameter for conditional flags.")

        enabled_flags = set()
        disabled_flags = set()

        conditional = {}
        tokens = []

        all_defaults = frozenset(chain(self.missing_enabled, self.missing_disabled))

        def validate_flag(flag):
            # A flag is referenceable if it is in IUSE or carries a
            # (+)/(-) missing-flag default.
            return is_valid_flag(flag) or flag in all_defaults

        usedep_re = _get_usedep_re(self._eapi_attrs)

        for x in self.tokens:
            m = usedep_re.match(x)

            operator = m.group("prefix") + m.group("suffix")
            flag = m.group("flag")

            if not validate_flag(flag):
                # Unknown flag without a default: always report as violated.
                tokens.append(x)
                if not operator:
                    enabled_flags.add(flag)
                elif operator == "-":
                    disabled_flags.add(flag)
                elif operator == "?":
                    conditional.setdefault("enabled", set()).add(flag)
                elif operator == "=":
                    conditional.setdefault("equal", set()).add(flag)
                elif operator == "!=":
                    conditional.setdefault("not_equal", set()).add(flag)
                elif operator == "!?":
                    conditional.setdefault("disabled", set()).add(flag)

                continue

            if not operator:
                # [flag]: violated when other_use lacks the flag.
                if flag not in other_use:
                    if is_valid_flag(flag) or flag in self.missing_disabled:
                        tokens.append(x)
                        enabled_flags.add(flag)
            elif operator == "-":
                # [-flag]: violated when other_use has the flag, or when the
                # flag is missing from IUSE but defaults to enabled via (+).
                if flag not in other_use:
                    if not is_valid_flag(flag):
                        if flag in self.missing_enabled:
                            tokens.append(x)
                            disabled_flags.add(flag)
                else:
                    tokens.append(x)
                    disabled_flags.add(flag)
            elif operator == "?":
                # [flag?]: active only when the parent has the flag enabled.
                if flag not in parent_use or flag in other_use:
                    continue

                if is_valid_flag(flag) or flag in self.missing_disabled:
                    tokens.append(x)
                    conditional.setdefault("enabled", set()).add(flag)
            elif operator == "=":
                # [flag=]: other_use must mirror the parent's state.
                if flag in parent_use and flag not in other_use:
                    if is_valid_flag(flag):
                        tokens.append(x)
                        conditional.setdefault("equal", set()).add(flag)
                    else:
                        if flag in self.missing_disabled:
                            tokens.append(x)
                            conditional.setdefault("equal", set()).add(flag)
                elif flag not in parent_use:
                    if flag not in other_use:
                        if not is_valid_flag(flag):
                            if flag in self.missing_enabled:
                                tokens.append(x)
                                conditional.setdefault("equal", set()).add(flag)
                    else:
                        tokens.append(x)
                        conditional.setdefault("equal", set()).add(flag)
            elif operator == "!=":
                # [!flag=]: other_use must be the inverse of the parent's state.
                if flag not in parent_use and flag not in other_use:
                    if is_valid_flag(flag):
                        tokens.append(x)
                        conditional.setdefault("not_equal", set()).add(flag)
                    else:
                        if flag in self.missing_disabled:
                            tokens.append(x)
                            conditional.setdefault("not_equal", set()).add(flag)
                elif flag in parent_use:
                    if flag not in other_use:
                        if not is_valid_flag(flag):
                            if flag in self.missing_enabled:
                                tokens.append(x)
                                conditional.setdefault("not_equal", set()).add(flag)
                    else:
                        tokens.append(x)
                        conditional.setdefault("not_equal", set()).add(flag)
            elif operator == "!?":
                # [!flag?]: active only when the parent lacks the flag.
                if flag not in parent_use:
                    if flag not in other_use:
                        if not is_valid_flag(flag) and flag in self.missing_enabled:
                            tokens.append(x)
                            conditional.setdefault("disabled", set()).add(flag)
                    else:
                        tokens.append(x)
                        conditional.setdefault("disabled", set()).add(flag)

        return _use_dep(tokens, self._eapi_attrs, enabled_flags=enabled_flags, disabled_flags=disabled_flags,
            missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, \
            conditional=conditional, required=self.required)

    def _eval_qa_conditionals(self, use_mask, use_force):
        """
        For repoman, evaluate all possible combinations within the constraints
        of the given use.force and use.mask settings. The result may seem
        ambiguous in the sense that the same flag can be in both the enabled
        and disabled sets, but this is useful within the context of how its
        intended to be used by repoman. It is assumed that the caller has
        already ensured that there is no intersection between the given
        use_mask and use_force sets when necessary.
        """
        enabled_flags = set(self.enabled)
        disabled_flags = set(self.disabled)
        missing_enabled = self.missing_enabled
        missing_disabled = self.missing_disabled

        tokens = []
        usedep_re = _get_usedep_re(self._eapi_attrs)

        for x in self.tokens:
            m = usedep_re.match(x)

            operator = m.group("prefix") + m.group("suffix")
            flag = m.group("flag")
            default = m.group("default")
            if default is None:
                default = ""

            if operator == "?":
                if flag not in use_mask:
                    enabled_flags.add(flag)
                    tokens.append(flag+default)
            elif operator == "=":
                # Both outcomes are emitted unless masked/forced rules them out.
                if flag not in use_mask:
                    enabled_flags.add(flag)
                    tokens.append(flag+default)
                if flag not in use_force:
                    disabled_flags.add(flag)
                    tokens.append("-"+flag+default)
            elif operator == "!=":
                if flag not in use_force:
                    enabled_flags.add(flag)
                    tokens.append(flag+default)
                if flag not in use_mask:
                    disabled_flags.add(flag)
                    tokens.append("-"+flag+default)
            elif operator == "!?":
                if flag not in use_force:
                    disabled_flags.add(flag)
                    tokens.append("-"+flag+default)
            else:
                tokens.append(x)

        return _use_dep(tokens, self._eapi_attrs, enabled_flags=enabled_flags, disabled_flags=disabled_flags,
            missing_enabled=missing_enabled, missing_disabled=missing_disabled, required=self.required)
class Atom(_unicode):
    """
    For compatibility with existing atom string manipulation code, this
    class emulates most of the str methods that are useful with atoms.

    Instances are immutable: all attributes are written via self.__dict__
    in __init__ and __setattr__ is disabled afterwards.
    """

    class _blocker(object):
        __slots__ = ("overlap",)

        class _overlap(object):
            __slots__ = ("forbid",)

            def __init__(self, forbid=False):
                self.forbid = forbid

        def __init__(self, forbid_overlap=False):
            # forbid_overlap=True corresponds to the strong "!!" blocker.
            self.overlap = self._overlap(forbid=forbid_overlap)

    def __new__(cls, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=None,
        _use=None, eapi=None, is_valid_flag=None):
        return _unicode.__new__(cls, s)

    def __init__(self, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=None,
        _use=None, eapi=None, is_valid_flag=None):
        """
        @param s: the atom string, e.g. ">=cat/pkg-1.0:2::repo[foo?]"
        @param unevaluated_atom: the original Atom this one was derived from
            (by conditional evaluation), if any
        @param allow_wildcard: permit extended '*' syntax in cp
        @param allow_repo: permit a "::repo" suffix (ignored when eapi given)
        @param _use: pre-parsed _use_dep instance (internal fast path)
        @param eapi: EAPI string used to validate EAPI-conditional syntax
        @param is_valid_flag: predicate used to validate conditional USE flags
        @raises InvalidAtom: if s is not a valid atom for the given options
        """
        if isinstance(s, Atom):
            # This is an efficiency assertion, to ensure that the Atom
            # constructor is not called redundantly.
            raise TypeError(_("Expected %s, got %s") % \
                (_unicode, type(s)))

        if not isinstance(s, _unicode):
            # Avoid TypeError from _unicode.__init__ with PyPy.
            s = _unicode_decode(s)

        _unicode.__init__(s)

        eapi_attrs = _get_eapi_attrs(eapi)
        atom_re = _get_atom_re(eapi_attrs)

        self.__dict__['eapi'] = eapi
        if eapi is not None:
            # Ignore allow_repo when eapi is specified.
            allow_repo = eapi_attrs.repo_deps
        else:
            if allow_repo is None:
                allow_repo = True

        # Strip the blocker prefix: "!" (weak) or "!!" (strong).
        if "!" == s[:1]:
            blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
            if blocker.overlap.forbid:
                s = s[2:]
            else:
                s = s[1:]
        else:
            blocker = False
        self.__dict__['blocker'] = blocker
        m = atom_re.match(s)
        extended_syntax = False
        extended_version = None
        if m is None:
            if allow_wildcard:
                # Retry with the extended ('*' wildcard) atom syntax.
                atom_re = _get_atom_wildcard_re(eapi_attrs)
                m = atom_re.match(s)
                if m is None:
                    raise InvalidAtom(self)
                gdict = m.groupdict()
                if m.group('star') is not None:
                    # "=cat/pkg-version*" style wildcard.
                    op = '=*'
                    base = atom_re.groupindex['star']
                    cp = m.group(base + 1)
                    cpv = m.group('star')[1:]
                    extended_version = m.group(base + 4)
                else:
                    op = None
                    cpv = cp = m.group('simple')
                    if cpv.find("**") != -1:
                        raise InvalidAtom(self)
                slot = m.group('slot')
                repo = m.group('repo')
                use_str = None
                extended_syntax = True
            else:
                raise InvalidAtom(self)
        elif m.group('op') is not None:
            # Atom with a relational operator (>=, <=, >, <, =, ~).
            base = atom_re.groupindex['op']
            op = m.group(base + 1)
            cpv = m.group(base + 2)
            cp = m.group(base + 3)
            slot = m.group(atom_re.groups - 2)
            repo = m.group(atom_re.groups - 1)
            use_str = m.group(atom_re.groups)
            if m.group(base + 4) is not None:
                raise InvalidAtom(self)
        elif m.group('star') is not None:
            # "=cpv*" version-prefix match.
            base = atom_re.groupindex['star']
            op = '=*'
            cpv = m.group(base + 1)
            cp = m.group(base + 2)
            slot = m.group(atom_re.groups - 2)
            repo = m.group(atom_re.groups - 1)
            use_str = m.group(atom_re.groups)
            if m.group(base + 3) is not None:
                raise InvalidAtom(self)
        elif m.group('simple') is not None:
            # Operator-less atom: plain cat/pkg.
            op = None
            cpv = cp = m.group(atom_re.groupindex['simple'] + 1)
            slot = m.group(atom_re.groups - 2)
            repo = m.group(atom_re.groups - 1)
            use_str = m.group(atom_re.groups)
            if m.group(atom_re.groupindex['simple'] + 2) is not None:
                raise InvalidAtom(self)
        else:
            raise AssertionError(_("required group not found in atom: '%s'") % self)
        self.__dict__['cp'] = cp
        try:
            self.__dict__['cpv'] = _pkg_str(cpv)
            self.__dict__['version'] = self.cpv.version
        except InvalidData:
            # plain cp, wildcard, or something
            self.__dict__['cpv'] = cpv
            self.__dict__['version'] = extended_version
        self.__dict__['repo'] = repo
        if slot is None:
            self.__dict__['slot'] = None
            self.__dict__['sub_slot'] = None
            self.__dict__['slot_operator'] = None
        else:
            slot_re = _get_slot_dep_re(eapi_attrs)
            slot_match = slot_re.match(slot)
            if slot_match is None:
                raise InvalidAtom(self)
            if eapi_attrs.slot_operator:
                # Slot-operator syntax: "slot", "slot/sub_slot", "*", "=",
                # or "slot=".
                self.__dict__['slot'] = slot_match.group(1)
                sub_slot = slot_match.group(2)
                if sub_slot is not None:
                    sub_slot = sub_slot.lstrip("/")
                if sub_slot in ("*", "="):
                    self.__dict__['sub_slot'] = None
                    self.__dict__['slot_operator'] = sub_slot
                else:
                    slot_operator = None
                    if sub_slot is not None and sub_slot[-1:] == "=":
                        slot_operator = sub_slot[-1:]
                        sub_slot = sub_slot[:-1]
                    self.__dict__['sub_slot'] = sub_slot
                    self.__dict__['slot_operator'] = slot_operator
                if self.slot is not None and self.slot_operator == "*":
                    # A named slot combined with "*" is invalid.
                    raise InvalidAtom(self)
            else:
                self.__dict__['slot'] = slot
                self.__dict__['sub_slot'] = None
                self.__dict__['slot_operator'] = None
        self.__dict__['operator'] = op
        self.__dict__['extended_syntax'] = extended_syntax

        if not (repo is None or allow_repo):
            raise InvalidAtom(self)

        if use_str is not None:
            if _use is not None:
                use = _use
            else:
                # Strip the surrounding brackets and parse the USE deps.
                use = _use_dep(use_str[1:-1].split(","), eapi_attrs)
            without_use = Atom(m.group('without_use'), allow_repo=allow_repo)
        else:
            use = None
            if unevaluated_atom is not None and \
                unevaluated_atom.use is not None:
                # unevaluated_atom.use is used for IUSE checks when matching
                # packages, so it must not propagate to without_use
                without_use = Atom(s, allow_wildcard=allow_wildcard,
                    allow_repo=allow_repo)
            else:
                without_use = self

        self.__dict__['use'] = use
        self.__dict__['without_use'] = without_use

        if unevaluated_atom:
            self.__dict__['unevaluated_atom'] = unevaluated_atom
        else:
            self.__dict__['unevaluated_atom'] = self

        if eapi is not None:
            # Validate EAPI-conditional features against eapi_attrs.
            if not isinstance(eapi, basestring):
                raise TypeError('expected eapi argument of ' + \
                    '%s, got %s: %s' % (basestring, type(eapi), eapi,))
            if self.slot and not eapi_attrs.slot_deps:
                raise InvalidAtom(
                    _("Slot deps are not allowed in EAPI %s: '%s'") \
                    % (eapi, self), category='EAPI.incompatible')
            if self.use:
                if not eapi_attrs.use_deps:
                    raise InvalidAtom(
                        _("Use deps are not allowed in EAPI %s: '%s'") \
                        % (eapi, self), category='EAPI.incompatible')
                elif not eapi_attrs.use_dep_defaults and \
                    (self.use.missing_enabled or self.use.missing_disabled):
                    raise InvalidAtom(
                        _("Use dep defaults are not allowed in EAPI %s: '%s'") \
                        % (eapi, self), category='EAPI.incompatible')
                if is_valid_flag is not None and self.use.conditional:
                    invalid_flag = None
                    try:
                        # StopIteration is (ab)used to break out of the
                        # nested loops on the first invalid flag.
                        for conditional_type, flags in \
                            self.use.conditional.items():
                            for flag in flags:
                                if not is_valid_flag(flag):
                                    invalid_flag = (conditional_type, flag)
                                    raise StopIteration()
                    except StopIteration:
                        pass
                    if invalid_flag is not None:
                        conditional_type, flag = invalid_flag
                        conditional_str = _use_dep._conditional_strings[conditional_type]
                        msg = _("USE flag '%s' referenced in " + \
                            "conditional '%s' in atom '%s' is not in IUSE") \
                            % (flag, conditional_str % flag, self)
                        raise InvalidAtom(msg, category='IUSE.missing')
            if self.blocker and self.blocker.overlap.forbid and not eapi_attrs.strong_blocks:
                raise InvalidAtom(
                    _("Strong blocks are not allowed in EAPI %s: '%s'") \
                    % (eapi, self), category='EAPI.incompatible')

    @property
    def slot_operator_built(self):
        """
        Returns True if slot_operator == "=" and sub_slot is not None.
        NOTE: foo/bar:2= is unbuilt and returns False, whereas foo/bar:2/2=
        is built and returns True.
        """
        return self.slot_operator == "=" and self.sub_slot is not None

    @property
    def without_repo(self):
        # Return an equivalent atom with the "::repo" part removed.
        if self.repo is None:
            return self
        return Atom(self.replace(_repo_separator + self.repo, '', 1),
            allow_wildcard=True)

    @property
    def without_slot(self):
        # Return an equivalent atom with the slot part removed, keeping
        # the repo and USE-dep parts.
        if self.slot is None and self.slot_operator is None:
            return self
        atom = remove_slot(self)
        if self.repo is not None:
            atom += _repo_separator + self.repo
        if self.use is not None:
            atom += _unicode(self.use)
        return Atom(atom,
            allow_repo=True, allow_wildcard=True)

    def with_repo(self, repo):
        """Return a new Atom with its repo part replaced by the given repo."""
        atom = remove_slot(self)
        if self.slot is not None or self.slot_operator is not None:
            atom += _slot_separator
            if self.slot is not None:
                atom += self.slot
            if self.sub_slot is not None:
                atom += "/%s" % self.sub_slot
            if self.slot_operator is not None:
                atom += self.slot_operator
        atom += _repo_separator + repo
        if self.use is not None:
            atom += _unicode(self.use)
        return Atom(atom, allow_repo=True, allow_wildcard=True)

    def with_slot(self, slot):
        """Return a new Atom with its slot part replaced by the given slot."""
        atom = remove_slot(self) + _slot_separator + slot
        if self.repo is not None:
            atom += _repo_separator + self.repo
        if self.use is not None:
            atom += _unicode(self.use)
        return Atom(atom, allow_repo=True, allow_wildcard=True)

    def __setattr__(self, name, value):
        # Enforce immutability after __init__.
        raise AttributeError("Atom instances are immutable",
            self.__class__, name, value)

    def intersects(self, other):
        """
        Atoms with different cpv, operator or use attributes cause this method
        to return False even though there may actually be some intersection.
        TODO: Detect more forms of intersection.
        @param other: The package atom to match
        @type other: Atom
        @rtype: Boolean
        @return: True if this atom and the other atom intersect,
            False otherwise.
        """
        if not isinstance(other, Atom):
            raise TypeError("expected %s, got %s" % \
                (Atom, type(other)))

        if self == other:
            return True

        if self.cp != other.cp or \
            self.use != other.use or \
            self.operator != other.operator or \
            self.cpv != other.cpv:
            return False

        if self.slot is None or \
            other.slot is None or \
            self.slot == other.slot:
            return True

        return False

    def evaluate_conditionals(self, use):
        """
        Create an atom instance with any USE conditionals evaluated.
        @param use: The set of enabled USE flags
        @type use: set
        @rtype: Atom
        @return: an atom instance with any USE conditionals evaluated
        """
        if not (self.use and self.use.conditional):
            return self
        atom = remove_slot(self)
        if self.slot is not None or self.slot_operator is not None:
            atom += _slot_separator
            if self.slot is not None:
                atom += self.slot
            if self.sub_slot is not None:
                atom += "/%s" % self.sub_slot
            if self.slot_operator is not None:
                atom += self.slot_operator
        use_dep = self.use.evaluate_conditionals(use)
        atom += _unicode(use_dep)
        return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)

    def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
        """
        Create an atom instance with any USE conditional removed, that is
        satisfied by other_use.
        @param other_use: The set of enabled USE flags
        @type other_use: set
        @param is_valid_flag: Function that decides if a use flag is referenceable in use deps
        @type is_valid_flag: function
        @param parent_use: Set of enabled use flags of the package requiring this atom
        @type parent_use: set
        @rtype: Atom
        @return: an atom instance with any satisfied USE conditionals removed
        """
        if not self.use:
            return self
        atom = remove_slot(self)
        if self.slot is not None or self.slot_operator is not None:
            atom += _slot_separator
            if self.slot is not None:
                atom += self.slot
            if self.sub_slot is not None:
                atom += "/%s" % self.sub_slot
            if self.slot_operator is not None:
                atom += self.slot_operator
        use_dep = self.use.violated_conditionals(other_use, is_valid_flag, parent_use)
        atom += _unicode(use_dep)
        return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)

    def _eval_qa_conditionals(self, use_mask, use_force):
        # Repoman helper: delegate to _use_dep._eval_qa_conditionals.
        if not (self.use and self.use.conditional):
            return self
        atom = remove_slot(self)
        if self.slot is not None or self.slot_operator is not None:
            atom += _slot_separator
            if self.slot is not None:
                atom += self.slot
            if self.sub_slot is not None:
                atom += "/%s" % self.sub_slot
            if self.slot_operator is not None:
                atom += self.slot_operator
        use_dep = self.use._eval_qa_conditionals(use_mask, use_force)
        atom += _unicode(use_dep)
        return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)

    def __copy__(self):
        """Immutable, so returns self."""
        return self

    def __deepcopy__(self, memo=None):
        """Immutable, so returns self."""
        # NOTE(review): memo defaults to None but is indexed unconditionally;
        # copy.deepcopy always passes a dict, so this only matters for
        # direct calls — confirm before relying on the default.
        memo[id(self)] = self
        return self
# Cache of compiled regexes keyed by the extended cp string.
_extended_cp_re_cache = {}

def extended_cp_match(extended_cp, other_cp):
    """
    Checks if an extended syntax cp matches a non extended cp
    """
    # Lazily build and memoize the compiled pattern for this extended cp.
    # '+' and '.' are valid atom characters but special in regexes, so
    # escape everything first, then turn the escaped '*' into a
    # "anything but '/'" run.
    pattern = _extended_cp_re_cache.get(extended_cp)
    if pattern is None:
        pattern = re.compile("^" + re.escape(extended_cp).replace(
            r'\*', '[^/]*') + "$", re.UNICODE)
        _extended_cp_re_cache[extended_cp] = pattern
    return pattern.match(other_cp) is not None
class ExtendedAtomDict(portage.cache.mappings.MutableMapping):
    """
    dict() wrapper that supports extended atoms as keys and allows lookup
    of a normal cp against other normal cp and extended cp.
    The value type has to be given to __init__ and is assumed to be the same
    for all values.
    """

    __slots__ = ('_extended', '_normal', '_value_class')

    def __init__(self, value_class):
        # Keys containing "*" go to _extended; all others go to _normal.
        self._extended = {}
        self._normal = {}
        self._value_class = value_class

    def copy(self):
        """Return a shallow copy (the stored values are shared)."""
        result = self.__class__(self._value_class)
        result._extended.update(self._extended)
        result._normal.update(self._normal)
        return result

    def __iter__(self):
        for k in self._normal:
            yield k
        for k in self._extended:
            yield k

    def iteritems(self):
        try:
            for item in self._normal.items():
                yield item
            for item in self._extended.items():
                yield item
        except AttributeError:
            pass # FEATURES=python-trace

    def __delitem__(self, cp):
        if "*" in cp:
            return self._extended.__delitem__(cp)
        else:
            return self._normal.__delitem__(cp)

    if sys.hexversion >= 0x3000000:
        # Python 3: expose the py2-style iterators under the py3 dict
        # protocol names.
        keys = __iter__
        items = iteritems

    def __len__(self):
        return len(self._normal) + len(self._extended)

    def setdefault(self, cp, default=None):
        if "*" in cp:
            return self._extended.setdefault(cp, default)
        else:
            return self._normal.setdefault(cp, default)

    def __getitem__(self, cp):
        """
        Aggregate the exact _normal match plus every _extended wildcard
        entry matching cp into a fresh _value_class instance; raise
        KeyError when nothing matches.
        """
        if not isinstance(cp, basestring):
            raise KeyError(cp)

        if '*' in cp:
            # Wildcard keys are looked up verbatim, without aggregation.
            return self._extended[cp]

        ret = self._value_class()
        normal_match = self._normal.get(cp)
        match = False

        if normal_match is not None:
            match = True
            # The value class is duck-typed: dict-like values use update(),
            # list-like values use extend().
            if hasattr(ret, "update"):
                ret.update(normal_match)
            elif hasattr(ret, "extend"):
                ret.extend(normal_match)
            else:
                raise NotImplementedError()

        for extended_cp in self._extended:
            if extended_cp_match(extended_cp, cp):
                match = True
                if hasattr(ret, "update"):
                    ret.update(self._extended[extended_cp])
                elif hasattr(ret, "extend"):
                    ret.extend(self._extended[extended_cp])
                else:
                    raise NotImplementedError()

        if not match:
            raise KeyError(cp)

        return ret

    def __setitem__(self, cp, val):
        if "*" in cp:
            self._extended[cp] = val
        else:
            self._normal[cp] = val

    def __eq__(self, other):
        return self._value_class == other._value_class and \
            self._extended == other._extended and \
            self._normal == other._normal

    def clear(self):
        self._extended.clear()
        self._normal.clear()
def get_operator(mydep):
    """
    Return the operator used in a depstring.

    Example usage:
        >>> from portage.dep import *
        >>> get_operator(">=test-1.0")
        '>='

    @param mydep: The dep string to check
    @type mydep: String
    @rtype: String
    @return: The operator. One of:
        '~', '=', '>', '<', '=*', '>=', or '<='
    """
    atom = mydep if isinstance(mydep, Atom) else Atom(mydep)
    return atom.operator
def dep_getcpv(mydep):
    """
    Return the category-package-version with any operators/slot specifications stripped off.

    Example usage:
        >>> dep_getcpv('>=media-libs/test-3.0')
        'media-libs/test-3.0'

    @param mydep: The depstring
    @type mydep: String
    @rtype: String
    @return: The depstring with the operator removed
    """
    atom = mydep if isinstance(mydep, Atom) else Atom(mydep)
    return atom.cpv
def dep_getslot(mydep):
    """
    Retrieve the slot on a depend.

    Example usage:
        >>> dep_getslot('app-misc/test:3')
        '3'

    @param mydep: The depstring to retrieve the slot of
    @type mydep: String
    @rtype: String
    @return: The slot, or None if the dep carries no slot
    """
    # Atom instances already have the slot parsed out.
    slot = getattr(mydep, "slot", False)
    if slot is not False:
        return slot

    #remove repo_name if present
    mydep = mydep.split(_repo_separator)[0]

    colon = mydep.find(_slot_separator)
    if colon == -1:
        return None
    bracket = mydep.find("[", colon)
    return mydep[colon + 1:] if bracket == -1 else mydep[colon + 1:bracket]
def dep_getrepo(mydep):
    """
    Retrieve the repo on a depend.

    Example usage:
        >>> dep_getrepo('app-misc/test::repository')
        'repository'

    @param mydep: The depstring to retrieve the repository of
    @type mydep: String
    @rtype: String
    @return: The repository name, or None if no repository is specified
    """
    # Atom instances already have the repo parsed out.
    repo = getattr(mydep, "repo", False)
    if repo is not False:
        return repo

    metadata = getattr(mydep, "metadata", False)
    if metadata:
        repo = metadata.get('repository', False)
        if repo is not False:
            return repo

    sep = mydep.find(_repo_separator)
    if sep == -1:
        return None
    bracket = mydep.find("[", sep)
    # sep + 2 skips past the two-character repo separator.
    return mydep[sep + 2:] if bracket == -1 else mydep[sep + 2:bracket]
def remove_slot(mydep):
    """
    Removes dep components from the right side of an atom:
        * slot
        * use
        * repo
    And repo_name from the left side.
    """
    # Everything from the slot separator onward (including any repo and
    # USE parts) is dropped; if there is no slot, drop only a USE bracket.
    sep = mydep.find(_slot_separator)
    if sep != -1:
        return mydep[:sep]
    bracket = mydep.find("[")
    if bracket != -1:
        return mydep[:bracket]
    return mydep
def dep_getusedeps( depend ):
    """
    Pull a listing of USE Dependencies out of a dep atom.

    Example usage:
        >>> dep_getusedeps('app-misc/test:3[foo,-bar]')
        ('foo', '-bar')

    @param depend: The depstring to process
    @type depend: String
    @rtype: List
    @return: List of use flags ( or [] if no flags exist )
    @raises InvalidAtom: on malformed bracket syntax
    """
    use_list = []
    open_bracket = depend.find('[')
    # -1 = failure (think c++ string::npos)
    comma_separated = False
    bracket_count = 0
    while( open_bracket != -1 ):
        bracket_count += 1
        if bracket_count > 1:
            raise InvalidAtom(_("USE Dependency with more "
                "than one set of brackets: %s") % (depend,))
        close_bracket = depend.find(']', open_bracket )
        if close_bracket == -1:
            raise InvalidAtom(_("USE Dependency with no closing bracket: %s") % depend )
        use = depend[open_bracket + 1: close_bracket]
        # foo[1:1] may return '' instead of None, we don't want '' in the result
        if not use:
            raise InvalidAtom(_("USE Dependency with "
                "no use flag ([]): %s") % depend )
        if not comma_separated:
            comma_separated = "," in use
        # NOTE(review): this mixture check appears unreachable — any second
        # bracket already raised above when bracket_count exceeded 1; confirm
        # before relying on it.
        if comma_separated and bracket_count > 1:
            raise InvalidAtom(_("USE Dependency contains a mixture of "
                "comma and bracket separators: %s") % depend )

        if comma_separated:
            for x in use.split(","):
                if x:
                    use_list.append(x)
                else:
                    raise InvalidAtom(_("USE Dependency with no use "
                        "flag next to comma: %s") % depend )
        else:
            use_list.append(use)

        # Find next use flag
        open_bracket = depend.find( '[', open_bracket+1 )
    return tuple(use_list)
def isvalidatom(atom, allow_blockers=False, allow_wildcard=False,
    allow_repo=False, eapi=None):
    """
    Check to see if a depend atom is valid.

    Example usage:
        >>> isvalidatom('media-libs/test-3.0')
        False
        >>> isvalidatom('>=media-libs/test-3.0')
        True

    @param atom: The depend atom to check against
    @type atom: String or Atom
    @rtype: Boolean
    @return: One of the following:
        1) False if the atom is invalid
        2) True if the atom is valid
    """
    if eapi is not None and isinstance(atom, Atom) and atom.eapi != eapi:
        # We'll construct a new atom with the given eapi.
        atom = _unicode(atom)
    try:
        if not isinstance(atom, Atom):
            atom = Atom(atom, allow_wildcard=allow_wildcard,
                allow_repo=allow_repo, eapi=eapi)
    except InvalidAtom:
        return False
    # A blocker atom only counts as valid when blockers are permitted.
    if atom.blocker and not allow_blockers:
        return False
    return True
def isjustname(mypkg):
    """
    Checks to see if the atom is only the package name (no version parts).

    Example usage:
        >>> isjustname('=media-libs/test-3.0')
        False
        >>> isjustname('media-libs/test')
        True

    @param mypkg: The package atom to check
    @param mypkg: String or Atom
    @rtype: Integer
    @return: One of the following:
        1) False if the package string is not just the package name
        2) True if it is
    """
    try:
        if not isinstance(mypkg, Atom):
            mypkg = Atom(mypkg)
        return mypkg == mypkg.cp
    except InvalidAtom:
        pass

    # Legacy fallback: the string is "just a name" unless one of its last
    # two '-'-separated parts looks like a version.
    return not any(ververify(part) for part in mypkg.split('-')[-2:])
def isspecific(mypkg):
    """
    Checks to see if a package is in =category/package-version or
    package-version format.

    Example usage:
        >>> isspecific('media-libs/test')
        False
        >>> isspecific('=media-libs/test-3.0')
        True

    @param mypkg: The package depstring to check against
    @type mypkg: String
    @rtype: Boolean
    @return: One of the following:
        1) False if the package string is not specific
        2) True if it is
    """
    try:
        if not isinstance(mypkg, Atom):
            mypkg = Atom(mypkg)
        return mypkg != mypkg.cp
    except InvalidAtom:
        pass

    # Fall back to legacy code for backward compatibility.
    return not isjustname(mypkg)
def dep_getkey(mydep):
    """
    Return the category/package-name of a depstring.

    Example usage:
        >>> dep_getkey('=media-libs/test-3.0')
        'media-libs/test'

    @param mydep: The depstring to retrieve the category/package-name of
    @type mydep: String
    @rtype: String
    @return: The package category/package-name
    """
    atom = mydep
    if not isinstance(atom, Atom):
        # Be permissive here: wildcard and repo suffixes are accepted.
        atom = Atom(atom, allow_wildcard=True, allow_repo=True)
    return atom.cp
def match_to_list(mypkg, mylist):
    """
    Search mylist for atoms that match the given package, preserving the
    input order and dropping duplicate entries.

    @param mypkg: The package atom to match
    @type mypkg: String
    @param mylist: The list of package atoms to compare against
    @param mylist: List
    @rtype: List
    @return: A unique list of package atoms that match the given package atom
    """
    seen = set()
    unique_matches = []
    pkg_wrapper = [mypkg]
    for candidate in mylist:
        if candidate in seen:
            continue
        if not match_from_list(candidate, pkg_wrapper):
            continue
        seen.add(candidate)
        unique_matches.append(candidate)
    return unique_matches
def best_match_to_list(mypkg, mylist):
    """
    Returns the most specific entry that matches the package given.

    @param mypkg: The package atom to check
    @type mypkg: String
    @param mylist: The list of package atoms to check against
    @type mylist: List
    @rtype: String
    @return: The package atom which best matches given the following ordering:
        - =cpv      6
        - ~cpv      5
        - =cpv*     4
        - cp:slot   3
        - >cpv      2
        - <cpv      2
        - >=cpv     2
        - <=cpv     2
        - cp        1
        - cp:slot with extended syntax    0
        - cp with extended syntax        -1
    """
    # Specificity score per operator; a higher score wins.  Extended-syntax
    # (wildcard) atoms are scored separately below, at or below zero.
    operator_values = {'=':6, '~':5, '=*':4,
        '>':2, '<':2, '>=':2, '<=':2, None:1}
    maxvalue = -99
    bestm = None
    mypkg_cpv = None
    for x in match_to_list(mypkg, mylist):
        if x.extended_syntax:
            # Wildcard atoms rank below every exact form.
            if x.operator == '=*':
                if maxvalue < 0:
                    maxvalue = 0
                    bestm = x
            elif x.slot is not None:
                if maxvalue < -1:
                    maxvalue = -1
                    bestm = x
            else:
                if maxvalue < -2:
                    maxvalue = -2
                    bestm = x
            continue
        if dep_getslot(x) is not None:
            # A slot restriction makes the atom at least score-3 specific.
            if maxvalue < 3:
                maxvalue = 3
                bestm = x
        op_val = operator_values[x.operator]
        if op_val > maxvalue:
            maxvalue = op_val
            bestm = x
        elif op_val == maxvalue and op_val == 2:
            # For >, <, >=, and <=, the one with the version
            # closest to mypkg is the best match.
            if mypkg_cpv is None:
                try:
                    mypkg_cpv = mypkg.cpv
                except AttributeError:
                    mypkg_cpv = _pkg_str(remove_slot(mypkg))
            if bestm.cpv == mypkg_cpv or bestm.cpv == x.cpv:
                pass
            elif x.cpv == mypkg_cpv:
                bestm = x
            else:
                # Sort the cpvs to find the one closest to mypkg_cpv
                cpv_list = [bestm.cpv, mypkg_cpv, x.cpv]
                def cmp_cpv(cpv1, cpv2):
                    return vercmp(cpv1.version, cpv2.version)
                cpv_list.sort(key=cmp_sort_key(cmp_cpv))
                if cpv_list[0] is mypkg_cpv or cpv_list[-1] is mypkg_cpv:
                    if cpv_list[1] is x.cpv:
                        bestm = x
                else:
                    # TODO: handle the case where mypkg_cpv is in the middle
                    pass
    return bestm
def match_from_list(mydep, candidate_list):
    """
    Searches list for entries that matches the package.

    The candidates are filtered in stages: first by operator/version,
    then by slot, then by USE dependencies, then by repository.

    @param mydep: The package atom to match
    @type mydep: String
    @param candidate_list: The list of package atoms to compare against
    @param candidate_list: List
    @rtype: List
    @return: A list of package atoms that match the given package atom
    """
    if not candidate_list:
        return []

    # Strip (possibly doubled) blocker prefix; blockers match like the
    # underlying atom here.
    if "!" == mydep[:1]:
        if "!" == mydep[1:2]:
            mydep = mydep[2:]
        else:
            mydep = mydep[1:]
    if not isinstance(mydep, Atom):
        mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)

    mycpv = mydep.cpv
    mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
    # NOTE(review): this local 'slot' is never read below (mydep.slot is
    # used directly in the slot-filter stage).
    slot = mydep.slot

    if not mycpv_cps:
        cat, pkg = catsplit(mycpv)
        ver = None
        rev = None
    else:
        cat, pkg, ver, rev = mycpv_cps
        if mydep == mycpv:
            raise KeyError(_("Specific key requires an operator"
                " (%s) (try adding an '=')") % (mydep))

    if ver and rev:
        operator = mydep.operator
        if not operator:
            writemsg(_("!!! Invalid atom: %s\n") % mydep, noiselevel=-1)
            return []
    else:
        operator = None

    mylist = []

    if mydep.extended_syntax:
        # Wildcard atom: match on category/package via fnmatch-style rules.
        for x in candidate_list:
            cp = getattr(x, "cp", None)
            if cp is None:
                mysplit = catpkgsplit(remove_slot(x))
                if mysplit is not None:
                    cp = mysplit[0] + '/' + mysplit[1]
            if cp is None:
                continue
            if cp == mycpv or extended_cp_match(mydep.cp, cp):
                mylist.append(x)

        if mylist and mydep.operator == "=*":
            # Narrow the cp matches by a substring version test.
            candidate_list = mylist
            mylist = []
            # Currently, only \*\w+\* is supported.
            ver = mydep.version[1:-1]
            for x in candidate_list:
                x_ver = getattr(x, "version", None)
                if x_ver is None:
                    xs = catpkgsplit(remove_slot(x))
                    if xs is None:
                        continue
                    x_ver = "-".join(xs[-2:])
                if ver in x_ver:
                    mylist.append(x)

    elif operator is None:
        # Versionless atom: match on category/package only.
        for x in candidate_list:
            cp = getattr(x, "cp", None)
            if cp is None:
                mysplit = catpkgsplit(remove_slot(x))
                if mysplit is not None:
                    cp = mysplit[0] + '/' + mysplit[1]
            if cp is None:
                continue
            if cp == mydep.cp:
                mylist.append(x)

    elif operator == "=": # Exact match
        for x in candidate_list:
            xcpv = getattr(x, "cpv", None)
            if xcpv is None:
                xcpv = remove_slot(x)
            if not cpvequal(xcpv, mycpv):
                continue
            mylist.append(x)

    elif operator == "=*": # glob match
        # XXX: Nasty special casing for leading zeros
        # Required as =* is a literal prefix match, so can't
        # use vercmp
        myver = mycpv_cps[2].lstrip("0")
        if not myver or not myver[0].isdigit():
            myver = "0"+myver
        mycpv_cmp = mycpv_cps[0] + "/" + mycpv_cps[1] + "-" + myver
        for x in candidate_list:
            xs = getattr(x, "cpv_split", None)
            if xs is None:
                xs = catpkgsplit(remove_slot(x))
            # Normalize the candidate's leading zeros the same way.
            myver = xs[2].lstrip("0")
            if not myver or not myver[0].isdigit():
                myver = "0"+myver
            xcpv = xs[0]+"/"+xs[1]+"-"+myver
            if xcpv.startswith(mycpv_cmp):
                mylist.append(x)

    elif operator == "~": # version, any revision, match
        for x in candidate_list:
            xs = getattr(x, "cpv_split", None)
            if xs is None:
                xs = catpkgsplit(remove_slot(x))
            if xs is None:
                raise InvalidData(x)
            if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
                continue
            if xs[2] != ver:
                continue
            mylist.append(x)

    elif operator in [">", ">=", "<", "<="]:
        for x in candidate_list:
            if hasattr(x, 'cp'):
                pkg = x
            else:
                try:
                    pkg = _pkg_str(remove_slot(x))
                except InvalidData:
                    continue

            if pkg.cp != mydep.cp:
                continue
            try:
                result = vercmp(pkg.version, mydep.version)
            except ValueError: # pkgcmp may return ValueError during int() conversion
                writemsg(_("\nInvalid package name: %s\n") % x, noiselevel=-1)
                raise
            if result is None:
                continue
            elif operator == ">":
                if result > 0:
                    mylist.append(x)
            elif operator == ">=":
                if result >= 0:
                    mylist.append(x)
            elif operator == "<":
                if result < 0:
                    mylist.append(x)
            elif operator == "<=":
                if result <= 0:
                    mylist.append(x)
            else:
                raise KeyError(_("Unknown operator: %s") % mydep)
    else:
        raise KeyError(_("Unknown operator: %s") % mydep)

    if mydep.slot is not None:
        # Keep only candidates whose slot matches (or is unknown).
        candidate_list = mylist
        mylist = []
        for x in candidate_list:
            x_pkg = None
            try:
                x.cpv
            except AttributeError:
                xslot = dep_getslot(x)
                if xslot is not None:
                    try:
                        x_pkg = _pkg_str(remove_slot(x), slot=xslot)
                    except InvalidData:
                        continue
            else:
                x_pkg = x

            if x_pkg is None:
                # No slot information available: cannot reject the candidate.
                mylist.append(x)
            else:
                try:
                    x_pkg.slot
                except AttributeError:
                    mylist.append(x)
                else:
                    if _match_slot(mydep, x_pkg):
                        mylist.append(x)

    if mydep.unevaluated_atom.use:
        # Filter by USE dependencies (for candidates exposing USE state).
        candidate_list = mylist
        mylist = []
        for x in candidate_list:
            use = getattr(x, "use", None)
            if use is not None:
                if mydep.unevaluated_atom.use and \
                    not x.iuse.is_valid_flag(
                    mydep.unevaluated_atom.use.required):
                    continue

                if mydep.use:
                    # Flags absent from the candidate's IUSE fall back to
                    # the atom's missing_enabled/missing_disabled defaults.
                    missing_enabled = mydep.use.missing_enabled.difference(x.iuse.all)
                    missing_disabled = mydep.use.missing_disabled.difference(x.iuse.all)

                    if mydep.use.enabled:
                        if any(f in mydep.use.enabled for f in missing_disabled):
                            continue
                        need_enabled = mydep.use.enabled.difference(use.enabled)
                        if need_enabled:
                            if any(f not in missing_enabled for f in need_enabled):
                                continue

                    if mydep.use.disabled:
                        if any(f in mydep.use.disabled for f in missing_enabled):
                            continue
                        need_disabled = mydep.use.disabled.intersection(use.enabled)
                        if need_disabled:
                            if any(f not in missing_disabled for f in need_disabled):
                                continue

            mylist.append(x)

    if mydep.repo:
        # Filter by repository, when both sides know their repo.
        candidate_list = mylist
        mylist = []
        for x in candidate_list:
            repo = getattr(x, "repo", False)
            if repo is False:
                repo = dep_getrepo(x)
            if repo is not None and repo != _unknown_repo and \
                repo != mydep.repo:
                continue
            mylist.append(x)

    return mylist
def human_readable_required_use(required_use):
    """Translate REQUIRED_USE operator tokens into readable English phrases."""
    replacements = (
        ("^^", "exactly-one-of"),
        ("||", "any-of"),
        ("??", "at-most-one-of"),
    )
    readable = required_use
    for token, phrase in replacements:
        readable = readable.replace(token, phrase)
    return readable
def get_required_use_flags(required_use, eapi=None):
    """
    Returns a set of use flags that are used in the given REQUIRED_USE string

    @param required_use: REQUIRED_USE string
    @type required_use: String
    @rtype: Set
    @return: Set of use flags that are used in the given REQUIRED_USE string
    """
    eapi_attrs = _get_eapi_attrs(eapi)
    # '??' (at-most-one-of) is only legal in EAPIs that declare support.
    if eapi_attrs.required_use_at_most_one_of:
        valid_operators = ("||", "^^", "??")
    else:
        valid_operators = ("||", "^^")

    mysplit = required_use.split()
    level = 0
    stack = [[]]
    need_bracket = False
    used_flags = set()

    def register_token(token):
        # Strip conditional '?' suffix and negation '!' prefix to get the
        # bare flag name.
        if token.endswith("?"):
            token = token[:-1]
        if token.startswith("!"):
            token = token[1:]
        used_flags.add(token)

    for token in mysplit:
        if token == "(":
            need_bracket = False
            stack.append([])
            level += 1
        elif token == ")":
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % required_use)
            if level > 0:
                level -= 1
                l = stack.pop()
                ignore = False
                if stack[level]:
                    # Pop the operator/conditional that owned this group and
                    # replace it with a placeholder truth value.
                    if stack[level][-1] in valid_operators or \
                        (not isinstance(stack[level][-1], bool) and \
                        stack[level][-1][-1] == "?"):
                        ignore = True
                        stack[level].pop()
                        stack[level].append(True)

                if l and not ignore:
                    stack[level].append(all(x for x in l))
            else:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % required_use)
        elif token in valid_operators:
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % required_use)
            # An operator must be followed by an opening bracket.
            need_bracket = True
            stack[level].append(token)
        else:
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % required_use)

            if token[-1] == "?":
                # USE-conditional: expects a bracketed group next.
                need_bracket = True
                stack[level].append(token)
            else:
                stack[level].append(True)

            register_token(token)

    if level != 0 or need_bracket:
        raise InvalidDependString(
            _("malformed syntax: '%s'") % required_use)

    return frozenset(used_flags)
class _RequiredUseLeaf(object):
__slots__ = ('_satisfied', '_token')
def __init__(self, token, satisfied):
self._token = token
self._satisfied = satisfied
def tounicode(self):
return self._token
class _RequiredUseBranch(object):
__slots__ = ('_children', '_operator', '_parent', '_satisfied')
def __init__(self, operator=None, parent=None):
self._children = []
self._operator = operator
self._parent = parent
self._satisfied = False
def __bool__(self):
return self._satisfied
def tounicode(self):
include_parens = self._parent is not None
tokens = []
if self._operator is not None:
tokens.append(self._operator)
if include_parens:
tokens.append("(")
complex_nesting = False
node = self
while node != None and not complex_nesting:
if node._operator in ("||", "^^", "??"):
complex_nesting = True
else:
node = node._parent
if complex_nesting:
for child in self._children:
tokens.append(child.tounicode())
else:
for child in self._children:
if not child._satisfied:
tokens.append(child.tounicode())
if include_parens:
tokens.append(")")
return " ".join(tokens)
if sys.hexversion < 0x3000000:
__nonzero__ = __bool__
def check_required_use(required_use, use, iuse_match, eapi=None):
    """
    Checks if the use flags listed in 'use' satisfy all
    constraints specified in 'constraints'.

    @param required_use: REQUIRED_USE string
    @type required_use: String
    @param use: Enabled use flags
    @param use: List
    @param iuse_match: Callable that takes a single flag argument and returns
        True if the flag is matched, false otherwise,
    @param iuse_match: Callable
    @rtype: Bool
    @return: Indicates if REQUIRED_USE constraints are satisfied
    """
    eapi_attrs = _get_eapi_attrs(eapi)
    # '??' (at-most-one-of) is only legal in EAPIs that declare support.
    if eapi_attrs.required_use_at_most_one_of:
        valid_operators = ("||", "^^", "??")
    else:
        valid_operators = ("||", "^^")

    def is_active(token):
        # Evaluate a single (possibly negated) flag token against 'use';
        # raises InvalidDependString for flags not matched by iuse_match.
        if token.startswith("!"):
            flag = token[1:]
            is_negated = True
        else:
            flag = token
            is_negated = False

        if not flag or not iuse_match(flag):
            # A bare '?' here means '??' was tokenized as a conditional in
            # an EAPI that does not support the at-most-one-of operator.
            if not eapi_attrs.required_use_at_most_one_of and flag == "?":
                msg = _("Operator '??' is not supported with EAPI '%s'") \
                    % (eapi,)
                e = InvalidData(msg, category='EAPI.incompatible')
                raise InvalidDependString(msg, errors=(e,))
            msg = _("USE flag '%s' is not in IUSE") \
                % (flag,)
            e = InvalidData(msg, category='IUSE.missing')
            raise InvalidDependString(msg, errors=(e,))

        return (flag in use and not is_negated) or \
            (flag not in use and is_negated)

    def is_satisfied(operator, argument):
        # Combine the truth values of a bracketed group under 'operator'.
        if not argument:
            #|| ( ) -> True
            return True

        if operator == "||":
            return (True in argument)
        elif operator == "^^":
            return (argument.count(True) == 1)
        elif operator == "??":
            return (argument.count(True) <= 1)
        elif operator[-1] == "?":
            return (False not in argument)

    mysplit = required_use.split()
    level = 0
    stack = [[]]
    # In addition to the truth-value stack, build an expression tree so the
    # caller can render the unsatisfied remainder (tounicode()).
    tree = _RequiredUseBranch()
    node = tree
    need_bracket = False

    for token in mysplit:
        if token == "(":
            if not need_bracket:
                # Anonymous group (no operator/conditional owns it).
                child = _RequiredUseBranch(parent=node)
                node._children.append(child)
                node = child

            need_bracket = False
            stack.append([])
            level += 1
        elif token == ")":
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % required_use)
            if level > 0:
                level -= 1
                l = stack.pop()
                op = None
                if stack[level]:
                    if stack[level][-1] in valid_operators:
                        # Group owned by ||/^^/??: fold it to one truth value.
                        op = stack[level].pop()
                        satisfied = is_satisfied(op, l)
                        stack[level].append(satisfied)
                        node._satisfied = satisfied

                    elif not isinstance(stack[level][-1], bool) and \
                        stack[level][-1][-1] == "?":
                        # Group owned by a USE conditional: only evaluated
                        # when the conditional flag is active.
                        op = stack[level].pop()
                        if is_active(op[:-1]):
                            satisfied = is_satisfied(op, l)
                            stack[level].append(satisfied)
                            node._satisfied = satisfied
                        else:
                            # Inactive conditional: trivially satisfied and
                            # pruned from the rendered tree.
                            node._satisfied = True
                            last_node = node._parent._children.pop()
                            if last_node is not node:
                                raise AssertionError(
                                    "node is not last child of parent")
                            node = node._parent
                            continue

                if op is None:
                    # Anonymous group: an implicit all-of.
                    satisfied = False not in l
                    node._satisfied = satisfied
                    if l:
                        stack[level].append(satisfied)

                    if len(node._children) <= 1 or \
                        node._parent._operator not in valid_operators:
                        # Splice redundant anonymous brackets out of the tree.
                        last_node = node._parent._children.pop()
                        if last_node is not node:
                            raise AssertionError(
                                "node is not last child of parent")
                        for child in node._children:
                            node._parent._children.append(child)
                            if isinstance(child, _RequiredUseBranch):
                                child._parent = node._parent

                elif not node._children:
                    # Operator with an empty group: drop the empty node.
                    last_node = node._parent._children.pop()
                    if last_node is not node:
                        raise AssertionError(
                            "node is not last child of parent")

                elif len(node._children) == 1 and op in valid_operators:
                    # || ( A ) and friends collapse to A.
                    last_node = node._parent._children.pop()
                    if last_node is not node:
                        raise AssertionError(
                            "node is not last child of parent")
                    node._parent._children.append(node._children[0])
                    if isinstance(node._children[0], _RequiredUseBranch):
                        node._children[0]._parent = node._parent
                        node = node._children[0]
                    if node._operator is None and \
                        node._parent._operator not in valid_operators:
                        # The collapsed child may itself be a redundant
                        # anonymous group; splice it out too.
                        last_node = node._parent._children.pop()
                        if last_node is not node:
                            raise AssertionError(
                                "node is not last child of parent")
                        for child in node._children:
                            node._parent._children.append(child)
                            if isinstance(child, _RequiredUseBranch):
                                child._parent = node._parent

                node = node._parent
            else:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % required_use)
        elif token in valid_operators:
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % required_use)
            need_bracket = True
            stack[level].append(token)
            child = _RequiredUseBranch(operator=token, parent=node)
            node._children.append(child)
            node = child
        else:
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % required_use)

            if token[-1] == "?":
                # USE conditional: expects a bracketed group next.
                need_bracket = True
                stack[level].append(token)
                child = _RequiredUseBranch(operator=token, parent=node)
                node._children.append(child)
                node = child
            else:
                satisfied = is_active(token)
                stack[level].append(satisfied)
                node._children.append(_RequiredUseLeaf(token, satisfied))

    if level != 0 or need_bracket:
        raise InvalidDependString(
            _("malformed syntax: '%s'") % required_use)

    # Top level is an implicit all-of.
    tree._satisfied = False not in stack[0]
    return tree
def extract_affecting_use(mystr, atom, eapi=None):
    """
    Take a dep string and an atom and return the use flags
    that decide if the given atom is in effect.

    Example usage:
        >>> extract_affecting_use('sasl? ( dev-libs/cyrus-sasl ) \
            !minimal? ( cxx? ( dev-libs/cyrus-sasl ) )', 'dev-libs/cyrus-sasl')
        {'sasl', 'minimal', 'cxx'}

    @param mystr: The dependency string
    @type mystr: String
    @param atom: The atom to get into effect
    @type atom: String
    @rtype: Set of strings
    @return: Set of use flags affecting given atom
    """
    useflag_re = _get_useflag_re(eapi)
    mysplit = mystr.split()
    level = 0
    stack = [[]]
    need_bracket = False
    affecting_use = set()

    def flag(conditional):
        # Extract the bare flag name from a 'flag?' / '!flag?' conditional.
        if conditional[0] == "!":
            flag = conditional[1:-1]
        else:
            flag = conditional[:-1]

        if useflag_re.match(flag) is None:
            raise InvalidDependString(
                _("invalid use flag '%s' in conditional '%s'") % \
                (flag, conditional))

        return flag

    for token in mysplit:
        if token == "(":
            need_bracket = False
            stack.append([])
            level += 1
        elif token == ")":
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % mystr)
            if level > 0:
                level -= 1
                l = stack.pop()

                # A "single" group holds one item, optionally preceded by a
                # '||' or conditional token.
                is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))

                def ends_in_any_of_dep(k):
                    return k>=0 and stack[k] and stack[k][-1] == "||"

                def ends_in_operator(k):
                    return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")

                def special_append():
                    """
                    Use extend instead of append if possible. This kills all redundant brackets.
                    """
                    if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
                        if len(l) == 1 and isinstance(l[0], list):
                            # l = [[...]]
                            stack[level].extend(l[0])
                        else:
                            stack[level].extend(l)
                    else:
                        stack[level].append(l)

                if l:
                    if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
                        #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
                        stack[level].extend(l)
                    elif not stack[level]:
                        #An '||' in the level above forces us to keep to brackets.
                        special_append()
                    elif len(l) == 1 and ends_in_any_of_dep(level):
                        #Optimize: || ( A ) -> A
                        stack[level].pop()
                        special_append()
                    elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
                        #Optimize: || ( || ( ... ) ) -> || ( ... )
                        #          foo? ( foo? ( ... ) ) -> foo? ( ... )
                        #          || ( foo? ( ... ) ) -> foo? ( ... )
                        stack[level].pop()
                        special_append()
                        if l[0][-1] == "?":
                            # The kept inner conditional affects the atom.
                            affecting_use.add(flag(l[0]))
                    else:
                        # The group survives; if a conditional owns it, that
                        # conditional's flag affects everything inside.
                        if stack[level] and stack[level][-1][-1] == "?":
                            affecting_use.add(flag(stack[level][-1]))
                        special_append()
                else:
                    # Empty group: discard the operator/conditional that
                    # owned it, if any.
                    if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
                        stack[level].pop()
            else:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % mystr)
        elif token == "||":
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % mystr)
            need_bracket = True
            stack[level].append(token)
        else:
            if need_bracket:
                raise InvalidDependString(
                    _("malformed syntax: '%s'") % mystr)

            if token[-1] == "?":
                need_bracket = True
                stack[level].append(token)
            elif token == atom:
                # Only the atom of interest is recorded; other atoms are
                # irrelevant to which flags affect it.
                stack[level].append(token)

    if level != 0 or need_bracket:
        raise InvalidDependString(
            _("malformed syntax: '%s'") % mystr)

    return affecting_use
|
#If the bill was $150.00, split between 5 people, with 12% tip.
#Each person should pay (150.00 / 5) * 1.12 = 33.6
#Format the result to 2 decimal places = 33.60
#Tip: There are 2 ways to round a number. You might have to do some Googling to solve this.💪
#HINT 1: https://www.google.com/search?q=how+to+round+number+to+2+decimal+places+python&oq=how+to+round+number+to+2+decimal
#HINT 2: https://www.kite.com/python/answers/how-to-limit-a-float-to-two-decimal-places-in-python
print("Welcome to your bill splitter calculator!")
bill_total = input("What is the bill total? $")
tip = input("What percentage tip would you all like to give? i.e. 10, 15, or 20? ")
how_many_ways = input("How many people are splitting this bill? ")
tip_float = float(tip) / 100 + 1.00
calculation = (float(bill_total) / float(how_many_ways)) * tip_float
calculation_two_decimals = "{:.2f}".format(calculation)
print(f"Each person sould pay: $ {calculation_two_decimals}")
|
from flask import current_app
from werkzeug.local import LocalProxy
from werkzeug.utils import import_string
def import_shop_object(key):
    """Resolve a dotted-path setting of the active shop to an object.

    Reads the active shop id from ``SHOP_ID`` in the Flask app config,
    looks up that shop's settings dict under ``SHOPS``, and imports the
    object named by ``key``.  Must be called inside an application
    context, because it dereferences ``current_app``.
    """
    shop = current_app.config['SHOP_ID']
    shop_settings = current_app.config['SHOPS'][shop]
    return import_string(shop_settings[key])
def get_order_class():
    """Return a ``LocalProxy`` for the active shop's order object.

    NOTE(review): ``LocalProxy`` calls its argument on every attribute
    access, and here it receives the imported object itself (typically a
    class), so each proxy access would *call* that object.  If the intent
    was to lazily resolve the class, this probably needs
    ``LocalProxy(lambda: import_shop_object('order'))`` — confirm with callers.
    """
    key = 'order'
    return LocalProxy(import_shop_object(key))
def get_cart_class():
    """Return a ``LocalProxy`` for the active shop's cart object.

    NOTE(review): same caveat as ``get_order_class`` — ``LocalProxy``
    invokes its argument on each access; a lambda wrapper may have been
    intended.  Also requires an active application context at call time.
    """
    key = 'cart'
    return LocalProxy(import_shop_object(key))
|
import numpy as np
import random
from utils import *
from math import sqrt
class replay_buffer:
    """Experience replay buffer with episode-age-based eviction.

    Transitions are stored column-wise (s, a, r, s2, d) together with the
    episode index they were generated in.  ``clean`` compacts the buffer,
    dropping transitions whose episode is older than a horizon derived
    from ``w_gamma``/``w_limit``, as well as transitions banned via
    ``delay_delete`` (unless they fall within the ``last_protect`` most
    recent episodes).
    """

    def init(self):
        """(Re)set every column of the buffer to empty."""
        self.n = 0                 # number of stored transitions
        self.data = []
        self.s = []                # states
        self.a = []                # actions
        self.r = []                # rewards
        self.s2 = []               # successor states
        self.d = []                # done flags
        self.i_episode = []        # episode index of each transition
        self.ban = []              # 1 marks an entry scheduled for deletion
        self.now_i_episode = 0     # index of the episode currently running
        self.cnt = 0               # get_minibatch call counter, drives clean()

    def __init__(self, limit, w_gamma, last_protect):
        self.n = 0
        self.w_gamma = w_gamma     # decay factor controlling the age horizon
        self.w_limit = 0.01        # weight threshold below which data expires
        self.s, self.a, self.r, self.s2, self.d = [], [], [], [], []
        self.init()
        self.limit = limit         # capacity; currently not enforced
        self.last_protect = last_protect  # newest episodes immune to bans

    def set_now_i_episode(self, now_i_episode):
        """Record the index of the episode currently being generated."""
        self.now_i_episode = now_i_episode

    def clean(self):
        """Compact the buffer in place, dropping expired or banned entries.

        An entry expires when its episode is more than
        log(w_limit)/log(w_gamma) episodes old; a banned entry survives
        only while it lies within the ``last_protect`` newest episodes.
        """
        if self.w_gamma == 1.0:
            limit = 2 ** 30  # no decay: entries effectively never age out
        else:
            limit = np.log(self.w_limit) / np.log(self.w_gamma)
        kept = 0
        for i in range(self.n):
            if self.now_i_episode - self.i_episode[i] <= limit and \
                    (self.ban[i] == 0 or
                     self.i_episode[i] >= self.now_i_episode - self.last_protect + 1):
                # Shift the surviving entry down to its compacted slot.
                self.s[kept] = self.s[i]
                self.a[kept] = self.a[i]
                self.r[kept] = self.r[i]
                self.s2[kept] = self.s2[i]
                self.d[kept] = self.d[i]
                self.i_episode[kept] = self.i_episode[i]
                self.ban[kept] = 0
                kept += 1
        print('clean', self.n - kept)
        # Trim the now-unused tail of every column.
        while self.n > kept:
            self.s.pop()
            self.a.pop()
            self.r.pop()
            self.s2.pop()
            self.d.pop()
            self.i_episode.pop()
            self.ban.pop()
            self.n -= 1

    def store(self, s, a, r, s2, d, i_episode):
        """Append one transition tagged with its episode index."""
        self.s.append(s)
        self.a.append(a)
        self.r.append(r)
        self.s2.append(s2)
        self.d.append(d)
        self.i_episode.append(i_episode)
        self.ban.append(0)
        self.n += 1

    def delay_delete(self, x):
        """Mark entry ``x`` for removal on the next clean()."""
        self.ban[x] = 1

    def size(self):
        """Return the number of stored transitions."""
        return self.n

    def get_minibatch(self, n):
        """Sample ``n`` distinct transitions uniformly at random.

        Every 100th call triggers clean().  Returns ``(None, None)`` when
        fewer than ``n`` entries are stored; otherwise a pair
        ``(transitions, indices)`` where each transition is the list
        ``[s, a, r, s2, d, i_episode]``.
        """
        self.cnt += 1
        if self.cnt % 100 == 0:
            self.clean()
        if n > self.n:
            return None, None
        indices = random.sample(range(self.n), n)
        batch = [[self.s[j], self.a[j], self.r[j], self.s2[j],
                  self.d[j], self.i_episode[j]] for j in indices]
        return batch, indices

    def get_lastminibatch(self, lower_bound_episode):
        """Return all transitions whose episode is >= lower_bound_episode.

        Scans from the newest entry backwards and stops at the first older
        episode — entries are assumed to be stored in episode order.
        """
        indices = []
        for i in range(self.n - 1, -1, -1):
            if self.i_episode[i] < lower_bound_episode:
                break  # data is assumed ordered by episode
            indices.append(i)
        return [[self.s[j], self.a[j], self.r[j], self.s2[j],
                 self.d[j], self.i_episode[j]] for j in indices]
|
#!/usr/bin/env python
# encoding: utf-8
import urllib
import logging
from tornado.gen import coroutine
from tornado.httpclient import AsyncHTTPClient
from settings import ALERT_VOICE_API, ALERT_VOICE_TOKEN
@coroutine
def sender(mobiles, content):
    """Fire a voice-call alert to the given mobile numbers.

    Joins the numbers into a comma-separated string, builds the alert
    service URL from ALERT_VOICE_API/ALERT_VOICE_TOKEN, and issues the
    request asynchronously.  HTTP errors are swallowed
    (``raise_error=False``) — alerting is best-effort.

    NOTE(review): ``urllib.urlencode`` only exists on Python 2; on
    Python 3 this would need ``urllib.parse.urlencode`` — confirm the
    target interpreter before porting.
    """
    mobile = ','.join(map(str, mobiles))
    logging.info("tel will be call: {0}".format(mobile))
    paras = dict(token=ALERT_VOICE_TOKEN, mobile=mobile,
        msg=content)
    phone_url = '%s?%s' % (ALERT_VOICE_API, urllib.urlencode(paras))
    yield AsyncHTTPClient().fetch(phone_url, raise_error=False)
if __name__ == '__main__':
    # Ad-hoc manual test: rebind the endpoint/token in this module's
    # namespace and place one real call synchronously.
    from tornado.ioloop import IOLoop
    ALERT_VOICE_API = 'http://ump.letv.cn:8080/alarm/voice'
    ALERT_VOICE_TOKEN = 'ad21fd9b78c48d7ff367090eaad3e264'
    IOLoop.current().run_sync(lambda: sender(['18349178100'], "电话测试呀!"))
|
from fashion.models import Researcher
from django import forms
from mongodbforms import DocumentForm,EmbeddedDocumentForm, CharField
class ResearcherForm(DocumentForm):
    """Model-backed form for editing a Researcher document.

    Adds a free-text ``lattes_ids`` field on top of the fields derived
    from the Researcher mongoengine document.
    """
    lattes_ids = CharField()
    class Meta:
        model = Researcher
class LoadIDForm(forms.Form):
    """Plain form for bulk-loading researchers by their Lattes ids.

    ``lattes_ids`` accepts a textarea of ids; ``start_year`` and
    ``end_year`` bound the period of interest (kept as text fields —
    no numeric validation is performed here).
    """
    lattes_ids = CharField(widget=forms.Textarea)
    start_year = CharField()
    end_year = CharField()
|
"""
Problem Statement
Джеймс раздобыл любовное письмо, которое его друг Гарри написал своей девуш-
ке. Будучи шутником, Джеймс решил изменить его. Он превратил все слова в палинд-
ромы. В каждом слове он изменял буквы только на меньшие, например, 'd' он мог
превратить в 'c' и это считалось одной операцией. ( Он мог изменять значение
только до буквы 'a', 'a' не может быть изменено в 'z') Найдите минимальное коли-
чество операций для превращения слова в палиндром.
Формат ввода
Первая строка содержит целое число T - количество тестов. Следующие T строк
содержат по одному слову.
Формат вывода
В каждой стоке содержится одно число, соответствующее минимальному количес-
тву операций для данного теста.
Ограничения
1 ≤ T ≤ 10
1 ≤ длина слова ≤ 104
Все символы - только строчные буквы английского алфавита от 'a' до 'z'
Пример.
Ввод
3
abc
abcba
abcd
Вывод
2
0
4
"""
def distance_to_equal_chars(char1, char2):
    """Return how many single-letter decrement steps separate two characters."""
    delta = ord(char1) - ord(char2)
    return delta if delta >= 0 else -delta
def calc_distance_to_palindrome(s):
    """Return the minimum number of decrement operations making *s* a palindrome.

    Each mirrored pair contributes the absolute gap between its two
    letters, since only the larger letter of a pair can be lowered.
    """
    if len(s) <= 1:
        return 0
    return sum(abs(ord(s[i]) - ord(s[-i - 1])) for i in range(len(s) // 2))
# ---------------------------------------------------------------------------- #
num_cases = int(input())  # number of test cases
words = []
for _ in range(num_cases):
    words.append(input())  # one word per test case

for word in words:
    print(calc_distance_to_palindrome(word))
|
# https://www.hackerrank.com/contests/june-world-codesprint/challenges/minimum-distances
import itertools
def difference(a_pair):
    """Return the absolute difference between the two items of *a_pair*."""
    first, second = a_pair
    return abs(first - second)
def list_duplicates(source, item):
    """Return every index at which *item* occurs in *source*, in order."""
    positions = []
    for index, value in enumerate(source):
        if value == item:
            positions.append(index)
    return positions
def min_dist(A):
    """Return the minimum index distance between two equal elements of *A*.

    Returns -1 when no value occurs more than once.  Single O(n) pass:
    the minimum gap between equal values is always between consecutive
    occurrences, so only the last index of each value needs tracking —
    this replaces the original per-value combinations scan.
    """
    last_index = {}
    best = -1
    for i, value in enumerate(A):
        if value in last_index:
            gap = i - last_index[value]
            if best == -1 or gap < best:
                best = gap
        last_index[value] = i
    return best
|
# 水题
grades = []
N = int(input())
minv, maxv = 101, -1
s = 0
for _ in range(N):
t = int(input())
minv = min(t, minv)
maxv = max(t, maxv)
s += t
print(maxv)
print(minv)
print("%.2f"%(s/N)) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.