text stringlengths 8 6.05M |
|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/11/14 12:23
# @Author : Lucas Ma
# @File : process_test2
# Inter-process communication demo.
# The multiprocessing module wraps the low-level machinery and provides Queue,
# Pipes and other ways to exchange data between processes.
# Using Queue as the example: the parent creates two child processes, one that
# writes into the Queue and one that reads from it.
from multiprocessing import Process, Queue
import os, time, random
# 写数据进程执行的代码
def write(q):
    """Producer: push 'a', 'b', 'c' onto the queue, pausing randomly between puts."""
    print("进程开始写数据:%s" % os.getpid())
    for item in ('a', 'b', 'c'):
        print('写入 %s 到 queue ...' % item)
        q.put(item)
        time.sleep(random.random())
# 读数据进程执行的代码
def read(q):
    """Consumer: block on the queue forever, echoing every item received."""
    print('进程开始读数据:%s' % os.getpid())
    while True:
        item = q.get(True)
        print('从 queue 获取到 %s' % item)
if __name__ == '__main__':
    # The parent process owns the Queue and hands it to both children.
    channel = Queue()
    writer = Process(target=write, args=(channel,))
    reader = Process(target=read, args=(channel,))
    # Start the producer and the consumer.
    writer.start()
    reader.start()
    # Wait for the producer to drain its items.
    writer.join()
    # The reader loops forever, so it cannot be joined; kill it instead.
    reader.terminate()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 13 22:37:44 2020
@author: Ferna
"""
import pandas as pd
import math

# Debye-Hückel constants
A = 0.50917
B = 0.32832
# Ion charge and size parameters: index 0 -> Na+, index 1 -> Cl-
Zi = 1.0
ai = [4.0, 3.5]
bi = [0.075, 0.015]
# Concentrations
MM = 58.44  # g/mol, molar mass of NaCl
C_H = 35    # g/L, concentrated stream
C_L = 5     # g/L, dilute stream
df = pd.DataFrame({'Ci (g/L)': [C_L, C_H]})

def Concentracion(fila):
    """Molar concentration (mol/L) from the mass-concentration column."""
    return fila["Ci (g/L)"] / MM

df["Ci (mol/L)"] = df.apply(Concentracion, axis=1)

def Io(fila):
    """Ionic strength for a 1:1 electrolyte (both charges are 1)."""
    return 0.5 * Zi ** 2 * fila["Ci (mol/L)"]

df["F_I"] = df.apply(Io, axis=1)

def g_Na(fila):
    """Activity coefficient of Na+ (extended Debye-Hückel with linear term)."""
    raiz = math.sqrt(fila["F_I"])
    return 10 ** (-A * Zi ** 2 * raiz / (1 + B * ai[0] * raiz) + bi[0] * fila["F_I"])

df["gNa"] = df.apply(g_Na, axis=1)

def g_Cl(fila):
    """Activity coefficient of Cl- (same law, Cl- size/b parameters)."""
    raiz = math.sqrt(fila["F_I"])
    return 10 ** (-A * Zi ** 2 * raiz / (1 + B * ai[1] * raiz) + bi[1] * fila["F_I"])

df["gCl"] = df.apply(g_Cl, axis=1)

def A_Na(fila):
    """Activity of Na+ = molar concentration x activity coefficient."""
    return fila["Ci (mol/L)"] * fila["gNa"]

df["A_Na"] = df.apply(A_Na, axis=1)

def A_Cl(fila):
    """Activity of Cl-."""
    return fila["Ci (mol/L)"] * fila["gCl"]

df["A_Cl"] = df.apply(A_Cl, axis=1)
# Nernst equation constants
R = 8.314    # J/(mol K)
F = 96485    # C/mol, Faraday constant
T = 298.15   # K
# Membrane permselectivities
CEM = 0.99
AEM = 0.95

def E_CEM():
    """Nernst potential across the cation-exchange membrane (V).

    BUG FIX: ``df.loc[1, ["A_Na"]]`` returns a one-element Series, and
    ``math.log`` on a Series is deprecated / raises TypeError in modern
    pandas; scalar indexing ``df.loc[1, "A_Na"]`` returns the float the
    formula actually needs.
    """
    return (CEM * R * T / (Zi * F)) * math.log(df.loc[1, "A_Na"] / df.loc[0, "A_Na"])

print("E_CEM = ")
print(E_CEM())

def E_AEM():
    """Nernst potential across the anion-exchange membrane (V)."""
    return (AEM * R * T / (Zi * F)) * math.log(df.loc[1, "A_Cl"] / df.loc[0, "A_Cl"])

print("E_AEM = ")
print(E_AEM())

def Ecell():
    """Potential of a single cell pair: one CEM plus one AEM (V)."""
    return E_AEM() + E_CEM()

print("E_celda = ")
print(Ecell())

N = 10  # number of cell pairs in the stack

def E():
    """Open-circuit voltage of the whole stack (V)."""
    return N * Ecell()

print("E =")
print(E())
# Conductivities
ksal= 50 # salt water (mS/cm)
kdul= 7 # fresh water (mS/cm)
# Resistances
# NOTE(review): this rebinds A (previously the Debye-Hückel constant) to the
# membrane area in cm2; the constant is not used again after this point.
A= 10*10 #cm2
Relec= 54*A #ohm, electrode resistance scaled by area
R_CEM = 2.0*A #ohm, cation-exchange membrane
R_AEM = 1.7*A #ohm, anion-exchange membrane
dfRmembranas = pd.DataFrame({'Membrana':['CEM','AEM'],
                             'R (ohm)':[R_CEM, R_AEM]})
print(dfRmembranas)
# Compartment (solution) resistances derived from conductivity.
R_H = 1/(ksal*(1000*10)) #ohm, high-concentration compartment
R_L = 1/(kdul*(1000*10)) #ohm, low-concentration compartment
dfcompart = pd.DataFrame({'Compartimiento':['High','Low'],
                          'R (ohm)':[R_H,R_L]})
# Resistance of one cell pair (both compartments + both membranes).
r = R_L + R_H + R_CEM + R_AEM #ohm
print (r)
print("Resistencia interna")
# Internal stack resistance: N cell pairs in series plus the electrodes.
Ri= N*r + Relec #ohm
print(Ri)
# External load resistances to sweep (ohm)
df2 = pd.DataFrame({'Rext (ohm)': [92, 47, 22, 10, 6.8, 5.6, 4.7, 3.3,
                                   2.2, 1.8, 1.2, 0.56, 0.39, 0.22, 0.1, 0]})

def I(fila):
    """Stack current (A) for a given external load: I = E / (Ri + Rext)."""
    return E() / (Ri + fila["Rext (ohm)"])

df2["I (A)"] = df2.apply(I, axis=1)

def E2(fila):
    """Voltage measured over the external load alone (V)."""
    return fila["Rext (ohm)"] * fila["I (A)"]

df2["Evar (V)"] = df2.apply(E2, axis=1)

def Econst(fila):
    """Total voltage including the internal resistance (V)."""
    return (fila["Rext (ohm)"] + Ri) * fila["I (A)"]

df2["Econst (V)"] = df2.apply(Econst, axis=1)

# Plots
import matplotlib.pyplot as plt

plt.plot(df2["I (A)"], df2["Evar (V)"], color="g", label='*Rext')
#plt.plot(df2["I (A)"], df2["Econst (V)"], color= "r", label= 'Rext + Ri')
plt.title('E vs I')
plt.xlabel('I (A)')
plt.ylabel('E (V)')
plt.legend(loc='upper right',
           ncol=2, fancybox=True, shadow=True)
|
from django.contrib import admin

from phoneuser.models import PhoneUser

# Expose PhoneUser in the Django admin site with the default ModelAdmin.
admin.site.register(PhoneUser)
# -*- coding: utf-8 -*-
"""
Gradient-descent demo: walk downhill on f(x) = (x + 5)**2 from x = 5.

Created on Sat Mar 6 11:43:47 2021
@author: anand
"""
# Starting point of the descent
cur_x = 5
# Learning rate (step-size multiplier)
rate = 0.1
# Stop once a step becomes smaller than this
precision = 0.5
previous_step_size = 1
# Safety cap on the number of iterations
max_iters = 1000000
iters = 0

# Derivative of f(x) = (x + 5)**2
def df(x):
    return 2 * (x + 5)

while previous_step_size > precision and iters < max_iters:
    prev_x = cur_x
    # One gradient-descent step
    cur_x -= rate * df(prev_x)
    previous_step_size = abs(cur_x - prev_x)
    iters += 1
    print(f"Iteration{iters} \nX value is {cur_x}")

print(f"The local minimum occurs at {cur_x}")
# scipy.optimize.minimize() can be used to calculate complex functions
|
# Read n grid rows (even rows as-is, odd rows reversed — a boustrophedon
# path), then report the longest run of 'o' characters, where runs are
# flushed every time an 'A' is encountered.
n = int(input())
path = ""
for row in range(n):
    line = input()
    path += line if row % 2 == 0 else line[::-1]

best = 0
run = 0
for ch in path:
    if ch == "o":
        run += 1
    elif ch == "A":
        # An 'A' ends the current run; keep the maximum seen so far.
        best = max(best, run)
        run = 0
# Account for a run that reaches the end of the path.
best = max(best, run)
print(best)
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
# Environment that raises on any undefined template variable instead of
# silently rendering an empty string.
env = Environment(undefined=StrictUndefined)
# Templates are resolved relative to the current working directory.
env.loader = FileSystemLoader('.')

# Context variables fed into the VRF template.
vrf_var = {
    "VRF_NAME": "blue",
    "RD": "100:1",
    "IPv4_ENABLED": True,
    "IPv6_ENABLED": True
}

template_file = 'ex3.j2'
template = env.get_template(template_file)
# Expand the dict into keyword arguments and print the rendered text.
output = template.render(**vrf_var)
print(output)
|
# Generated by Django 3.1.4 on 2020-12-14 13:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Fix the 'contry' typo on Film and make its country FK nullable."""

    dependencies = [
        ('film_app', '0002_auto_20201214_1353'),
    ]

    operations = [
        # Rename the misspelled column to 'country'.
        migrations.RenameField(
            model_name='film',
            old_name='contry',
            new_name='country',
        ),
        # Nullable FK; deleting the Country cascades to its films.
        migrations.AlterField(
            model_name='film',
            name='created_in_country',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='film_app.country'),
        ),
    ]
|
"""Test aid views."""
import pytest
from django.urls import reverse
from tags.models import Tag
from tags.factories import TagFactory
from aids.factories import AidFactory
from aids.models import Aid
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db


def test_draft_list_is_for_authenticated_users_only(client, contributor):
    """Anonymous users cannot access any draft list."""
    drafts_url = reverse('aid_draft_list_view')
    res = client.get(drafts_url)
    # Anonymous request is redirected away.
    assert res.status_code == 302

    # Once logged in, the same URL serves the page.
    client.force_login(contributor)
    res = client.get(drafts_url)
    assert res.status_code == 200


def test_draft_list_only_display_authors_aids(client, contributor):
    """Don't display aids from other users."""
    AidFactory(name='Is this the real life?', author=contributor)
    # Second aid gets a different (factory-generated) author.
    AidFactory(name='Is this just fantasy?')
    client.force_login(contributor)
    drafts_url = reverse('aid_draft_list_view')
    res = client.get(drafts_url)
    content = res.content.decode('utf-8')
    assert 'Is this the real life?' in content
    assert 'Is this just fantasy?' not in content


def test_draft_list_does_not_show_deleted_aids(client, contributor):
    """Deleted aids must be excluded from all queries by default."""
    AidFactory(name='Is this the real life?', author=contributor,
               status='deleted')
    client.force_login(contributor)
    drafts_url = reverse('aid_draft_list_view')
    res = client.get(drafts_url)
    content = res.content.decode('utf-8')
    assert 'Is this the real life?' not in content
def test_aid_creation_requires_logged_in_user(client):
    """Anonymous users cannot create new aids."""
    form_url = reverse('aid_create_view')
    res = client.get(form_url, follow=True)
    # A single redirect lands on the login-request page.
    assert res.status_code == 200
    assert len(res.redirect_chain) == 1
    assert res.redirect_chain[0][0].startswith('/comptes/demande-connexion/')


def test_aid_creation_requires_contributor(client, user):
    """Logged-in users without a contributor profile cannot create aids."""
    client.force_login(user)
    form_url = reverse('aid_create_view')
    res = client.get(form_url, follow=True)
    # Redirected to the contributor-profile page instead of the form.
    assert res.status_code == 200
    assert len(res.redirect_chain) == 1
    assert res.redirect_chain[0][0].startswith('/comptes/profil-contributeur/')


def test_aid_creation_view(client, contributor, aid_form_data):
    """Saving the form creates a new aid."""
    form_url = reverse('aid_create_view')

    # Logged user, access granted
    client.force_login(contributor)
    res = client.get(form_url)
    assert res.status_code == 200

    # No aid exists for this author yet.
    aids = Aid.objects.filter(author=contributor)
    assert aids.count() == 0

    # Posting valid form data creates the aid and redirects.
    aid_form_data['name'] = 'Very unique title'
    res = client.post(form_url, data=aid_form_data)
    assert res.status_code == 302
    assert aids.count() == 1
    assert aids[0].name == 'Very unique title'
    assert aids[0].author == contributor
def test_aid_edition_view(client, contributor, aid_form_data):
    """Test the aid edition form and view."""
    aid = AidFactory(name='First title', author=contributor)

    # Anonymous, no access
    form_url = reverse('aid_edit_view', args=[aid.slug])
    res = client.get(form_url)
    assert res.status_code == 302

    # Logged contributor, access granted
    client.force_login(contributor)
    res = client.get(form_url)
    assert res.status_code == 200

    # Posting the form updates the existing aid in place.
    aids = Aid.objects.filter(author=contributor)
    assert aids.count() == 1
    aid_form_data['name'] = 'New title'
    res = client.post(form_url, data=aid_form_data)
    assert res.status_code == 302
    assert aids.count() == 1
    assert aids[0].name == 'New title'
    assert aids[0].author == contributor


def test_aid_edition_with_existing_tags(client, contributor, aid_form_data):
    """Aid form uses existing tags."""
    aid = AidFactory(name='First title', author=contributor)
    form_url = reverse('aid_edit_view', args=[aid.slug])
    client.force_login(contributor)

    TagFactory(name='pizza')
    TagFactory(name='tartiflette')
    TagFactory(name='gratin')
    tags = Tag.objects.all()
    assert tags.count() == 3

    # Submitting names that already exist must reuse them, not duplicate.
    aid_form_data['tags'] = ['pizza', 'tartiflette', 'gratin']
    res = client.post(form_url, data=aid_form_data)
    assert res.status_code == 302
    aid.refresh_from_db()
    assert set(aid.tags) == set(['pizza', 'gratin', 'tartiflette'])
    tag_names = aid._tags_m2m.values_list('name', flat=True)
    assert set(tag_names) == set(['pizza', 'gratin', 'tartiflette'])
    # Tag count is unchanged: nothing was created.
    assert tags.count() == 3


def test_aid_edition_with_new_tags(client, contributor, aid_form_data):
    """Aid form can create new tags."""
    aid = AidFactory(name='First title', author=contributor)
    form_url = reverse('aid_edit_view', args=[aid.slug])
    client.force_login(contributor)

    TagFactory(name='pizza')
    tags = Tag.objects.all()
    assert tags.count() == 1

    # Two of the submitted tag names do not exist yet.
    aid_form_data['tags'] = ['pizza', 'tartiflette', 'gratin']
    res = client.post(form_url, data=aid_form_data)
    assert res.status_code == 302
    aid.refresh_from_db()
    assert set(aid.tags) == set(['pizza', 'gratin', 'tartiflette'])
    aid_tags = aid._tags_m2m.values_list('name', flat=True)
    assert set(aid_tags) == set(['pizza', 'gratin', 'tartiflette'])
    # The missing tags were created on the fly.
    all_tags = tags.values_list('name', flat=True)
    assert tags.count() == 3
    assert 'gratin' in all_tags
    assert 'tartiflette' in all_tags


def test_aid_edition_does_not_delete_tags(client, contributor, aid_form_data):
    """Unused tags stay in db."""
    TagFactory(name='pizza')
    TagFactory(name='gratin')
    aid = AidFactory(name='First title', author=contributor, tags=[
        'pizza', 'gratin'])
    form_url = reverse('aid_edit_view', args=[aid.slug])
    client.force_login(contributor)

    tags = Tag.objects.all()
    assert tags.count() == 2

    # Removing 'gratin' from the aid must not delete the Tag row itself.
    aid_form_data['tags'] = ['pizza']
    res = client.post(form_url, data=aid_form_data)
    assert res.status_code == 302
    aid.refresh_from_db()
    assert set(aid.tags) == set(['pizza'])
    assert tags.count() == 2
    aid_tags = aid._tags_m2m.values_list('name', flat=True)
    assert set(aid_tags) == set(['pizza'])
    all_tags = tags.values_list('name', flat=True)
    assert set(all_tags) == set(['pizza', 'gratin'])


def test_edition_of_other_users_aid(client, contributor):
    """Editing someone's else aid is forbidden."""
    aid = AidFactory()
    form_url = reverse('aid_edit_view', args=[aid.slug])
    client.force_login(contributor)
    # The view 404s rather than revealing the aid exists.
    res = client.get(form_url)
    assert res.status_code == 404
def test_edition_of_aid_status(client, contributor):
    """Test that the publication workflow works as expected."""
    aid = AidFactory(status='draft', author=contributor)
    client.force_login(contributor)
    update_status_url = reverse('aid_status_update_view', args=[aid.slug])

    res = client.get(update_status_url)
    assert res.status_code == 405  # Method not allowed, only post

    # draft -> reviewable
    res = client.post(update_status_url, {'current_status': 'draft'})
    aid.refresh_from_db()
    assert res.status_code == 302
    assert aid.status == 'reviewable'

    # reviewable -> back to draft
    res = client.post(update_status_url, {'current_status': 'reviewable'})
    aid.refresh_from_db()
    assert res.status_code == 302
    assert aid.status == 'draft'

    # published -> unpublishing returns the aid to draft
    aid.status = 'published'
    aid.save()
    res = client.post(update_status_url, {'current_status': 'published'})
    aid.refresh_from_db()
    assert res.status_code == 302
    assert aid.status == 'draft'


def test_aid_deletion(client, contributor):
    """Test aid deletion."""
    aid = AidFactory(status='published', author=contributor)
    client.force_login(contributor)
    delete_url = reverse('aid_delete_view', args=[aid.slug])
    res = client.post(delete_url, {'confirm': True})
    assert res.status_code == 302
    aid.refresh_from_db()
    # Deletion is soft: the aid is only flagged as deleted.
    assert aid.status == 'deleted'


def test_deletion_requires_confirmation(client, contributor):
    """Without confirmation, aid does not get deleted."""
    aid = AidFactory(status='published', author=contributor)
    client.force_login(contributor)
    delete_url = reverse('aid_delete_view', args=[aid.slug])
    # No 'confirm' key in the POST body.
    res = client.post(delete_url)
    assert res.status_code == 302
    aid.refresh_from_db()
    assert aid.status == 'published'


def test_only_aid_author_can_delete_it(client, contributor):
    """One cannot delete other users' aids."""
    aid = AidFactory(status='published')
    client.force_login(contributor)
    delete_url = reverse('aid_delete_view', args=[aid.slug])
    res = client.post(delete_url, {'confirm': True})
    assert res.status_code == 404
    aid.refresh_from_db()
    assert aid.status == 'published'


def test_aids_under_review_menu_is_for_admin_only(client, contributor):
    """The 'Aides en revue' menu entry is only rendered for superusers."""
    AidFactory(status='reviewable')
    client.force_login(contributor)
    url = reverse('home')
    res = client.get(url)
    assert res.status_code == 200
    assert 'Aides en revue' not in res.content.decode('utf-8')

    # Promote the same user and the entry appears.
    contributor.is_superuser = True
    contributor.save()
    res = client.get(url)
    assert res.status_code == 200
    assert 'Aides en revue' in res.content.decode('utf-8')
|
# Bucket every birthday from bdaylist.txt into a 12-slot hash table
# (one bucket per hash value) and report how entries distribute.
from birthday_prog import Birthday

hashtable = [[] for _ in range(12)]
tot_counter = 0

# Use a context manager so the file is closed (the original leaked the handle).
with open("bdaylist.txt", "r") as file_var:
    for line in file_var:
        # Lines are "day/month/year".
        word_list = line.split("/")
        day = int(word_list[0])
        month = int(word_list[1])
        year = int(word_list[2])
        my_object = Birthday(day, month, year)
        hash_value = hash(my_object)
        # Direct O(1) bucket lookup; the original scanned all 12 buckets per
        # line.  The range guard reproduces the old behaviour of silently
        # skipping hash values outside 0..11.
        if 0 <= hash_value < len(hashtable):
            hashtable[hash_value].append((my_object, line))
        # NOTE(review): indentation was lost in this source; assuming the
        # counter tallies every input line ("Total Lines") — confirm.
        tot_counter += 1

counter = 0
for bucket in hashtable:
    print("Hash location", counter, "has", len(bucket), "elements.")
    counter += 1
print("Total Lines:", tot_counter)
|
# -*- coding: utf-8 -*-
from nipype.interfaces.base import (TraitedSpec, File, isdefined,
traits, OutputMultiPath, InputMultiPath)
from nipype.interfaces.spm.base import (SPMCommand,
scans_for_fnames, SPMCommandInputSpec)
from nipype.utils.filemanip import split_filename
import os
import numpy as np
class NewSegmentInputSpec(SPMCommandInputSpec):
    """Inputs for SPM's unified segmentation batch job.

    Each trait's ``field`` argument names the slot in the SPM batch
    structure that the value is written into.
    """

    # Images to segment (one channel).
    channel_files = InputMultiPath(File(exists=True),
                                   desc="A list of files to be segmented",
                                   field='channel', copyfile=False, mandatory=True)
    # (bias regularisation, bias FWHM, (save corrected, save field))
    channel_info = traits.Tuple(traits.Float(), traits.Float(),
                                traits.Tuple(traits.Bool, traits.Bool),
                                desc="""A tuple with the following fields:
- bias reguralisation (0-10)
- FWHM of Gaussian smoothness of bias
- which maps to save (Corrected, Field) - a tuple of two boolean values""",
                                field='channel')
    # Per-tissue settings: ((tpm file, frame), ngaus, (native, dartel), (unmod, mod))
    tissues = traits.List(traits.Tuple(traits.Tuple(File(exists=True), traits.Int()), traits.Int(),
                                       traits.Tuple(traits.Bool, traits.Bool), traits.Tuple(traits.Bool, traits.Bool)),
                          desc="""A list of tuples (one per tissue) with the following fields:
- tissue probability map (4D), 1-based index to frame
- number of gaussians
- which maps to save [Native, DARTEL] - a tuple of two boolean values
- which maps to save [Unmodulated, Modulated] - a tuple of two boolean values""",
                          field='tissue')
    affine_regularization = traits.Enum('mni', 'eastern', 'subj', 'none', field='warp.affreg',
                                        desc='mni, eastern, subj, none ')
    warping_regularization = traits.List(traits.Float(), field='warp.reg',
                                         minlen=5, maxlen=5,
                                         desc=('controls balance between '
                                               'parameters and data'))
    # Markov-random-field cleanup strength.
    mrf = traits.Float(field='warp.mrf', desc='MRF parameter')
    cleanup = traits.Float(field='warp.cleanup', desc='cleanup parameter')
    warp_fwhm = traits.Float(field='warp.fwhm', desc='warping fwhm')
    sampling_distance = traits.Float(field='warp.samp',
                                     desc='Sampling distance on data for parameter estimation')
    write_deformation_fields = traits.List(traits.Bool(), minlen=2, maxlen=2, field='warp.write',
                                           desc="Which deformation fields to write:[Inverse, Forward]")
class NewSegmentOutputSpec(TraitedSpec):
    """Outputs of NewSegment; the nested lists are indexed by tissue class."""

    native_class_images = traits.List(traits.List(File(exists=True)), desc='native space probability maps')
    dartel_input_images = traits.List(traits.List(File(exists=True)), desc='dartel imported class images')
    normalized_class_images = traits.List(traits.List(File(exists=True)), desc='normalized class images')
    modulated_class_images = traits.List(traits.List(File(exists=True)), desc='modulated+normalized class images')
    transformation_mat = OutputMultiPath(File(exists=True), desc='Normalization transformation')
    bias_corrected_images = OutputMultiPath(File(exists=True), desc='bias corrected images')
    bias_field_images = OutputMultiPath(File(exists=True), desc='bias field images')
    # Deformation fields written when write_deformation_fields is set.
    forward_deformation_field = OutputMultiPath(File(exists=True))
    inverse_deformation_field = OutputMultiPath(File(exists=True))
class NewSegment(SPMCommand):
    """Nipype wrapper for SPM's NewSegment / Segment batch job."""

    input_spec = NewSegmentInputSpec
    output_spec = NewSegmentOutputSpec

    def __init__(self, **inputs):
        # SPM12 moved the job from tools/preproc8 to spatial/preproc;
        # choose the batch location from the locally installed version.
        _local_version = SPMCommand().version
        if _local_version and '12.' in _local_version:
            self._jobtype = 'spatial'
            self._jobname = 'preproc'
        else:
            self._jobtype = 'tools'
            self._jobname = 'preproc8'
        SPMCommand.__init__(self, **inputs)

    def _format_arg(self, opt, spec, val):
        """convert input to appropriate format"""
        if opt in ['channel_files', 'channel_info']:
            # structure have to be recreated, because of some weird traits error
            new_channel = {}
            new_channel['vols'] = scans_for_fnames(self.inputs.channel_files)
            if isdefined(self.inputs.channel_info):
                info = self.inputs.channel_info
                new_channel['biasreg'] = info[0]
                new_channel['biasfwhm'] = info[1]
                # Booleans become 0/1 flags for the MATLAB batch.
                new_channel['write'] = [int(info[2][0]), int(info[2][1])]
            return [new_channel]
        elif opt == 'tissues':
            new_tissues = []
            for tissue in val:
                new_tissue = {}
                # SPM expects 'path,frame' as one comma-joined string.
                new_tissue['tpm'] = np.array([','.join([tissue[0][0], str(tissue[0][1])])], dtype=object)
                new_tissue['ngaus'] = tissue[1]
                new_tissue['native'] = [int(tissue[2][0]), int(tissue[2][1])]
                new_tissue['warped'] = [int(tissue[3][0]), int(tissue[3][1])]
                new_tissues.append(new_tissue)
            return new_tissues
        elif opt == 'write_deformation_fields':
            return super(NewSegment, self)._format_arg(opt, spec, [int(val[0]), int(val[1])])
        else:
            return super(NewSegment, self)._format_arg(opt, spec, val)

    def _list_outputs(self):
        """Predict output filenames from SPM's naming conventions
        (c*/rc*/wc*/mwc* class images, *_seg8.mat, iy_*/y_* deformations,
        m*/BiasField_* bias outputs)."""
        outputs = self._outputs().get()
        outputs['native_class_images'] = []
        outputs['dartel_input_images'] = []
        outputs['normalized_class_images'] = []
        outputs['modulated_class_images'] = []
        outputs['transformation_mat'] = []
        outputs['bias_corrected_images'] = []
        outputs['bias_field_images'] = []
        outputs['inverse_deformation_field'] = []
        outputs['forward_deformation_field'] = []

        # One sub-list per tissue class (SPM defaults to 5 when unset).
        n_classes = 5
        if isdefined(self.inputs.tissues):
            n_classes = len(self.inputs.tissues)
        for i in range(n_classes):
            outputs['native_class_images'].append([])
            outputs['dartel_input_images'].append([])
            outputs['normalized_class_images'].append([])
            outputs['modulated_class_images'].append([])

        for filename in self.inputs.channel_files:
            pth, base, ext = split_filename(filename)
            if isdefined(self.inputs.tissues):
                # Only the maps each tissue asked for are listed.
                for i, tissue in enumerate(self.inputs.tissues):
                    if tissue[2][0]:
                        outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i + 1, base)))
                    if tissue[2][1]:
                        outputs['dartel_input_images'][i].append(os.path.join(pth, "rc%d%s.nii" % (i + 1, base)))
                    if tissue[3][0]:
                        outputs['normalized_class_images'][i].append(os.path.join(pth, "wc%d%s.nii" % (i + 1, base)))
                    if tissue[3][1]:
                        outputs['modulated_class_images'][i].append(os.path.join(pth, "mwc%d%s.nii" % (i + 1, base)))
            else:
                # Default behaviour: native maps for every class.
                for i in range(n_classes):
                    outputs['native_class_images'][i].append(os.path.join(pth, "c%d%s.nii" % (i + 1, base)))
            outputs['transformation_mat'].append(os.path.join(pth, "%s_seg8.mat" % base))

            if isdefined(self.inputs.write_deformation_fields):
                if self.inputs.write_deformation_fields[0]:
                    outputs['inverse_deformation_field'].append(os.path.join(pth, "iy_%s.nii" % base))
                if self.inputs.write_deformation_fields[1]:
                    outputs['forward_deformation_field'].append(os.path.join(pth, "y_%s.nii" % base))

            if isdefined(self.inputs.channel_info):
                if self.inputs.channel_info[2][0]:
                    outputs['bias_corrected_images'].append(os.path.join(pth, "m%s.nii" % (base)))
                if self.inputs.channel_info[2][1]:
                    outputs['bias_field_images'].append(os.path.join(pth, "BiasField_%s.nii" % (base)))
        return outputs
|
import functools
import json
from enigma_docker_common import storage
from enigma_docker_common import config
from enigma_docker_common.logger import get_logger
logger = get_logger('bootstrap-loader')
class BootstrapLoader:
    """Loads a bootstrap node's libp2p key material from local or Azure storage.

    The key file is fetched lazily on first access and cached on the
    instance; ``address``, ``key`` and ``public`` expose its parsed fields.
    """

    bootstrap_file_name = "bootstrap_addresses.json"

    def __init__(self, cfg: config.Config, bootstrap_id: str = ''):
        self.env = cfg.get('ENIGMA_ENV', 'COMPOSE')
        self.bootstrap_id = bootstrap_id
        if self.env == 'COMPOSE':
            # Local development: both stores read the same directory.
            self.storage = storage.LocalStorage(directory=cfg["LOCAL_LIBP2P_KEY_PATH"])
            self.storage_public = storage.LocalStorage(directory=cfg["LOCAL_LIBP2P_KEY_PATH"])
        else:
            # Deployed environments read from Azure blob containers.
            self.storage = storage.AzureContainerFileService(directory='bootstrap')
            self.storage_public = storage.AzureContainerFileService(directory='bootstrap-public')
        # Parsed key-file fields, populated lazily by load().
        self._address: str = ''
        self._key: str = ''
        self._public: str = ''
        # Raw JSON text of the key file; empty string means "not loaded yet".
        self.keyfile: str = ''

    def all_bootstraps(self) -> str:
        """Return the shared bootstrap-address list as decoded text."""
        return self.storage_public[self.bootstrap_file_name].decode()

    def to_json(self):
        """Return the raw key-file JSON, loading it on first call."""
        self.load()
        return self.keyfile

    def load(self):
        """Fetch and parse the key file once; later calls are no-ops."""
        if self.env != 'COMPOSE' and not self.storage.credential:
            raise RuntimeError('Cannot get bootstrap configuration from '
                               'Azure storage without parameter: STORAGE_CONNECTION_STRING')
        if not self.keyfile:
            logger.info(f'Bootstrap ID: {self.bootstrap_id}')
            self.keyfile = self._get_file(self.bootstrap_id)
            as_dict = json.loads(self.keyfile)
            logger.info(f'Got bootstrap configuration file: {as_dict}')
            self._address = as_dict["id"]
            self._key = as_dict["privKey"]
            self._public = as_dict["pubKey"]

    @property
    def address(self):
        # Lazily loaded peer id ("id" field of the key file).
        self.load()
        return self._address

    @property
    def key(self):
        # Lazily loaded private key.
        self.load()
        return self._key

    @property
    def public(self):
        # Lazily loaded public key.
        self.load()
        return self._public

    def _get_file(self, file_name) -> bytes:
        """Fetch a file from storage, logging and re-raising failures."""
        try:
            return self.storage[file_name]
        except PermissionError as e:
            logger.error(f'Failed to get file, probably missing credentials. {e}')
            raise
        except ValueError as e:  # not sure what Exceptions right now
            logger.error(f'Failed to get file: {e}')
            raise
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
strinit = "null value"  # default placeholder (not used elsewhere in this chunk)


class SmallSMILHandler(ContentHandler):
    """SAX handler that extracts <pregunta>/<respuesta> text from a joke file.

    Fixes over the original:
    - ``self.root-layout`` is not a valid assignment target (it parsed as a
      subtraction and failed at runtime); renamed to ``root_layout``.
    - Flags and text accumulators are initialised in ``__init__`` so
      ``characters()`` cannot hit an AttributeError before the first tag.
    - Python-2 ``print`` statements converted to Python-3 calls.
    """

    def __init__(self):
        ContentHandler.__init__(self)
        self.root_layout = {'w': '', 'h': '', 'bc': ''}
        self.region = []
        self.img = []
        self.audio = []
        self.textstream = []
        # State must exist before any SAX callback fires.
        self.inPregunta = 0
        self.inRespuesta = 0
        self.pregunta = ""
        self.respuesta = ""

    def startElement(self, name, attrs):
        """Track which tag is open and grab attribute values."""
        if name == 'chiste':
            # Read the attribute value (empty string when absent).
            self.root_layout['w'] = attrs.get('width', "")
        elif name == 'pregunta':
            self.inPregunta = 1
            self.pregunta = ""
        elif name == 'respuesta':
            self.inRespuesta = 1
            self.respuesta = ""

    def endElement(self, name):
        """Called when a closing tag is reached."""
        if name == 'pregunta':
            self.inPregunta = 0
        if name == 'respuesta':
            self.inRespuesta = 0

    def characters(self, char):
        """Accumulate and echo the text content of the open tag."""
        if self.inPregunta:
            self.pregunta = self.pregunta + char
            print()
            print("Pregunta: " + self.pregunta)
        if self.inRespuesta:
            self.respuesta += char
            print("Respuesta: " + self.respuesta)
            print()
print
if __name__ == "__main__":
    # Entry point: parse the jokes file with the handler defined above.
    parser = make_parser()
    # BUG FIX: the original instantiated `ChistesHandler`, a name that is
    # never defined; the class in this module is SmallSMILHandler.
    cHandler = SmallSMILHandler()
    parser.setContentHandler(cHandler)
    parser.parse(open('chistes2.xml'))
|
def standard_units(any_numbers):
    """Convert any array of numbers to standard units (mean 0, SD 1)."""
    centered = any_numbers - np.average(any_numbers)
    return centered / np.std(any_numbers)
def correlation(t, x, y):
    """Return the correlation coefficient (r) of two variables."""
    su_x = standard_units(t.column(x))
    su_y = standard_units(t.column(y))
    return np.mean(su_x * su_y)
def slope(t, x, y):
    """The slope of the regression line (original units): r * SD(y) / SD(x)."""
    return correlation(t, x, y) * np.std(t.column(y)) / np.std(t.column(x))
def intercept(t, x, y):
    """The intercept of the regression line (original units)."""
    mean_x = np.mean(t.column(x))
    mean_y = np.mean(t.column(y))
    return mean_y - slope(t, x, y) * mean_x
def fit(t, x, y):
    """The fitted values along the regression line."""
    m = slope(t, x, y)
    c = intercept(t, x, y)
    return m * t.column(x) + c
def plot_residuals(t, x, y):
    """Plot a scatter diagram and residuals.

    Prints r and the RMSE, then draws two charts: the data with its fitted
    line, and the residuals against x.
    """
    t.scatter(x, y, fit_line=True)
    actual = t.column(y)
    fitted = fit(t, x, y)
    residuals = actual - fitted
    print('r:', correlation(t, x, y))
    # Root-mean-squared error of the fit.
    print('RMSE:', np.mean(residuals**2)**0.5)
    t.select(x).with_column('Residual', residuals).scatter(0, 1)
|
from threading import Thread
import sys, time
import numpy as np
from Config import Config
class ThreadReader(Thread):
    """Daemon thread that pumps items from a remote queue into a local one."""

    def __init__(self, remote_q, local_q):
        super(ThreadReader, self).__init__()
        # Thread.setDaemon() is deprecated (removed in Python 3.13);
        # assigning the `daemon` attribute is the supported spelling.
        self.daemon = True
        self.remote_q = remote_q
        self.local_q = local_q
        self.exit_flag = False

    def run(self):
        """Forward items until exit_flag is set (checked between items).

        Note: ``get()`` blocks, so the flag is only observed after the
        next item arrives.
        """
        while not self.exit_flag:
            data = self.remote_q.get()
            self.local_q.put(data)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Set `created` as the field used by Post.objects.latest()."""

    dependencies = [
        ('blog', '0011_auto_20150402_0816'),
    ]

    operations = [
        # Only model Meta options change; no schema migration happens here.
        migrations.AlterModelOptions(
            name='post',
            options={'get_latest_by': 'created'},
        ),
    ]
|
from model.vehicle_handling.vehicle import Player, Enemy
from model.vehicle_handling.spawn_enemies import spawn_chance
class GameModel:
    """Holds all vehicles (one or two players plus enemies) and advances them.

    The player is always vehicles[0]; a second player, when present, is
    vehicles[1].
    """
    # player = Player(400, 400, 20, 20, 1, 8, 1, 8)
    # vehicles.append(Enemy(1, "enemy car", 400, 500))

    def __init__(self, num_players=1):
        # Two-player mode spawns the second player offset to x=600.
        if num_players == 2:
            self.vehicles = [Player(0), Player(1, "player2", x=600)]
            self.player2 = self.vehicles[1]
        else:
            self.vehicles = [Player(0)]
            self.player2 = None
        self.player = self.vehicles[0]

    """ methods """
    def update(self):
        # spawn_chance(self.vehicles)
        """ updates location of all vehicles """
        for i, item in enumerate(self.vehicles):
            self.vehicles[i].update_location(self.vehicles)
        """ checks for which vehicle to despawn next """
        for i, item in enumerate(self.vehicles):
            if isinstance(self.vehicles[i], Enemy):
                self.vehicles[i].check_to_despawn(self.vehicles)
        # Falling below the screen costs 10 health (per player).
        if self.player.is_below_screen():
            self.player.health -= 10
        if self.player2 is not None and self.player2.is_below_screen():
            self.player2.health -= 10
        # Score ticks up once per update for player 1.
        self.player.score += 1

    def check_if_player_is_alive(self, player):
        # Thin delegation to the vehicle's own liveness check.
        return player.is_alive()

    """ getters """
    @property
    def vehicles(self):
        return self.__vehicles

    @property
    def player(self):
        return self.__player

    """ setters """
    @vehicles.setter
    def vehicles(self, vehicles):
        self.__vehicles = vehicles

    @player.setter
    def player(self, player):
        self.__player = player
|
# Let G = <N, A> be a Graph with N nodes and A Edges, let G' = <N, T> be a partial graph of
# that one, it must have, at least, N - 1 Edges to be connected.
# A graph with N nodes and more than N - 1 edges contains at least one cycle, so we can
# remove up to A - (N - 1) edges from a graph with A edges and still keep it connected
# (removing only edges that are part of a cycle, of course)
# G' is called the minimum spanning tree for G
# So, given a graph, find a minimum spanning tree with the least cost among it's edges (All edges have an
# Associated cost)
# Kruskal's solution
# In each step, choose the least-cost edge from the list of edges in G, if it doesn't form a cycle, add
# it to the solution, if it does, reject it and never consider it again
# selecting edges in this fashion will create groups of connected nodes that will eventually merge together
# to form the solution
# An edge forms a cycle if it does not add a new node to a group of connected elements, that is, an edge
# that connects two groups does not add new nodes to the solution, but is valid, an edge that does not
# connect two groups and adds no new nodes to the solution isn't
# We need:
# A list of candidates
# A list of selected elements
# Edge: 2-tuple ([node_a, node_b], cost); the graph is undirected, so an
# edge works in both directions.
edges = [([0, 1], 1), ([1, 2], 2), ([2, 3], 4), ([3, 4], 1), ([4, 5], 5), ([5, 0], 1),
         ([5, 1], 2), ([5, 2], 1), ([5, 3], 3)]
# Each node starts alone in its own connected group.
node_groups = [{0}, {1}, {2}, {3}, {4}, {5}]
selected_edges = []
number_of_nodes = len(node_groups)

# Kruskal: examine edges in ascending order of cost.
edges.sort(key=lambda e: e[1])

# A spanning tree of N nodes has exactly N - 1 edges.
while len(selected_edges) != number_of_nodes - 1:
    # Take (and consume) the cheapest remaining edge.
    candidate = edges.pop(0)
    u, v = candidate[0]
    # Find the group each endpoint currently belongs to.
    group_1 = [g for g in node_groups if u in g]
    group_2 = [g for g in node_groups if v in g]
    if group_2 != group_1:
        # Endpoints live in different groups: the edge joins two components
        # and cannot form a cycle, so accept it and merge the groups.
        print(node_groups)
        print(group_1)
        print(group_2)
        node_groups.remove(group_1[0])
        node_groups.remove(group_2[0])
        node_groups.append(group_1[0].union(group_2[0]))
        selected_edges.append(candidate)
print(selected_edges)
|
#!/usr/bin/python3
# @Author: Safer
# @Date: 2016-12-01 01:40:55
# @Last Modified by: Safer
# @Last Modified time: 2016-12-04 17:04:39
import sys
import res
from PyQt5.QtWidgets import QApplication, QDialog, QPushButton, QToolButton, QLabel, QDockWidget
from PyQt5.QtWidgets import QFormLayout, QVBoxLayout, QHBoxLayout, QGraphicsDropShadowEffect
from PyQt5.QtCore import pyqtSlot, pyqtSignal
from PyQt5 import Qt, QtCore, QtGui
class DialogFloat(QDialog):
    """Frameless, translucent floating dialog drawn as a rounded image button.

    The window can be dragged with the left mouse button; styling comes from
    a Qt resource stylesheet plus inline QSS overrides.
    """

    def __init__(self, parent=None):
        super(DialogFloat, self).__init__(parent)
        self._init_ui()

    def _init_ui(self):
        """Configure the frameless 70x70 window, add the label, and show it."""
        self._centerPosition(70, 70)
        self.setWindowFlags(Qt.Qt.FramelessWindowHint)
        self.setWindowTitle("QLinearGradient Vertical Gradient ")
        self.setAttribute(Qt.Qt.WA_TranslucentBackground)
        self._style()
        title = QLabel('宝', self)
        title.setObjectName("title")
        title.setAlignment(Qt.Qt.AlignCenter)
        title.move(22, 25)
        self.show()

    def _click(self):
        print('click')

    def _style(self):
        """Load the QSS from the resource file and append local overrides."""
        _file = QtCore.QFile(':/style.qss')
        _file.open(QtCore.QFile.ReadOnly)
        styleSheet = _file.readAll()
        styleSheet = str(styleSheet, encoding='utf8')
        styleSheet += """
QDialog{
}
QLabel{
width:70px;
height:70px;
font-size:25px;
}
QLabel:hover{
font-size:26px;
color:#ff4400;
}
"""
        self.setStyleSheet(styleSheet)
        self.setAttribute(Qt.Qt.WA_TranslucentBackground)

    def _centerPosition(self, width, height):
        """Center a width x height window on the primary screen."""
        desktop = QApplication.desktop()
        screen = desktop.screenGeometry()
        swidth = screen.width()
        sheight = screen.height()
        # BUG FIX: "/" produces floats and QWidget.setGeometry requires ints
        # under PyQt5 / Python 3; use integer division.
        left = (swidth - width) // 2
        top = (sheight - height) // 2
        self.setGeometry(left, top, width, height)

    def paintEvent(self, ev):
        """Paint the rounded image background with an invisible outline."""
        painter = QtGui.QPainter(self)
        brush = QtGui.QBrush(QtGui.QImage('res/img/test.png'))
        painter.setBrush(brush)
        # Fully transparent pen: no border around the rounded rect.
        painter.setPen(QtGui.QColor.fromRgb(0, 0, 0, 0))
        painter.drawRoundedRect(0, 0, 70, 70, 50.0, 50.0)

    def mousePressEvent(self, event):
        # BUG FIX: the original unconditionally called
        # self.trayIcon.showMessage(...), but trayIcon is never assigned
        # anywhere in this class, so every press raised AttributeError and
        # drag tracking never started.  Only notify when a tray icon has
        # been attached externally.
        if hasattr(self, 'trayIcon'):
            self.trayIcon.showMessage('title', 'content', None, 2000)
        print(event)
        if event.button() == QtCore.Qt.LeftButton:
            self.mousePress = True
            self.dragPosition = event.globalPos() - self.frameGeometry().topLeft()
            event.accept()

    def mouseReleaseEvent(self, event):
        if event.button() == QtCore.Qt.LeftButton:
            self.mousePress = False
            event.accept()

    def mouseMoveEvent(self, event):
        if event.buttons() == QtCore.Qt.LeftButton:
            try:
                if not self.mousePress:
                    return
            except AttributeError:
                # No press registered yet; nothing to drag.
                return
            self.move(event.globalPos() - self.dragPosition)
import sys
from PyQt5.QtWidgets import QApplication

if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Keep a reference so the dialog is not garbage-collected immediately.
    dm1 = DialogFloat(None)
    sys.exit(app.exec_())
|
# Problem 1: cumulative sum 1..num.
print("===문제1===")
num = int(input("숫자 입력 : "))
count = 1
result = 0
while count <= num:
    result += count
    count += 1
print("1부터 %d까지의 누적합계 : %d"%(num, result))

# Problem 2: print a greeting ten times.
print("===문제2===")
count = 0  # start counting from 0
while count < 10:  # 0 ~ 9 : repeats 10 times
    print("Hello Python")
    count += 1

# Problem 3 (alternative exercise, intentionally left disabled):
# print("===문제3===")
# result = 0
# num = 1
# while num != 0:
#     num = int(input("숫자 입력(0 입력 시 종료) : "))
#     result += num
# print("합계 : %d"%result)

# Problem 4: print the decimal digits of num in reverse order.
print("===문제4===")
num = int(input("거꾸로 할 정수 입력 : "))
# 1234 % 10 => 4
# 123 % 10 => 3
# 12 % 10 => 2
# 1 % 10 => 1
while num > 0:
    print(num % 10,end="")
    num //= 10
# Alternative: build the reversed number arithmetically instead:
#1234 % 10 => 4
#123 % 10 => 40 + 3
#12 % 10 => 430 + 2
#1 % 10 => 4320 + 1
# reverse_num = 0
# while num > 0:
#     reverse_num = reverse_num * 10 + num % 10
#     num //= 10
# print(reverse_num)

# Problem 5: print the multiplication table row num*1 .. num*num.
print("===문제5===")
num = int(input("정수 입력 : "))
count = 1
while count <= num:
    print("%d"%(num * count),end=" ")
    count += 1
|
def count_text_string(search_for, search_in):
    """
    Recursively count the occurrences (including overlapping ones) of
    search_for in search_in.
    :param search_for: word to be searched for
    :param search_in: text to be searched in
    :return: number of occurrences (0 for an empty word or empty text)
    """
    if search_for == "" or search_in == "":
        # Robustness fix: the original indexed [0] on an empty needle.
        return 0
    if search_for[0] == search_in[0]:
        word = search_for
        line = search_in
        matched = 0
        # Compare position by position; stop when either string runs out.
        # (The original kept indexing after `line` was exhausted, raising
        # IndexError whenever the text ended in the middle of a match.)
        while word != "" and line != "":
            if word[0] == line[0]:
                matched = matched + 1
            word = word[1:]
            line = line[1:]
        # A full occurrence means every position of the word matched.
        if matched == len(search_for):
            return 1 + count_text_string(search_for, search_in[1:])
    return count_text_string(search_for, search_in[1:])
def check_head(string_1, string_2):
    """
    Report whether the two strings begin with the same character.
    :param string_1: first non-empty string
    :param string_2: second non-empty string
    :return: 1 on a match, 0 otherwise
    """
    heads_match = string_1[0] == string_2[0]
    return 1 if heads_match else 0
def length_string(string):
    """
    Compute the length of the string without using len().
    :param string: any string
    :return: number of characters
    """
    return sum(1 for _ in string)
def count_text_file(search_for, text_file_name):
    """
    Print every line of the file that contains search_for, then the total
    number of occurrences across the whole file.
    :param search_for: text to search for
    :param text_file_name: name of the file to search in
    :return: None (results are printed)
    """
    total = 0
    # 'with' fixes the original's leaked file handle.
    with open(text_file_name) as file:
        for line in file:
            # Count once per line; the original ran the recursive search twice.
            hits = count_text_string(search_for, line)
            if hits > 0:
                print(line.strip())
            total = total + hits
    print(total)
def main():
    """
    Entry point: prompt for a search word and a file name, then run the
    file search.
    :return: None
    """
    word = input("Enter search word:")
    filename = input("Enter file name:")
    count_text_file(word, filename)

main()
import numpy as np
import matplotlib.pyplot as plt
# Sweep a over [-5, 5] and evaluate f = arcsin(x/a) and cos(f) at fixed x = 2.
a = np.linspace(-5,5,50)
xs=[]
xsdot=[]
x = 2
for i in range(50):
    # NOTE(review): for |a[i]| < 2 the ratio x/a[i] leaves [-1, 1], so
    # np.arcsin returns nan (with a RuntimeWarning) -- confirm intended.
    f = np.arcsin(x/a[i])
    fdot = np.cos(f)
    xs.append(f)
    xsdot.append(fdot)
print(a)
print(xs)
plt.plot(a,xs,'b')      # arcsin(x/a) in blue
plt.plot(a,xsdot,'r')   # cos(arcsin(x/a)) in red
plt.show()
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  # Regression fixture: each target runs a postbuild that fails, followed by
  # a second postbuild, so tests can observe whether steps after a failing
  # postbuild still execute.
  'targets': [
    {
      # Plain (non-bundle) static library variant.
      'target_name': 'nonbundle',
      'type': 'static_library',
      'sources': [ 'file.c', ],
      'postbuilds': [
        {
          'postbuild_name': 'Postbuild Fail',
          'action': [ './postbuild-fail.sh', ],
        },
        {
          'postbuild_name': 'Runs after failing postbuild',
          'action': [ './touch-static.sh', ],
        },
      ],
    },
    {
      # Same scenario for a mac_bundle shared library.
      'target_name': 'bundle',
      'type': 'shared_library',
      'mac_bundle': 1,
      'sources': [ 'file.c', ],
      'postbuilds': [
        {
          'postbuild_name': 'Postbuild Fail',
          'action': [ './postbuild-fail.sh', ],
        },
        {
          'postbuild_name': 'Runs after failing postbuild',
          'action': [ './touch-dynamic.sh', ],
        },
      ],
    },
  ],
}
|
#!/usr/bin/env python
#coding: utf-8
from licant.modules import submodule
from licant.cxx_modules import application
import licant
from licant.scripter import scriptq

# Pull in the gxx build definitions from the repository root.
scriptq.execute("../../gxx.g.py")

# Describe the 'target' application: one C source, with gxx configured
# for POSIX and debug printing routed to stdout.
application("target",
    sources = ["main.c"],
    include_paths = ["../.."],
    include_modules = [
        ("gxx", "posix"),
        ("gxx.dprint", "stdout")
    ]
)

# Hand control to licant's CLI; 'target' is the default build goal.
licant.ex(default="target")
def bmi(w, h):
    """Classic body-mass index: weight divided by the square of height."""
    return w / (h * h)
def bmi2(w, h):
    """Trefethen's "new BMI" variant: 1.3 * weight / height ** 2.5."""
    scaled = 1.3 * w
    return scaled / h ** 2.5
def range_f(start, stop, step):
    """Float analogue of range(): values from start to stop inclusive.

    Each emitted value is rounded to one decimal place; the accumulator
    itself is not rounded, matching the original behaviour (so float error
    can accumulate over many steps).

    Renamed parameters: the originals shadowed the builtins min/max.
    """
    values = []
    while start <= stop:
        values.append(round(start, 1))
        start += step
    return values
def cross(a, b):
    """Return the Cartesian product of two lists as a flat list of (x, y).

    Order matches the original index formula ans[i*len(b) + j] = (a[i], b[j]).
    Removed: the leftover debug prints of len(a) / len(b).
    """
    # Kept for interface compatibility; raising TypeError would be cleaner.
    assert(type(a) == list)
    assert(type(b) == list)
    return [(x, y) for x in a for y in b]
def every_ten(x):
    """Return every tenth element of x (indices 10, 20, 30, ...).

    Bug fix: the original iterated over the global ``s`` instead of the
    parameter ``x``, so it ignored its argument entirely.
    """
    picked = []
    count = 0
    for item in x:
        if count == 10:
            picked.append(item)
            count = 0
        count += 1
    return picked
if __name__ == "__main__":
    # Build a grid of (weight, height) samples.
    # NOTE(review): heights 160-180 look like centimetres, so the bmi/bmi2
    # values printed below are not on the standard m-based scale -- confirm.
    w = list(range_f(50, 70, 0.3))
    #print(w)
    h = list(range(160, 180, 1))
    s = cross(w, h)
    del w
    del h
    for i in s:
        w, h = i  # reuse the names for the per-sample pair
        print((w, h), bmi(w, h), bmi2(w, h))
|
class Obstacle:
    """A scaled polygon obstacle drawable on a Tk canvas."""

    color = "gray"        # fill colour
    stipple = "gray75"    # stipple pattern (partial transparency)

    def __init__(self, xy, scale=1):
        # Flattened coordinate list; every value is scaled to canvas units.
        self.xy = [int(x) * scale for x in xy]

    def text(self):
        """Print each coordinate (debug helper).

        Bug fix: the original lacked ``self`` and referenced an undefined
        global ``xy``, so calling it always failed.
        """
        for x in self.xy:
            print(x)

    def draw(self, canvas):
        """Draw the polygon on *canvas* and return its canvas item id."""
        return canvas.create_polygon(self.xy, stipple=self.stipple, fill=self.color)
|
import os

filename = '15464657761111111.pdf'
pathDir = 'F:/tqcs/sr'
# Check whether the file exists in the target directory.
if os.path.exists(pathDir + '/' + filename):
    print(filename + '文件在' + pathDir + '中存在 ! ')
else:
    # Bug fix: mode 'wr' is invalid and raised ValueError on open();
    # 'w' creates/truncates the file, and 'with' guarantees it is closed.
    with open('F:/tqcs/msmj.txt', 'w') as file:
        print(filename + '文件不存在sr目录下,将名字写入到msmj.txt文件中 ! ')
        # Record the missing file's name.
        file.write(filename)
|
# Generated by Django 2.0.5 on 2018-06-05 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional two-letter 'language' choice field to Book."""

    dependencies = [
        ('catalog', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='language',
            # Restricted to EN/CN/FR; blank allowed, defaults to English.
            field=models.CharField(blank=True, choices=[('EN', 'English'), ('CN', 'Chinese'), ('FR', 'French')], default='EN', max_length=2),
        ),
    ]
|
from Simulator_Wrapper import *
def get_crossover_name(crossover: int) -> str:
    """Human-readable label for a Crossover_Algorithm constant (None if unknown)."""
    labels = (
        (Crossover_Algorithm.Partially_Matched, "Partially Matched"),
        (Crossover_Algorithm.Order, "Order"),
        (Crossover_Algorithm.Cycle_all_cycles, "Cycle (all)"),
        (Crossover_Algorithm.Cycle_one_cycle, "Cycle (one)"),
        (Crossover_Algorithm.Edge_Recombination, "Edge Recombination"),
    )
    for algorithm, label in labels:
        if crossover == algorithm:
            return label
def get_crossover_value(crossover: int):
    """Map an input back onto the matching Crossover_Algorithm constant (None if unknown)."""
    for algorithm in (Crossover_Algorithm.Partially_Matched,
                      Crossover_Algorithm.Order,
                      Crossover_Algorithm.Cycle_all_cycles,
                      Crossover_Algorithm.Cycle_one_cycle,
                      Crossover_Algorithm.Edge_Recombination):
        if crossover == algorithm:
            return algorithm
def get_marriage_name(marriage: int) -> str:
    """Human-readable label for a Marriage_Algorithm constant (None if unknown)."""
    labels = (
        (Marriage_Algorithm.Roulette_Reversed, "Roulette Reversed"),
        (Marriage_Algorithm.Roulette_Reversed_Distinct, "Distinct Roulette Reversed"),
    )
    for algorithm, label in labels:
        if marriage == algorithm:
            return label
def get_marriage_value(marriage: int):
    """Map an input back onto the matching Marriage_Algorithm constant (None if unknown)."""
    for algorithm in (Marriage_Algorithm.Roulette_Reversed,
                      Marriage_Algorithm.Roulette_Reversed_Distinct):
        if marriage == algorithm:
            return algorithm
def get_mutation_name(mutation: int) -> str:
    """Human-readable label for a Mutation_Algorithm constant (None if unknown)."""
    if mutation == Mutation_Algorithm.Delete_Shift:
        return "Delete Shift"
    return None
def get_mutation_value(mutation: int):
    """Map an input back onto the matching Mutation_Algorithm constant (None if unknown)."""
    if mutation == Mutation_Algorithm.Delete_Shift:
        return Mutation_Algorithm.Delete_Shift
    return None
def get_selection_name(selection: int) -> str:
    """Human-readable label for a Selection_Algorithm constant (None if unknown)."""
    labels = (
        (Selection_Algorithm.SOFT, "Survival of the Fittest"),
        (Selection_Algorithm.SOFT_Distinct, "Distinct Survival of the Fittest"),
    )
    for algorithm, label in labels:
        if selection == algorithm:
            return label
def get_selection_value(selection: int):
    """Map an input back onto the matching Selection_Algorithm constant (None if unknown)."""
    for algorithm in (Selection_Algorithm.SOFT,
                      Selection_Algorithm.SOFT_Distinct):
        if selection == algorithm:
            return algorithm
|
# Read a string and report whether it contains only letters.
# NOTE: 'string' shadows the stdlib module name (harmless here).
string = input()
if string.isalpha():
    print("Apenas Letras")
else:
    print("Possui Numeros")
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2020-06-14 20:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change Ticket.public so it defaults to True."""

    dependencies = [
        ('tickets', '0006_ticket_email'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ticket',
            name='public',
            field=models.BooleanField(default=True),
        ),
    ]
|
# a file hosts all global referred parameters/sets/etcself.
from pyomo import environ as pe
from data.thermal_data import Tb
from utility.data_utility import cal_cnumber
m = pe.ConcreteModel()

# Component sets: olefins CnH2n for n=2..20, paraffins CnH2n+2 for n=1..56,
# plus the inorganic species; unions give the organic and full inventories.
m.COMP_OLEFIN = pe.Set(initialize=['C{0}H{1}'.format(i,2*i) for i in range(2,21)],ordered=True)
m.COMP_PARAFFIN = pe.Set(initialize=['C{0}H{1}'.format(i,2*i+2) for i in range(1,57)],ordered=True)
m.COMP_INORG = pe.Set(initialize=['H2','CO','CO2','H2O'],ordered=True)
m.COMP_ORG = m.COMP_OLEFIN | m.COMP_PARAFFIN
m.COMP_TOTAL = m.COMP_INORG | m.COMP_OLEFIN | m.COMP_PARAFFIN
# Feed is syngas plus a C30 paraffin.
m.COMP_FEED = pe.Set(initialize=['H2','CO','C30H62'],ordered=True)
# m.COMP_FEED = m.COMP_INORG | m.COMP_OLEFIN | m.COMP_PARAFFIN
'''
Sort components based on boiling point
'''
# Pair each organic component with its boiling point Tb, then order the
# set by that temperature.
product_boiling_C = [(i,Tb[i]) for i in m.COMP_ORG]
def pick_comp(x):
    # sort key: the boiling point in the (component, Tb) pair
    return x[1]
m.COMP_SORTED_BP = pe.Set(initialize=[i for i,T in sorted(product_boiling_C,key =pick_comp )],ordered=True)
'''
Product definition
'''
# Products are carbon-number cuts of the organic components; note that
# 'intermediate' (C1-C56) deliberately overlaps every other cut.
m.PRODUCT = pe.Set(initialize=['naphtha','gasoline','diesel','heavy','intermediate'],ordered=True)
m.PRODUCT_cnumber = pe.Set(m.PRODUCT)
m.PRODUCT_cnumber['naphtha'] = [i for i in m.COMP_ORG if cal_cnumber(i) >= 5 and cal_cnumber(i) <= 7]
m.PRODUCT_cnumber['gasoline'] = [i for i in m.COMP_ORG if cal_cnumber(i) >= 8 and cal_cnumber(i) <= 12]
m.PRODUCT_cnumber['diesel'] = [i for i in m.COMP_ORG if cal_cnumber(i) >= 13 and cal_cnumber(i) <= 18]
m.PRODUCT_cnumber['heavy'] = [i for i in m.COMP_ORG if cal_cnumber(i) >= 19 and cal_cnumber(i) <= 56]
m.PRODUCT_cnumber['intermediate'] = [i for i in m.COMP_ORG if cal_cnumber(i) >= 1 and cal_cnumber(i) <= 56]
|
# coding=utf-8
import _sqlite3 as sqlite
import csv
def connectToDB():
    """Open (creating if needed) the local SQLite database and return the connection."""
    return sqlite.connect('neural_db.db')
def updateParamToDB(connector, data):
    """Update one parameter's min/max bounds, then commit and close.

    ``data`` is expected to look like {"data": {"param": ..., "min": ...,
    "max": ...}}. NOTE: the connection is closed here, so the caller
    cannot reuse it afterwards.
    """
    cursor = connector.cursor()
    param_name = data['data']['param']
    minimal = data['data']['min']
    maximum = data["data"]["max"]
    # Parameterized query: the original interpolated values straight into
    # the SQL text, which breaks on quotes and invites SQL injection.
    sql = """
    UPDATE params
    SET min = ?, max = ?
    WHERE param = ?
    """
    print(sql)
    if param_name == 'snip':  # bug fix: 'is' compared identity, not equality
        print('<------ALARM is %s->>>>>>>>>>' % param_name)
    cursor.execute(sql, (minimal, maximum, param_name))
    connector.commit()
    connector.close()
def getParamValuesFromDB(connector, param):
    """Fetch the stored {min, max} bounds for *param* from the params table."""
    result_object = {
        "min": 0,
        "max": 0
    }
    cursor = connector.cursor()
    # Parameterized to avoid quoting bugs / SQL injection.
    cursor.execute("SELECT * FROM params WHERE param = ?", (param,))
    record = cursor.fetchone()
    # Columns assumed to be (id, param, min, max) -- verify against schema.
    result_object['min'] = record[2]
    result_object['max'] = record[3]
    print("data getParamValuesFromDB", result_object)
    return result_object
def getParamInputValueForNormalize(connector, param):
    """Fetch {min, max} bounds for *param* (used when normalizing inputs).

    NOTE(review): duplicates getParamValuesFromDB apart from the log line;
    consider delegating to a single helper.
    """
    result_object = {
        "min": 0,
        "max": 0
    }
    cursor = connector.cursor()
    # Parameterized to avoid quoting bugs / SQL injection.
    cursor.execute("SELECT * FROM params WHERE param = ?", (param,))
    record = cursor.fetchone()
    # Columns assumed to be (id, param, min, max) -- verify against schema.
    result_object['min'] = record[2]
    result_object['max'] = record[3]
    print("getParamInputValueForNormalize", result_object)
    return result_object
def add_new_patient(connector, input_data):
    """Insert one patient row (values in column order); commits and closes.

    Parameterized insert replaces the original string concatenation of the
    raw values into the SQL text.
    """
    placeholders = ", ".join("?" * len(input_data))
    sql = "INSERT INTO patients VALUES (%s)" % placeholders
    cursor = connector.cursor()
    cursor.execute(sql, tuple(input_data))
    connector.commit()
    connector.close()
    return 'Patient added.'
# NOTE(review): dead code -- this one-argument delete_patient is shadowed by
# the two-argument delete_patient defined later in this module.
def delete_patient(id):
    return 'Patient with id -> %s has been deleted.\n' % id
def update_patient(id, data):
    """Report that patient *id* was edited (*data* is currently unused).

    Bug fix: the original formatted a 2-tuple into a single %s placeholder,
    raising TypeError on every call.
    """
    return 'Patient with id -> %s has been edited. \n' % id
def add_pattients_to_db_from_csv(connector, filename):
    """Bulk-load ';'-separated float rows from *filename* into patients.

    Each CSV cell holds a ';'-joined list of floats; ids are auto-assigned
    via NULL. Commits once at the end and closes the connection.
    """
    formatted_data = []
    # Bug fix: csv.reader needs a text-mode file in Python 3 ('rb' yielded
    # bytes, making items.split(';') raise TypeError). newline='' is the
    # csv-module recommendation; 'with' guarantees the file is closed.
    with open(filename, newline='') as doc:
        for row in csv.reader(doc):
            for items in row:
                floated_items = [float(elem) for elem in items.split(';')]
                formatted_data.append(floated_items)
    cursor = connector.cursor()
    for patient in formatted_data:
        # Parameterized insert instead of concatenating values into SQL.
        placeholders = ", ".join("?" * len(patient))
        sql_query = "INSERT INTO patients VALUES (null, %s)" % placeholders
        print(sql_query)
        cursor.execute(sql_query, tuple(patient))
    connector.commit()
    connector.close()
    return 'All data added to DB'
def get_all_users(connector):
    """Return all patient rows, newest id first; closes the connection."""
    cursor = connector.cursor()
    cursor.execute("SELECT * FROM patients ORDER BY id DESC")
    # (The original pre-initialized users = [] and immediately overwrote it.)
    users = cursor.fetchall()
    connector.close()
    return users
def delete_patient(connector, id):
    """Delete the patient row with the given id; commits and closes."""
    cursor = connector.cursor()
    # Parameterized: the original interpolated id into the SQL string.
    cursor.execute("DELETE FROM patients WHERE id=?", (id,))
    connector.commit()
    connector.close()
    return 'Patient with id %s was deleted' % id
|
"""Users View"""
from django.shortcuts import render
from django.contrib.auth import views as auth_views
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView, UpdateView
from django.urls import reverse, reverse_lazy
#models
from user.models import User, Player
from results.models import Result
#forms
from user.forms import SignupForm
# Create your views here.
class LoginView(auth_views.LoginView):
    """Login user view"""
    template_name = 'user/login.html'
    # NOTE(review): 'next_url' is not an attribute django's auth LoginView
    # reads (redirects normally use LOGIN_REDIRECT_URL or the 'next' query
    # parameter) -- confirm something else consumes it.
    next_url= 'user/me/profile'
class LogoutView(LoginRequiredMixin,auth_views.LogoutView):
    """Users Logout view; redirects back to the login page afterwards."""
    next_page= 'user:login'
class SignupView(FormView):
    """User signup view: renders SignupForm and redirects to login on success."""
    form_class = SignupForm
    template_name = 'user/new_user.html'
    success_url = reverse_lazy('user:login')

    def form_valid(self, form):
        """Persist the new user, then redirect to success_url."""
        form.save()
        return super().form_valid(form)
class UserDetailView(LoginRequiredMixin, DetailView):
    """User detail view, looked up by username from the URL."""
    template_name='user/detail.html'
    slug_field= 'username'
    slug_url_kwarg='username'
    queryset = User.objects.all()
    context_object_name = 'user'

    def get_context_data(self,**kwargs):
        """Add the user's four highest-scoring results to the context."""
        context = super().get_context_data(**kwargs)
        user = self.get_object()
        query= Result.objects.filter(user=user).order_by('-score')
        context['results']= query[:4]
        return context
class PlayerUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own Player profile."""
    template_name = 'user/update_player.html'
    model = Player
    fields = ['biography', 'picture']

    def get_object(self):
        """return user's Player profile"""
        return self.request.user.player

    def get_success_url(self):
        """return to user's Player profile"""
        username = self.object.user.username
        return reverse('user:detail', kwargs={'username':username})
|
import turtle
def draw_square(any_turtle):
    """Trace a 100-unit square, turning clockwise at each corner."""
    for _ in range(4):
        any_turtle.forward(100)
        any_turtle.right(90)
def draw_circle(any_turtle):
    """Draw a circle of radius 100 with the given turtle."""
    any_turtle.circle(100)
def draw_equilateral_triangle(any_turtle):
    """Trace an equilateral triangle with 100-unit sides (exterior angle 120)."""
    for _ in range(3):
        any_turtle.forward(100)
        any_turtle.right(120)
def draw_flower(any_turtle):
    """Draw a flower: 36 squares, each rotated 10 degrees from the last."""
    for _ in range(36):
        draw_square(any_turtle)
        any_turtle.right(10)
def turtle_def(t_name, t_color, t_shape, t_speed):
    """Create and configure a fresh Turtle.

    Note: t_name is accepted for API compatibility but not used -- the
    original immediately overwrote it with the new Turtle as well.
    """
    fresh_turtle = turtle.Turtle()
    fresh_turtle.color(t_color)
    fresh_turtle.shape(t_shape)
    fresh_turtle.speed(t_speed)
    return fresh_turtle
def draw_shape():
    """Open the window and draw all four shapes with individually styled turtles."""
    window = turtle.Screen()
    window.bgcolor("violet")
    #Diego
    draw_flower(turtle_def("diego", "gray", "circle", "fast"))
    #Angie
    # NOTE(review): speed(None) queries rather than sets the speed -- confirm
    # the default speed is what was intended here.
    draw_circle(turtle_def("angie", "blue", "arrow", None))
    #Brad
    draw_square(turtle_def("brad", "yellow", "turtle", 2))
    #Justin
    draw_equilateral_triangle(turtle_def("justin", "green", "classic", 1))
    window.exitonclick()

draw_shape()
|
def find_it(seq):
    """Return the value that occurs an odd number of times in seq.

    Returns None when no such value exists. Uses collections.Counter for a
    single O(n) pass instead of the original O(n^2) seq.count-per-element
    dictionary build; iteration order (first occurrence) is preserved.
    """
    from collections import Counter
    for value, occurrences in Counter(seq).items():
        if occurrences % 2 != 0:
            return value
import numpy as np
import json
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
import argparse
"""
Attempt to fit log beta vs log [Rc,R0,R90]
"""
# Shell-shape data: log beta versus the characteristic radii Rc, R0, R90.
shelldata = json.load(open("Rc-R0VsBeta.json"))
parser = argparse.ArgumentParser(description="choose y-axis to plot")
parser.add_argument("--axis",type=str,default="Rc",choices=["Rc","R0","R90"],help="y-axis choice")
parser.add_argument("--norm",action="store_true",help="Normalize with R0")
cmd_args = parser.parse_args()
# Analytic beta grid plus lookup tables keyed by the chosen axis:
# y_arg: JSON field name, y_lab: LaTeX label, y_a / y_n: analytic curves
# (absolute and R0-normalized, respectively).
b = np.logspace(-3,-1)
y_arg = {"R0":"R0'","Rc": "Rc'_f","R90":"R90'"}
y_lab = {"R0":"R'_0","Rc": "R'_c","R90":"R'_{90}"}
# NOTE(review): the R90 expressions mix np.log10 with np.log (natural log)
# in the 3*np.log(1-0.8*b) terms -- confirm that is intentional.
y_a = {"R0":0.5*np.log10(b)-np.log10(1+np.sqrt(b)),"Rc":np.log10(1.5)+0.5*np.log10(b)-np.log10(1-b),
"R90":0.5*(np.log10(2.4)+np.log10(b)-3*np.log(1-0.8*b))}
y_n = {"R0":0*b,"Rc":np.log10(1.5) - np.log10(1.-np.sqrt(b)) ,"R90":0.5*(np.log10(2.4)+2*np.log10(1+np.sqrt(b))-3*np.log(1-0.8*b))}
# One dotted curve per model in the JSON file.
for i,modeldata in shelldata.items():
    beta = np.array(shelldata[i]["beta"])
    y = np.array(shelldata[i][y_arg[cmd_args.axis]])
    if cmd_args.norm:
        R0 = np.array(shelldata[i]["R0'"])
        Y = y/R0
    else:
        Y = y
    plt.plot(np.log10(beta),np.log10(Y),":",label=i)
# Pick labels, output name and the matching analytic (i=0) curve.
if cmd_args.norm:
    ylabel = r"$\log\left({}/R'_0\right)$".format(y_lab[cmd_args.axis])
    fig_name = "log-beta-vs-log-{}-norm.pdf".format(y_lab[cmd_args.axis])
    y_analytic = y_n[cmd_args.axis]
else:
    ylabel = r"$\log{}$".format(y_lab[cmd_args.axis])
    fig_name = "log-beta-vs-log-{}.pdf".format(y_lab[cmd_args.axis])
    y_analytic = y_a[cmd_args.axis]
plt.plot(np.log10(b),y_analytic,"k--",lw=2,alpha=0.5,label="analytic i=0")
plt.legend(loc="best",fontsize="small")
plt.xlabel(r"$\log\beta$")
plt.ylabel(ylabel)
plt.savefig(fig_name)
|
#/bin/bin/python
def randnum():
    """Return a pseudo-random float in [0.0, 1.0).

    The original line was an incomplete stub ('def randnum()' with no colon
    or body) and did not even parse; this supplies a minimal implementation.
    """
    import random
    return random.random()
|
#B
# Read d and mn, then up to mn integers, and print the mn-th value.
d,mn=map(int,input().split())  # NOTE(review): d is read but never used
tq=list(map(int,input().split()[:mn]))
print(tq[mn-1])
|
from django.conf.urls import url
from . import views
# Garmin data ingestion endpoints, one route per payload type.
# NOTE(review): the patterns have no leading '^' anchor, so they match any
# URL path *containing* these suffixes -- confirm that is intended.
urlpatterns = [
    url(r'users/data/epoch$',views.UserGarminDataEpochView.as_view(), name="epoch_data"),
    url(r'users/data/sleep$',views.UserGarminDataSleepView.as_view(), name="sleep_data"),
    url(r'users/data/body_composition$',views.UserGarminDataBodyCompositionView.as_view(),
        name="body_composition_data"),
    url(r'users/data/daily$',views.UserGarminDataDailyView.as_view(), name="daily_data"),
    url(r'users/data/activity$',views.UserGarminDataActivityView.as_view(), name="activity_data"),
    url(r'users/data/manually_updated$',views.UserGarminDataManuallyUpdatedView.as_view(),
        name="manually_updated_data"),
    url(r'users/last_synced$', views.UserLastSyncedItemview.as_view(),name="last_synced"),
]
|
from drawingpanel import *
import math
# Module-level drawing surface shared by every function below.
panel = DrawingPanel(500, 500)
panel.set_background("green")
canvas = panel.canvas
def sierpinski_triangle(iterations):
    '''Draw a Sierpinski triangle with the requested number of iterations.

    The outer black triangle spans (50,400)-(450,400) with its apex near
    y = 53.59 (= 400 - sqrt(3)/2 * 400, i.e. equilateral for a 400px base).
    '''
    C_Point_Y_Value = 53.5898384862  # apex y-coordinate of the outer triangle
    if iterations >= 0:
        ''' First Iteration '''
        canvas.create_polygon(50, 400, 450, 400, 250, C_Point_Y_Value, fill="black") #Outer triangle
    if iterations >= 1:
        create_triangle(150, (400 + 53.5898384862)/2, 200) #Inner triangle - first iteration
    if iterations > 1:
        ''' Recursive functions '''
        # iterations - 2: two levels already drawn above.
        create_triangle_children(150, (400 + 53.5898384862)/2, 200, iterations - 2)
def create_triangle(x, y, side_length):
    '''Draw one white inverted triangle: base from (x, y) to (x+side, y),
    apex sqrt(3)/2 * side below the base (canvas y grows downward).'''
    canvas.create_polygon(x, y, x + side_length, y, x + (side_length/2), (math.sqrt((3 * side_length **2)/4)) + y, fill="white")
def create_triangle_children(x, y, side_length, iterations):
    '''Recursively draw the three half-size child triangles of the triangle
    at (x, y): midpoints toward the outer apex and the two base corners.'''
    create_triangle((x + 250)/2, (y + 53.5898384862)/2, side_length/2) #top
    create_triangle((x + 450)/2, (y + 400)/2, side_length/2) #right
    create_triangle((x + 50)/2, (y + 400)/2, side_length/2) #left
    if iterations != 0:
        # Recurse once per child until the iteration budget is exhausted.
        create_triangle_children((x+ 250)/2, (y + 53.5898384862)/2, side_length/2, iterations - 1)
        create_triangle_children((x + 450)/2, (y + 400)/2, side_length/2, iterations - 1)
        create_triangle_children((x + 50)/2, (y + 400)/2, side_length/2, iterations - 1)
def main():
    """Ask for an iteration count and draw the matching Sierpinski triangle.

    Robustness fix: the original converted with int() inside the prompt
    loop, so pressing Enter on an empty line crashed with ValueError;
    now an empty answer simply re-prompts.
    """
    user_input = ""
    while user_input == "":
        user_input = input("How many iterations would you like to do? ")
    sierpinski_triangle(int(user_input))

main()
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import jsonpickle
import dateutil.parser
from .controller_test_base import ControllerTestBase
from ..test_helper import TestHelper
from ytelapi.api_helper import APIHelper
class SharedShortCodeControllerTests(ControllerTestBase):
    """Endpoint tests for the shared short code controller."""

    @classmethod
    def setUpClass(cls):
        super(SharedShortCodeControllerTests, cls).setUpClass()
        cls.controller = cls.api_client.shared_short_code

    def _assert_json_ok(self):
        """Shared post-call check: HTTP 200 with a JSON content type.

        Uses assertEqual -- assertEquals is a deprecated alias that was
        removed in newer Python versions.
        """
        self.assertEqual(self.response_catcher.response.status_code, 200)
        expected_headers = {'content-type': 'application/json'}
        self.assertTrue(TestHelper.match_headers(
            expected_headers, self.response_catcher.response.headers))

    # Retrieve a list of shortcode assignment associated with your Ytel account.
    def test_test_list_shortcodes(self):
        shortcode = None
        page = None
        pagesize = None
        result = self.controller.create_list_shortcodes(shortcode, page, pagesize)
        self._assert_json_ok()

    # Retrieve a list of keywords associated with your Ytel account.
    def test_test_list_keywords(self):
        page = None
        pagesize = None
        keyword = None
        shortcode = None
        result = self.controller.create_list_keywords(page, pagesize, keyword, shortcode)
        self._assert_json_ok()

    # List Shortcode Templates by Type
    def test_test_list_templates(self):
        mtype = None
        page = None
        pagesize = None
        shortcode = None
        result = self.controller.create_list_templates(mtype, page, pagesize, shortcode)
        self._assert_json_ok()

    # List All Inbound ShortCode
    def test_test_list_inbound_sms(self):
        datecreated = None
        page = None
        pagesize = None
        mfrom = None
        shortcode = None
        result = self.controller.create_list_inbound_sms(datecreated, page, pagesize, mfrom, shortcode)
        self._assert_json_ok()
|
from threading import Thread
import time
import logging
from decimal import *
import decimal
import timeit
logging.basicConfig(level=logging.DEBUG, format='(%(threadName)s) %(message)s', )
# Note: this method taken from
# https://docs.python.org/3/library/decimal.html#recipes
def pi():
    """Compute Pi to the current precision.

    >>> print(pi())
    3.141592653589793238462643383

    Taken verbatim from the decimal module's recipes; the series is iterated
    until two consecutive partial sums agree at the working precision.
    """
    getcontext().prec += 2  # extra digits for intermediate steps
    three = Decimal(3)      # substitute "three=3.0" for regular floats
    lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
    while s != lasts:
        lasts = s
        n, na = n+na, na+8
        d, da = d+da, da+32
        t = (t * n) / d
        s += t
    getcontext().prec -= 2  # restore the caller's precision
    return +s               # unary plus applies the new precision.
def calculate():
    """Compute pi to 30000 digits and log how long the computation took."""
    decimal.getcontext().prec = 30000
    start_time = timeit.default_timer()
    logging.info('starting execution')
    pi()
    elapsed = timeit.default_timer() - start_time
    logging.info('time taken %s seconds', elapsed)
# Run four pi computations on four threads and time the whole batch.
# NOTE: this is CPU-bound pure-Python work, so under the GIL the threads
# largely serialize -- presumably the point of this demo is to show that.
start_time = timeit.default_timer()
t1 = Thread(target=calculate)
t2 = Thread(target=calculate)
t3 = Thread(target=calculate)
t4 = Thread(target=calculate)
t1.start()
t2.start()
t3.start()
t4.start()
# Wait for all four workers before reporting the total wall time.
t1.join()
t2.join()
t3.join()
t4.join()
elapsed = timeit.default_timer() - start_time
logging.info('program time taken %s seconds', elapsed)
#!/usr/bin/python3
from sys import argv
"""
script that adds all arguments to a Python list,
and then save them to a file
"""
# Helpers from sibling exercises (module names start with digits, hence __import__).
save_to_json_file = __import__('7-save_to_json_file').save_to_json_file
load_from_json_file = __import__('8-load_from_json_file').load_from_json_file

my_list = []
try:
    # Start from whatever was saved on previous runs.
    my_load = load_from_json_file('add_item.json')
    for i in my_load:
        my_list.append(i)
except:
    # NOTE(review): bare except -- presumably meant to cover a missing or
    # corrupt add_item.json; narrowing the exception types would be safer.
    my_list = []

# Append every command-line argument (argv[0] is the script name) and save.
for j in range(1, len(argv)):
    my_list.append(argv[j])
save_to_json_file(my_list, 'add_item.json')
|
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.utils import six
class TokenGenerator(PasswordResetTokenGenerator):
    """Account-activation token generator.

    Mixes is_active into the hash so a token is invalidated as soon as the
    account becomes active.
    """
    def _make_hash_value(self, new_user, timestamp):
        return (
            six.text_type(new_user.pk) + six.text_type(timestamp) +
            six.text_type(new_user.is_active)
        )

# Module-level singleton used by the signup/activation email flow.
account_activation_token = TokenGenerator()
#!/usr/bin/env python
from File import File
from LSA import LSA
from Set import Set
from NaiveBayesClassifier import NaiveBayesClassifier
import numpy
import datetime
###############################################################################
# Initializing
###############################################################################
f = File()
print("Data imported.")

# Fixed LSA hyper-parameters; alpha (the Naive Bayes smoothing constant)
# is the quantity swept in the experiment below.
MIN_FREQ = 3
MAX_GRAM = 5
P_EIG = 0.95
time_score = []
lsa = []
alpha = [1e-10, 1, 0.5, 0.1, 0.05, 0.01, 0.005]
y = []
yerrormin = []
yerrormax = []
# Earlier sweeps over min_freq / max_gram, kept for reference:
#for mi in min_freq:
#    lsa.append(LSA(MAX_GRAM, mi, P_EIG, f.x))
#for ma in max_gram:
#    lsa.append(LSA(ma, MIN_FREQ, P_EIG, f.x))
l = LSA(MAX_GRAM, MIN_FREQ, P_EIG, f.x)
test_score = []
print("LSA created.")
###########################
# LSA
human_keywords = l.manage_keywords(f.keywords)
lsa_results = l.train_phrases(human_keywords)
print("LSA Results computed.")
sets = Set(lsa_results, f.y, f.x)
for a in alpha:
    print("Parameters: Min_freq =", l.min_freq,"NGram_max =", l.ngram_max, "P_eig =", l.p_eig*100, "alpha = ", a)
    # NOTE(review): test_score is never reset inside this loop, so each
    # alpha's average also includes scores from all previous alphas --
    # confirm that accumulation is intended.
    for i in range(len(sets.x_train)):
        ###########################
        ###########################
        # NAIVE BAYES: train on this split, record the test accuracy.
        naive = NaiveBayesClassifier(alpha=a)
        naive.train(numpy.array(sets.x_train[i]), sets.y_train[i])
        test_score.append(naive.test_score(numpy.array(sets.x_test[i]), numpy.array(sets.y_test[i])))
    # Summarize: mean plus distances to the min/max (error-bar lengths).
    avg = numpy.round(numpy.average(numpy.array(test_score)), 2)
    y.append(avg)
    min_ = numpy.round(numpy.array(test_score).min(), 2)
    yerrormin.append(numpy.round(avg - min_, 2))
    max_ = numpy.round(numpy.array(test_score).max(), 2)
    yerrormax.append(numpy.round(max_ - avg, 2))
    print("Avg test performance: ", avg)
    print(min_)
    print(max_)
print('\n'*3)
print("y = ", y)
print("yerrormin = ", yerrormin)
print("yerrormax = ", yerrormax)
|
#coding:gb2312
# Use a try-except block to handle the exceptions user input can raise.
print("Give me two numbers,and I'll divide them.")
print("Enter 'S' to quit.")

while True:
    first_num = input("Please enter first number: ")
    if first_num.upper() == 'S':
        break
    second_num = input("Please enter second number: ")
    # Robustness fix: honor the advertised 'S' quit at the second prompt too.
    if second_num.upper() == 'S':
        break
    try:
        answer = int(first_num)/int(second_num)
    except ZeroDivisionError:
        print("You can't divide by 0!\n\n")
    except ValueError:
        # Robustness fix: non-numeric input used to crash the program.
        print("Please enter numbers only!\n\n")
    else:
        print(str(answer)+"\n\n")
|
# %load q01_cond_prob/build.py
# So that float division is by default in python 2.7
from __future__ import division
import pandas as pd
# Housing data; cond_prob below uses only the Neighborhood column.
df = pd.read_csv('data/house_pricing.csv')
# Enter Code Here
def cond_prob(df):
    """Probability that 3 houses drawn without replacement are all in OldTown.

    P = prod_{i=0..2} (oldtown - i) / (all - i)

    Removed: the unrelated pd.set_option display tweak the original made as
    a side effect on every call.
    """
    all_houses = df.shape[0]
    old_town = df[df['Neighborhood'] == 'OldTown'].shape[0]
    c = 1
    for i in range(3):
        c = c * ((old_town - i) / (all_houses - i))
    return c
# Bug fix: 'print cond_prob(df)' is Python 2 statement syntax and is a
# SyntaxError under Python 3; the call form works on both.
print(cond_prob(df))
|
# Copyright (c) Members of the EGEE Collaboration. 2004.
# See http://www.eu-egee.org/partners/ for details on the copyright
# holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
from DynamicSchedulerGeneric import Utils as DynSchedUtils
from TestUtils import Workspace
class UtilsTestCase(unittest.TestCase):
    """Tests for DynSchedUtils.getMaxJobsTable and its error handling."""

    def setUp(self):
        # Local account name -> VO; several accounts map onto one VO.
        self.vomap = {"atlasprod": "atlas",
                      "atlassgm": "atlas",
                      "dteamgold": "dteam",
                      "dteamsilver": "dteam",
                      "dteambronze": "dteam",
                      "infngridlow": "infngrid",
                      "infngridmedium": "infngrid",
                      "infngridhigh": "infngrid"}
        # Per-account max-job limits reported by the fake backend command.
        self.mjTable = {"atlasprod": 20,
                       "atlassgm": 30,
                       "dteamgold": 50,
                       "dteamsilver": 40,
                       "dteambronze": 60,
                       "infngridlow": 110,
                       "infngridmedium": 120,
                       "infngridhigh": 130}

    def tearDown(self):
        pass

    def test_getMaxJobsTable_ok(self):
        """Per-VO totals must be the sums of their accounts' limits."""
        workspace = Workspace(vomap = self.vomap)
        workspace.setMaxJobCmd(self.mjTable)
        cfgfile = workspace.getConfigurationFile()
        config = DynSchedUtils.readConfigurationFromFile(cfgfile)
        result = DynSchedUtils.getMaxJobsTable(config)
        self.assertTrue('atlas' in result and result['atlas'] == 50
                        and 'dteam' in result and result['dteam'] == 150
                        and 'infngrid' in result and result['infngrid'] == 360)

    def test_getMaxJobsTable_wrongexit(self):
        """A backend command exiting non-zero must raise UtilsException."""
        try:
            workspace = Workspace(vomap = self.vomap)
            script = """#!/bin/bash
exit 1
"""
            workspace.setMaxJobCmd(script)
            cfgfile = workspace.getConfigurationFile()
            config = DynSchedUtils.readConfigurationFromFile(cfgfile)
            result = DynSchedUtils.getMaxJobsTable(config)
            # Bug fix: the original passed silently when nothing was raised.
            self.fail("UtilsException was not raised")
        except DynSchedUtils.UtilsException as test_error:
            # 'except ... as ...' replaces the Python-2-only comma syntax.
            msg = str(test_error)
            self.assertTrue(msg.startswith("VO max jobs backend command returned"))

    def test_getMaxJobsTable_nofile(self):
        """With no backend command configured, an 'Error running' failure is raised."""
        try:
            workspace = Workspace(vomap = self.vomap)
            cfgfile = workspace.getConfigurationFile()
            config = DynSchedUtils.readConfigurationFromFile(cfgfile)
            result = DynSchedUtils.getMaxJobsTable(config)
            # Bug fix: the original passed silently when nothing was raised.
            self.fail("UtilsException was not raised")
        except DynSchedUtils.UtilsException as test_error:
            msg = str(test_error)
            self.assertTrue(msg.startswith("Error running"))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
# #
# plot_sci_run_trends.py: pdate science run trend plots #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Feb 26, 2021 #
# #
#################################################################################
import os
import sys
import re
import string
import random
import operator
import time
import matplotlib as mpl
mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
# Each line of the housekeeping file is "<value> : <variable name>"; exec
# turns them into module-level variables (e.g. web_dir, bin_dir, mta_dir).
# NOTE(review): exec on file contents fully trusts that file.
path = '/data/mta/Script/ACIS/Acis_sci_run/house_keeping/dir_list_py_t'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- converTimeFormat contains MTA time conversion routines
#
import mta_common_functions as mcf
import acis_sci_run_functions as asrf
#-----------------------------------------------------------------------------------------------
#-- plot_sci_run_trends: pdate science run trend plots --
#-----------------------------------------------------------------------------------------------
def plot_sci_run_trends(tyear=''):
    """
    Update science run trend plots.
    input:  tyear --- the year of the data (defaults to the current year)
    output: <web_dir>Year<year>/<type>_out.png
    """
    # Default to the current UTC year when no year is given.
    if tyear == '':
        tyear = int(float(time.strftime('%Y', time.gmtime())))
    # Plot trends for the selected year, then the long-term trends.
    plot_events('Year' + str(tyear))
    plot_events('Long_term')
    # Update the html pages; a past year is closed out at Dec 31.
    parts = re.split(':', time.strftime("%Y:%m:%d", time.gmtime()))
    year, month, mday = (int(float(p)) for p in parts)
    if year != tyear:
        month, mday = 12, 31
    asrf.acis_sci_run_print_html(web_dir, tyear, month, mday)
#-----------------------------------------------------------------------------------------------
#--- plot_events: control sub for plotting each data group ---
#-----------------------------------------------------------------------------------------------
def plot_events(data_dir):
    """
    Control function to create plots for each sub data set.
    input:  data_dir --- the directory name where the data are located
                         (e.g. Year2013 or Long_term)
    output: png plot files such as te3_3_out.png
    """
    # The four science-run data groups share the same naming pattern;
    # the original repeated this 3-line block four times.
    for tail in ('cc3_3_out', 'te3_3_out', 'te5_5_out', 'te_raw_out'):
        ifile = web_dir + data_dir + '/' + tail
        acis_sci_run_plot(ifile, ifile + '.png')
#-----------------------------------------------------------------------------------------------
#-- acis_sci_run_plot: sets up the parameters for the given file and create plots ---
#-----------------------------------------------------------------------------------------------
def acis_sci_run_plot(ifile, outname):
    """
    Set up the parameters for the given data file and create plots.
    input:  ifile   --- data file name
            outname --- plot output file name
    output: <outname> --- png plot (a canned "no data" image when empty)
    """
    data = mcf.read_data_file(ifile)
    #
    #--- if there is no data, copy a "no data" plot instead
    #
    if len(data) == 0:
        cmd = 'cp ' + house_keeping + 'no_data.png ' + outname
        os.system(cmd)
        return False
    date_list  = []
    count_list = []
    err_list   = []
    drop_list  = []
    xmakerInd  = 0     #--- set to 1 when this is a long-term plot
    for ent in data:
        col = re.split(r'\t+|\s+', ent)
        try:
            val = float(col[6])
            if val > 0:
                #
                #--- yearly data give dates as <doy>:<seconds> (e.g. 112:00975.727)
                #--- and are converted to ydate; long-term data are already in
                #--- fractional years
                #
                if re.search(':', col[1]) is not None:
                    atemp = re.split(':', col[1])
                    date  = float(atemp[0]) + float(atemp[1]) / 86400.0
                else:
                    date      = float(col[1])
                    xmakerInd = 1
                #
                #--- convert event rate and error rate into appropriate units
                #
                evt = float(col[7]) / val / 1000.0
                err = float(col[8]) / val
                date_list.append(date)
                count_list.append(evt)
                err_list.append(err)
                drop_list.append(float(col[9]))
        except (ValueError, IndexError):
            #--- skip malformed/short rows (was a bare except)
            pass
    if len(date_list) > 0:
        #
        #--- set plotting range
        #
        (xmin, xmax) = set_min_max(date_list)
        if xmakerInd == 1:          #--- long term: x axis in whole years
            xmin = int(xmin)
            xmax = int(xmax) + 1
        (ymin1, ymax1) = set_min_max(count_list)
        #
        #--- BUG FIX: re.search(pattern, string) arguments were swapped
        #--- (re.search(ifile, 'te_raw_out')), so the fixed 0-10 y range
        #--- for te_raw_out data was never applied
        #
        if re.search('te_raw_out', ifile) is not None:
            ymin1 = 0
            ymax1 = 10
        (ymin2, ymax2) = set_min_max(err_list)
        (ymin3, ymax3) = set_min_max(drop_list)
        yminSet = [ymin1, ymin2, ymin3]
        ymaxSet = [ymax1, ymax2, ymax3]
        xSets   = [date_list, date_list, date_list]
        ySets   = [count_list, err_list, drop_list]
        if xmakerInd == 0:
            xname = 'Time (Day of Year)'
        else:
            xname = 'Time (Year)'
        yLabel    = ['Events/sec', 'Events/sec', 'Percent']
        entLabels = ['Events per Second (Science Run)', 'Errors (Science Run)', 'Percentage of Exposures Dropped (Science Run)']
        #
        #--- calling actual plotting routine
        #
        plotPanel(xmin, xmax, yminSet, ymaxSet, xSets, ySets, xname, yLabel, entLabels, outname)
#-----------------------------------------------------------------------------------------------
#--- set_min_max: set min and max of plotting range ---
#-----------------------------------------------------------------------------------------------
def set_min_max(data):
    """
    Set min and max of the plotting range: 10% wider than the actual
    data range, with the lower bound clamped at 0.
    Input:  data --- one dimensional data set
    Output: (pmin, pmax) --- min and max of plotting range; (0, 1) when
            the data are empty or not comparable
    """
    try:
        pmin = min(data)
        pmax = max(data)
    except (ValueError, TypeError):
        # empty sequence or non-comparable entries (was a bare except)
        return (0, 1)
    diff = pmax - pmin
    pmin = pmin - 0.1 * diff
    if pmin < 0:                 # never plot below zero
        pmin = 0
    pmax = pmax + 0.1 * diff
    if pmin == pmax:             # degenerate range: force a visible span
        pmax = pmin + 1
    return (pmin, pmax)
#-----------------------------------------------------------------------------------------------
#--- plotPanel: plots multiple data in separate panels ---
#-----------------------------------------------------------------------------------------------
def plotPanel(xmin, xmax, yminSet, ymaxSet, xSets, ySets, xname, yLabel, entLabels, ofile):
    """
    Plot multiple data sets in vertically stacked panels and save a png.
    Input:  xmin, xmax        --- shared x plotting range
            yminSet, ymaxSet  --- per-panel y plotting ranges
            xSets             --- a list of lists containing x-axis data
            ySets             --- a list of lists containing y-axis data
            xname             --- name of the x-axis
            yLabel            --- per-panel y-axis labels
            entLabels         --- a list of the names of each data set
            ofile             --- output png file name
    Output: a png plot written to ofile
    NOTE(review): the axes are created/addressed through exec/eval on
    synthesized names (ax0, ax1, ...); this relies on CPython's frame-locals
    dict behavior -- confirm before porting to another interpreter.
    """
    #
    #--- set line color list (one color per panel, used in order)
    #
    colorList = ('blue', 'green', 'red', 'aqua', 'lime', 'fuchsia', 'maroon', 'black', 'yellow', 'olive')
    #
    #--- clean up the plotting device
    #
    plt.close('all')
    #
    #---- set a few parameters
    #
    mpl.rcParams['font.size'] = 9
    props = font_manager.FontProperties(size=9)
    plt.subplots_adjust(hspace=0.08)
    tot = len(entLabels)
    #
    #--- start plotting each data set
    #
    for i in range(0, tot):
        axNam = 'ax' + str(i)
        #
        #--- setting the panel position (subplot spec "<rows>1<index>")
        #
        j = i + 1
        if i == 0:
            line = str(tot) + '1' + str(j)
        else:
            line = str(tot) + '1' + str(j) + ', sharex=ax0'
        # NOTE(review): the next line unconditionally overwrites the value
        # above, so the ", sharex=ax0" branch is dead code and the panels do
        # NOT share an x axis -- confirm whether this was intentional.
        line = str(tot) + '1' + str(j)
        exec("%s = plt.subplot(%s)" % (axNam, line))
        exec("%s.set_autoscale_on(False)" % (axNam))      #---- these three may not be needed for the new pylab, but
        exec("%s.set_xbound(xmin,xmax)" % (axNam))        #---- they are necessary for the older version to set
        exec("%s.set_xlim(left=xmin, right=xmax, auto=False)" % (axNam))
        exec("%s.set_ylim(bottom=yminSet[i], top=ymaxSet[i], auto=False)" % (axNam))
        xdata = xSets[i]
        ydata = ySets[i]
        #
        #---- actual data plotting (markers only: lw=0)
        #
        p, = plt.plot(xdata, ydata, color=colorList[i], lw =0, markersize=4.0, marker='o')
        #
        #--- add legend
        #
        leg = legend([p], [entLabels[i]], prop=props, loc=2)
        leg.get_frame().set_alpha(0.5)
        exec("%s.set_ylabel(yLabel[i], size=8)" % (axNam))
    #
    #--- add x tick labels only on the last panel
    #
    for i in range(0, tot):
        ax = 'ax' + str(i)
        if i != tot-1:
            line = eval("%s.get_xticklabels()" % (ax))
            for label in line:
                label.set_visible(False)
        else:
            pass
    xlabel(xname)
    #
    #--- set the size of the plotting area in inch (width: 10.0in, height 2.08in x number of panels)
    #
    fig = matplotlib.pyplot.gcf()
    height = (2.00 + 0.08) * tot
    fig.set_size_inches(10.0, height)
    plt.subplots_adjust(hspace=0.08)
    #
    #--- save the plot in png format
    #
    plt.savefig(ofile, format='png', dpi=200)
#--------------------------------------------------------------------
if __name__ == '__main__':
    # Optional command-line argument: the year to plot; '' means current year.
    tyear = int(float(sys.argv[1])) if len(sys.argv) > 1 else ''
    plot_sci_run_trends(tyear)
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Clients.credit_card_number to CharField(max_length=15)."""
    # NOTE(review): 15 characters cannot hold a standard 16-digit PAN, and
    # storing raw card numbers has PCI-DSS implications -- confirm intent.

    dependencies = [
        ('administration', '0005_auto_20191019_0200'),
    ]

    operations = [
        migrations.AlterField(
            model_name='clients',
            name='credit_card_number',
            field=models.CharField(max_length=15, verbose_name='Number credit'),
        ),
    ]
|
import json
# For each scene of Romeo and Juliet, collect the set of characters that
# appear in it and write one JSON list per line to the output file.
# BUG FIX: the output file is opened with an explicit encoding='utf-8' --
# ensure_ascii=False emits non-ASCII text, which can fail or be mangled
# under a non-UTF-8 platform default encoding.
with open('3_2 задача.json', 'w', encoding='utf-8') as to_write:
    with open('C:\\Users\\Виктория\\Downloads\\RomeoAndJuliet.json', 'r', encoding='utf-8') as f:
        romeo = json.load(f)
        for act in romeo['acts']:
            for scene in act['scenes']:
                # a set de-duplicates characters with several actions per scene
                set_char = {characters['character'] for characters in scene['action']}
                to_write.write(json.dumps(list(set_char), ensure_ascii=False))
                to_write.write('\n')
|
import smtplib
import os
import datetime
import dropbox
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from dotenv import load_dotenv
# Load credentials from a local .env file into the environment.
load_dotenv()
# Gmail "less secure apps" must be toggled for plain SMTP login to work.
input("\nUnlock then relock here: https://www.google.com/settings/security/lesssecureapps \n")
my_email = os.environ.get("MY_EMAIL")
my_password = os.environ.get("MY_EMAIL_PASSWORD")
dropbox_api_key = os.environ.get("DROPBOX_API_KEY")
dbx = dropbox.Dropbox(dropbox_api_key)
dashboard_name = input("Dashboard file name (with extension): ")
version = input("Dashboard version: ")
send_to = input("Send to (email): ")
# Upload the build to dropbox and create a public share link for it.
with open("../Builds/{}".format(dashboard_name), 'rb') as f:
    print("\nUploading to dropbox...")
    dbx.files_upload(f.read(), "/Home Dashboards/{}".format(dashboard_name))
    print("Done!")
link = dbx.sharing_create_shared_link(path="/Home Dashboards/{}".format(dashboard_name)).url
# Construct the email
email_text = """Download and install from here:\n{}\n\nTo install you'll have to browse to the downloaded file on your tablet in file explorer and install it from there.\n\nWhats new: https://github.com/iamtomhewitt/home-dashboard/blob/master/CHANGELOG.md\n\nTom""".format(link)
msg = MIMEMultipart()
msg['From'] = my_email
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = "Home Dashboard " + version
msg.attach(MIMEText(email_text))
# Now send the email.
# NOTE(review): SMTP() without a port uses port 25; Gmail usually expects
# 587 with STARTTLS -- confirm this works from your network. Also consider
# smtp.quit() (polite QUIT) instead of close().
print("\nSending email to {}".format(send_to))
smtp = smtplib.SMTP('smtp.gmail.com')
smtp.starttls()
smtp.login(my_email, my_password)
smtp.sendmail(my_email, send_to, msg.as_string())
smtp.close()
# Log it
print ("Sent email to {} at {}".format(send_to, datetime.datetime.now()))
from django.db import models
# from django.utils import timezones
from django.contrib.auth.models import User
class Meetups(models.Model):
    """A meetup event: where, when, what theme, and who moderates it."""
    # No = models.IntegerField()
    City = models.CharField(max_length=100)
    Venue = models.TextField()
    Time = models.TextField()
    Theme = models.TextField()
    # NOTE(review): field name keeps its original capitalization typo
    # ("MOderator") -- renaming would require a migration and break queries.
    MOderator = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        # BUG FIX: previously returned self.Name, but no Name field exists,
        # so str(instance) raised AttributeError. Use existing fields.
        return self.Theme
from django.contrib import admin
# from rangefilter.filter import DateRangeFilter
from mpesa_api.core.models import (
AuthToken,
B2CRequest,
B2CResponse,
C2BRequest,
OnlineCheckout,
OnlineCheckoutResponse,
)
# AuthToken needs no custom options; register with the default ModelAdmin.
admin.site.register(AuthToken)


@admin.register(B2CRequest)
class B2CRequestAdmin(admin.ModelAdmin):
    """Read-only admin for outgoing B2C payment requests."""
    list_display = ("phone", "amount")
    readonly_fields = (
        "phone",
        "amount",
        "conversation_id",
        "originator_conversation_id",
        "response_code",
        "response_description",
        "request_id",
        "error_code",
        "error_message",
        "date_added",
    )
    search_fields = ("phone",)
    # list_filter = (("date_added", DateRangeFilter),)
@admin.register(B2CResponse)
class B2CResponseAdmin(admin.ModelAdmin):
    """Read-only admin for B2C payment result callbacks."""
    list_display = (
        "phone",
        "amount",
        "transaction_receipt",
        "mpesa_user_name",
    )
    readonly_fields = (
        "phone",
        "amount",
        "conversation_id",
        "originator_conversation_id",
        "result_type",
        "result_code",
        "result_description",
        "transaction_id",
        "transaction_receipt",
        "transaction_amount",
        "working_funds",
        "utility_funds",
        "paid_account_funds",
        "transaction_date",
        "mpesa_user_name",
        "is_registered_customer",
    )
    search_fields = ("phone", "transaction_receipt", "mpesa_user_name")
    # list_filter = (("transaction_date", DateRangeFilter),)
@admin.register(C2BRequest)
class C2BRequestAdmin(admin.ModelAdmin):
    """Read-only admin for incoming C2B payment notifications."""
    list_display = (
        "phone",
        "amount",
        "name",
        "transaction_id",
        "transaction_date",
    )
    readonly_fields = (
        "phone",
        "amount",
        "name",
        "transaction_id",
        "transaction_date",
        "business_short_code",
        "bill_ref_number",
        "invoice_number",
        "org_account_balance",
        "third_party_trans_id",
        "is_validated",
        "is_completed",
        "date_added",
    )
    search_fields = ("phone", "transaction_id", "name")
    # list_filter = (
    #     ("transaction_date", DateRangeFilter),
    #     ("date_added", DateRangeFilter),
    # )
@admin.register(OnlineCheckout)
class OnlineCheckoutAdmin(admin.ModelAdmin):
    """Read-only admin for STK-push (online checkout) requests."""
    list_display = ("phone", "amount", "date_added")
    readonly_fields = (
        "phone",
        "amount",
        "checkout_request_id",
        "account_reference",
        "transaction_description",
        "customer_message",
        "merchant_request_id",
        "response_code",
        "response_description",
        "date_added",
    )
    search_fields = ("phone", "amount", "date_added")
    # list_filter = (("date_added", DateRangeFilter),)
@admin.register(OnlineCheckoutResponse)
class OnlineCheckoutResponseAdmin(admin.ModelAdmin):
    """Admin for online checkout result callbacks.

    readonly_fields is intentionally commented out ("comment_Out_4_tests")
    so tests can mutate these records through the admin.
    """
    list_display = (
        "phone",
        "amount",
        "mpesa_receipt_number",
        "transaction_date",
    )
    # comment_Out_4_tests
    # readonly_fields = (
    #     "phone",
    #     "amount",
    #     "transaction_date",
    #     "mpesa_receipt_number",
    #     "result_description",
    #     "result_code",
    #     "checkout_request_id",
    #     "merchant_request_id",
    #     "date_added",
    # )
    search_fields = ("phone", "amount", "date_added", "mpesa_receipt_number")
    # list_filter = (
    #     ("transaction_date", DateRangeFilter),
    #     ("date_added", DateRangeFilter),
    # )
|
import numpy as np
import cv2
import math
def main():
    """Overlay a sinusoidal noise pattern (phase = distance to a line) on lena.jpg."""
    gry = cv2.imread('lena.jpg', 0)
    # BUG FIX: ndarray.shape is (rows, cols) == (height, width); the original
    # unpacked them in the opposite order (harmless only for square images).
    height, width = gry.shape
    out = np.zeros((height, width), dtype = np.uint8)
    # Line a*x + b*y + c = 0 whose distance drives the sine phase.
    a = 2
    b = -3
    c = -1000
    shuki = 20           # spatial period of the sine, in pixels
    noise_strength = 20  # noise amplitude
    for w in range(width):
        for h in range(height):
            dist = math.fabs(a * w + b * h + c) / math.sqrt(a ** 2 + b ** 2)
            val = gry[h][w] + noise_strength * math.sin(2 * math.pi / shuki * dist)
            # BUG FIX: clamp to the valid uint8 range; the original assigned
            # out-of-range floats straight into a uint8 array (wrap/undefined).
            out[h][w] = min(255, max(0, val))
    cv2.imwrite('lena_sin_noise.jpg', out)


main()
|
from random import randint
from boto3 import resource
# Table handle created at import time so warm Lambda invocations reuse it.
db = resource("dynamodb", region_name="us-east-1").Table("FortunesServerless")


def get(event, context):
    """
    Lambda handler: return one random fortune as an API Gateway response.
    NOTE(review): db.scan()["Count"] runs a full table scan on EVERY request
    and only reflects the first page of results -- for large tables the count
    is wrong and the scan is slow/expensive; consider caching the count.
    Assumes items use sequential integer "id" keys 0..Count-1 -- TODO confirm.
    """
    fortune = db.get_item(
        Key = {"id": randint(0, db.scan()["Count"]-1)}
    )["Item"]["fortune"]
    return {
        "isBase64Encoded": False,
        "statusCode": 200,
        "headers": {
            # permissive CORS so any web page can call this endpoint
            "Access-Control-Allow-Origin": "*"
        },
        "body": fortune
    }
|
#File: hw2_part1.py
#Author: Joel Okpara
#Date: 2/14/2016
#Lab Section: 04
#UMBC Email: joelo1@umbc.edu
#Description: This program contains HW2 problems 1-10
# Each question below predicts the value of an arithmetic expression
# (operator-precedence practice), prints the actual value, then explains it.
#Question 1
#Expected Output: 55
num1 = (7 + 4) * 5
print("Num1 evaluates to:",num1)
#Actual Output: 55
#Explanation: Parentheses first (11), then multiply by 5 = 55

#Question 2
#Expected Output: 1
num2 =(15 % 7)
print("Num2 evaluates to:",num2)
#Actual Output: 1
#Explanation: 15 / 7 = 2 remainder 1 so 15 % 7 = 1

#Question 3
#Expected Output: 32
num3 = (32 % 36)
print("Num3 evaluates to:",num3)
#Actual Output: 32
#Explanation: 36 cannot go into 32, so the remainder is 32 itself

#Question 4
#Expected Output: 12
num4 = (5 - 3) + (10 - 5) * (8 % 3)
print("Num4 evaluates to:",num4)
#Actual Output: 12
#Explanation: Doing parentheses first will give us
#             (2) + (5) * (2)  [because 8 % 3 = 2]
#             Multiplication next = 2 + 10
#             = 12

#Question 5
#Expected Output: 4.5
num5 = 21 / 7 / 4 * (3 + 3)
print("Num5 evaluates to:", num5)
#Actual Output: 4.5
#Explanation: Parentheses first = 21 / 7 / 4 * 6
#             All of these are equal-precedence operations so go left to right
#             21 / 7 / 4 * 6
#             = 3 / 4 * 6
#             = 0.75 * 6
#             = 4.5

#Question 6
#Expected Output: 14
num6 = 9 / 3 + 21 - 5 * 2
print("Num6 evaluates to:",num6)
#Actual Output: 14.0  (true division / always yields a float in Python 3)
#Explanation: Division and Multiplication first will give us
#             3 + 21 - 10
#             = 14

#Question 7
#Expected Output: 14
num7 = 7 % 5 + 6 * 2
print("Num7 evaluates to:",num7)
#Actual Output: 14
#Explanation: Mod and Multiplication first will give us
#             2 + 12
#             = 14

#Question 8
#Expected Output: 17.3
num8 = 35.2 / 2.3 + (332 % 33)
print ("Num8 evaluates to:",num8)
#Actual Output: 17.30434782608696
#Explanation: Parentheses first gives us
#             35.2 / 2.3 + 2
#             15.3 + 2
#             = 17.3
#             (values rounded to one decimal place)

#Question 9: insert parentheses to hit the target value
#Given Equation: 55 / 10 + 45 / 0.2
#Solved Equation: 55 / (10 + 45)/ 0.2
#Target Number: 5.0
num9 = 55 / (10 + 45) / 0.2
print("Num9 evaluates to:",num9, "and should be", 5.0)

#Question 10: insert parentheses to hit the target value
#Given Equation: 65 // 20 + 10 - 4 % 4
#Solved Equation: 65 // (20 + 10) - 4 % 4
#Target Number: 2
num10 = 65 // (20 + 10) - 4 % 4
print ("Num10 evaluates to:", num10, "and should be", 2)
|
import subprocess
def run_code(code, timeout=None):
    """
    Run *code* in a fresh Python interpreter and return its combined
    stdout/stderr output.

    :param code:    Python source to execute with ``-c``
    :param timeout: seconds before the child is killed (None = no limit);
                    BUG FIX: the original caught subprocess.TimeoutExpired
                    but never passed a timeout, so that branch was
                    unreachable. The default preserves the old behaviour.
    :return: str -- program output, error output, or a "Time Out!!!" banner

    SECURITY NOTE: this executes arbitrary code; only use with trusted input.
    """
    try:
        # sys.executable instead of bare 'python': works even when no
        # 'python' alias is on PATH and always matches this interpreter.
        output = subprocess.check_output(
            [sys.executable, '-c', code],
            universal_newlines=True,
            stderr=subprocess.STDOUT,
            timeout=timeout,
        )
    except subprocess.CalledProcessError as e:
        output = e.output
    except subprocess.TimeoutExpired as e:
        # e.output may be None when nothing was captured before the kill
        output = '\r\n'.join(['Time Out!!!', e.output or ''])
    return output
# Quick self-check: run a trivial program and show its output.
demo_source = """print('Test success')"""
print(run_code(demo_source))
__version__ = '0.0.2'
__license__ = 'MIT'
def main():
    """Command-line entry point for the article generator."""
    # imported lazily so importing this module itself has no side effects
    import article, argparse
    # initialize parser
    parser = argparse.ArgumentParser(prog='article_gen')
    parser.formatter_class = argparse.RawTextHelpFormatter
    parser.description = 'automatic article generator by MaxXing\n' + \
                         'based on jieba and Hidden Markov Model (HMM)'
    parser.add_argument('file', metavar='FILE', nargs='*',
                        help='list of text files or directories')
    parser.add_argument('-s', '--seed', default=-1, type=int,
                        help='specify a random seed (unsigned integer)')
    parser.add_argument('-len', '--length', default=0, type=int,
                        help='specify the length of article')
    parser.add_argument('-d', '--dump', default='',
                        help='dump the word data from input file')
    parser.add_argument('-l', '--load', default='',
                        help='load word data from dump file')
    parser.add_argument('-pp', '--proportion', default=0.5, type=float,
                        help='set the proportion of impact of parts of speech')
    parser.add_argument('-p', '--print', action='store_true',
                        help='print progress information while scanning file')
    parser.add_argument('-sep', '--separator', action='store_const',
                        const=' ', default='',
                        help='add separators between every two words')
    parser.add_argument('-u', '--uniform', action='store_true',
                        help='use uniform distribution model instead of HMM')
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s ' + __version__)
    # parse arguments; with neither input files nor a dump there is nothing
    # to do, so show the help text and leave
    args = parser.parse_args()
    if not args.file and not args.load:
        parser.print_help()
        exit(0)
    # initialize generator
    gen = article.ArticleGen(
        show_progress=args.print,
        separator=args.separator,
        use_weight=not args.uniform,
        pos_prop=args.proportion)
    # generate seed (an explicit non-negative --seed wins over a random one)
    if args.seed > -1:
        s = gen.new_seed(args.seed)
    else:
        s = gen.seed()
    print('seed:', s)
    # load data: either a previous dump or the raw input files
    if args.load:
        gen.dump_file = args.load
        if not gen.load_dump():
            parser.error('cannot read word from dump')
    else:
        if not gen.load(args.file):
            parser.error('cannot read word from input files')
        # optionally persist the scanned word data for later --load runs
        if args.dump:
            gen.dump_file = args.dump
            gen.dump()
    # generate article
    text = gen.generate(args.length)
    print(text)
|
import pyotherside
def test_func_one():
    """Return a fixed greeting string to the QML side (no arguments)."""
    return "world from python..."
def test_func_two(argument):
# Test function that returns string from python script
return "world from python... but with argument { " + str(argument) + " }"
|
#!/usr/bin/python
from datasport import SerieD
import sys
# Map season -> list of Datasport group ids (girone A..I -> index 0..8).
# NOTE(review): the '2016' list contains 16351 twice (positions 4 and 5);
# one of them is probably meant to be 16352 -- confirm against the site.
gironi={
    '2015' : [15523,15524,15525,15526,15527,15528,15529,15530,15531],
    '2016' : [16348,16349,16350,16351,16351,16353,16354,16355,16368]
}
# Command-line: <group letter A..I> <year> <extra parser arg>.
girone=ord(sys.argv[1].upper())-65
anno=sys.argv[2]
g=0
# Retry until the parser returns a non-empty result (Python 2 script).
while g==0:
    p=SerieD(gironi[anno][girone],sys.argv[3])
    g=p.parser()
if g!=0:
    for p in g:
        print p
|
# Exercício 7.1 - Livro
# Read two strings and report where the second occurs in the first
# (case-insensitive: both are upper-cased before searching).
# str() around input() was redundant -- input() already returns str.
s1 = input('Digite a primeira string: ').upper()
s2 = input('Digite a segunda string: ').upper()
pos = s1.find(s2)   # -1 when not found
if pos >= 0:
    print(f'{s2} encontrada na posição {pos}')
else:
    print('Nada foi encontrado!')
|
from customers.Aurora.provider.provider_mappings import CREDENTIALS, provider_type_map
from lib.master_fake_data_generator import FakeDataGenerator
class AURORAProviderFakeDataGenerator(FakeDataGenerator):
    """Generate fake AURORA provider pipeline rows for testing."""

    def generate_pipeline_row(self, row: int, file_size: int) -> dict:
        """
        Build one fake provider record.
        :param row: zero-based row index (annotation fixed: the value is used
                    arithmetically as ``row + 1``, so it is an int, not str)
        :param file_size: total number of rows in the file (unused here)
        :return: dict mapping column name -> fake value
        """
        f = self._faker
        r = self._random
        provider_line = {
            "PROV_ID": f"{row + 1}",
            "PROV_NAME": f.name(),
            "CREDENTIALS": f.random_element(CREDENTIALS),
            "NPI": self.random_or_empty(f.random_number(10)),
            "PROV_TYPE": f.random_element(list(provider_type_map)),
            # extract timestamp with a random millisecond part, fixed UTC-5
            "PROCESS_EXTRACT_DTTM": f"{self.get_current_date()}:{r.randint(100,999)}-05:00"
        }
        return provider_line
|
#coding=utf-8
"""
Extra things to make the discord library nicer
"""
from discord import errors
async def safeSend(channel, text=None, embed=None):
    """
    Send a text / embed message (one or the other, not both) to a
    user, and if an error occurs, safely suppress it
    On failure, returns:
    -1 : Nothing to send (text & embed are `None`)
    -2 : Forbidden
    -3 : HTTPException
    -4 : InvalidArgument
    On success returns what the channel.send method returns
    NOTE(review): an empty string for `text` is falsy and falls through to
    the embed/-1 branch -- confirm that is intended.
    """
    try:
        if text:
            return await channel.send(text)
        elif embed:
            return await channel.send(embed=embed)
        else:
            return -1
    # (the explanatory comments on the two branches below were swapped in
    # the original; fixed to match the discord.py exception semantics)
    except errors.Forbidden:
        return -2 # No permission to message this channel
    except errors.HTTPException:
        return -3 # API down, Message too big, etc.
    except errors.InvalidArgument:
        return -4 # Invalid channel ID (channel deleted)
|
import turtle
# Draw a colorful spiral of 1000 growing circles on a black background.
tur = turtle.Turtle()
scr = turtle.Screen()
scr.bgcolor('black')
# tur.pencolor('white')
radius = 30
palette = ['red', 'purple', 'blue', 'green']
tur.speed(0)
tur.penup()
# tur.goto(0,200)
tur.pendown()
# The original used `while True` with a break at the 1000th step.
for step in range(1000):
    tur.circle(radius)
    tur.pencolor(palette[step % len(palette)])
    tur.forward(radius)
    tur.right(90)
    radius += 1
turtle.done()
|
'''
Underlying platform implementation for kernel debugging
with vmware gdbserver.
Msv1_0SubAuthenticationRoutine
VMWare config options...
debugStub.listen.guest64 = "TRUE" # ends up on port 8864 (or next avail)
# 32 bit target.... ( defaults to port 8832 )
debugStub.listen.guest32 = "TRUE"
debugStub.listen.guest32.remote = "TRUE" # bind to 0.0.0.0 rather than 127.0.0.1
debugStub.hideBreakpoints = "TRUE" # Enable breakpoints
# 64 bit target.... ( defaults to port 8864 )
debugStub.listen.guest64 = "TRUE"
debugStub.listen.guest64.remote = "TRUE" # bind to 0.0.0.0 rather than 127.0.0.1
debugStub.hideBreakpoints = "TRUE" # Enable breakpoints
'''
import PE
import vtrace
import envi.bits as e_bits
import envi.resolver as e_resolv
import vtrace.archs.i386 as vt_i386
import vtrace.platforms.base as vt_base
import vtrace.platforms.win32 as vt_win32
import vtrace.platforms.winkern as vt_winkern
import vtrace.platforms.gdbstub as vt_gdbstub
class VMWareMixin(vt_gdbstub.GdbStubMixin):
    """GdbStubMixin specialization for VMWare's built-in gdbserver stub."""

    def __init__(self, host=None, port=None):
        vt_gdbstub.GdbStubMixin.__init__(self, host=host, port=port)
        # mask used to truncate values to the target's pointer width
        self.bigmask = e_bits.u_maxes[ self.getPointerSize() ]
class VMWare32WindowsTrace(
        vtrace.Trace,
        VMWareMixin,
        vt_i386.i386Mixin,
        vt_base.TracerBase,
    ):
    """32-bit Windows kernel trace over VMWare's gdb stub (Python 2 module)."""

    def __init__(self, host=None, port=None):
        vtrace.Trace.__init__(self, archname='i386')
        vt_base.TracerBase.__init__(self)
        vt_i386.i386Mixin.__init__(self)
        VMWareMixin.__init__(self, host=host, port=port)
        self.setMeta('Format','pe')
        self.setMeta('Platform','Windows')
        self._break_after_bp = False # we stop directly on the bp addr

    def _getVmwareReg(self, rname):
        '''
        Use VMWare's monitor extension to get a register we wouldn't
        normally have...
        '''
        # example reply:
        #fs 0x30 base 0xffdff000 limit 0x00001fff type 0x3 s 1 dpl 0 p 1 db 1
        fsstr = self._monitorCommand('r %s' % rname)
        fsparts = fsstr.split()
        # field 3 is the segment base address
        return int(fsparts[3], 16)

    def _gdbJustAttached(self):
        # Implement the callback from the GdbStubMixin parent...
        # fs points at the KPCR on 32-bit Windows; use that to sanity-check
        # the target and to locate kernel structures.
        fsbase = self._getVmwareReg('fs')
        fs_fields = self.readMemoryFormat(fsbase, '<8I')
        # Windows has a self reference in the KPCR...
        if fs_fields[7] != fsbase:
            print [ hex(x) for x in fs_fields ]
            raise Exception('poi(fsbase+(ptr*7)) != fsbase! ( not actually windows? )')
        # register the XP-era 32-bit kernel structure definitions
        import vstruct.defs.windows.win_5_1_i386.ntoskrnl as vs_w_ntoskrnl
        self.vsbuilder.addVStructNamespace('nt', vs_w_ntoskrnl)
        self.casesens = False
        kpcr = self.getStruct('nt.KPCR', fsbase)
        kver = self.getStruct('nt.DBGKD_GET_VERSION64', kpcr.KdVersionBlock)
        #print kpcr.tree()
        print kver.tree()
        winver = vt_winkern.win_builds.get( kver.MinorVersion )
        if winver == None:
            winver = 'Untested Windows Build! (%d)' % kver.MinorVersion
        print('vtrace (vmware32): Windows Version: %s' % winver)
        kernbase = kver.KernBase & self.bigmask
        modlist = kver.PsLoadedModuleList & self.bigmask
        self.setVariable('kpcr', fsbase)
        self.setVariable('KernelBase', kernbase)
        self.setVariable('PsLoadedModuleList', modlist)
        #self.platformParseBinary = self.platformParseBinaryPe
        self.fireNotifiers(vtrace.NOTIFY_ATTACH)
        self.addLibraryBase('nt', kernbase, always=True)
        # walk the PsLoadedModuleList doubly-linked list to register every
        # loaded kernel module as a "library"
        ldr_entry = self.readMemoryFormat(modlist, '<I')[0]
        while ldr_entry != modlist:
            ldte = self.getStruct('nt.LDR_DATA_TABLE_ENTRY', ldr_entry)
            try:
                dllname = self.readMemory(ldte.FullDllName.Buffer, ldte.FullDllName.Length).decode('utf-16le')
                dllbase = ldte.DllBase & self.bigmask
                self.addLibraryBase(dllname, dllbase, always=True)
            except Exception, e:
                # best effort: a single unreadable entry should not abort the walk
                print('Trouble while parsing one...')
            ldr_entry = ldte.InLoadOrderLinks.Flink & self.bigmask
        vt_winkern.addBugCheckBreaks(self)

    def normFileName(self, libname):
        # "C:\...\foo-bar.sys" -> "foo" (basename, no extension/suffix, lower)
        basename = libname.split('\\')[-1]
        return basename.split(".")[0].split("-")[0].lower()

    def platformParseBinary(self, filename, baseaddr, normname):
        # Parse the PE headers in target memory and register its exports.
        try:
            pe = PE.peFromMemoryObject(self, baseaddr)
            for rva, ord, name in pe.getExports():
                self.addSymbol(e_resolv.Symbol(name, baseaddr+rva, 0, normname))
        except Exception, e:
            print('Error Parsing Binary (%s): %s' % (normname, e))

    def buildNewTrace(self):
        # reconnect to the same stub for a fresh trace object
        return VMWare32WindowsTrace( host=self._gdb_host, port=self._gdb_port )

    # FIXME move these to gdbstub
    def isValidPointer(self, addr):
        # Fake this out by attempting to read... ( slow/lame )
        cmd = 'm%x,%x' % (addr, 1)
        pkt = self._cmdTransact(cmd)
        return not pkt.startswith('E')

    def archActivBreakpoint(self, addr):
        self._gdbAddMemBreak(addr, 1)

    def archClearBreakpoint(self, addr):
        self._gdbDelMemBreak(addr, 1)
|
#!/usr/bin/env python3
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
from amaranth import *
from amaranth.hdl.ast import Fell
from usb_protocol.emitters import SuperSpeedDeviceDescriptorCollection
from luna import top_level_cli
from luna.gateware.platform import NullPin
from luna.gateware.usb.devices.ila import USBIntegratedLogicAnalyer, USBIntegratedLogicAnalyzerFrontend
from luna.usb3 import USBSuperSpeedDevice, SuperSpeedStreamInEndpoint
class USBSuperSpeedExample(Elaboratable):
    """ Simple example of a USB SuperSpeed device using the LUNA framework. """

    # endpoint address / size for the single bulk IN endpoint
    BULK_ENDPOINT_NUMBER = 1
    MAX_BULK_PACKET_SIZE = 1024

    def create_descriptors(self):
        """ Create the descriptors we want to use for our device. """
        descriptors = SuperSpeedDeviceDescriptorCollection()
        #
        # We'll add the major components of the descriptors we want.
        # The collection we build here will be necessary to create a standard endpoint.
        #
        # We'll need a device descriptor...
        with descriptors.DeviceDescriptor() as d:
            d.idVendor           = 0x16d0
            d.idProduct          = 0xf3b
            # We're complying with the USB 3.2 standard.
            d.bcdUSB             = 3.2
            # USB3 requires this to be "9", to indicate 2 ** 9, or 512B.
            d.bMaxPacketSize0    = 9
            d.iManufacturer      = "LUNA"
            d.iProduct           = "SuperSpeed Bulk Test"
            d.iSerialNumber      = "1234"
            d.bNumConfigurations = 1
        # ... and a description of the USB configuration we'll provide.
        with descriptors.ConfigurationDescriptor() as c:
            c.bMaxPower = 50
            with c.InterfaceDescriptor() as i:
                i.bInterfaceNumber = 0
                with i.EndpointDescriptor(add_default_superspeed=True) as e:
                    # IN endpoint (0x80 sets the direction bit)
                    e.bEndpointAddress = 0x80 | self.BULK_ENDPOINT_NUMBER
                    e.wMaxPacketSize   = self.MAX_BULK_PACKET_SIZE
        return descriptors

    def elaborate(self, platform):
        """Build the gateware: PHY, device core, and a counter data source."""
        m = Module()
        # Generate our domain clocks/resets.
        m.submodules.car = platform.clock_domain_generator()
        # Create our core PIPE PHY. Since PHY configuration is per-board, we'll just ask
        # our platform for a pre-configured USB3 PHY.
        m.submodules.phy = phy = platform.create_usb3_phy()
        # Create our core SuperSpeed device.
        m.submodules.usb = usb = USBSuperSpeedDevice(phy=phy)
        # Add our standard control endpoint to the device.
        descriptors = self.create_descriptors()
        usb.add_standard_control_endpoint(descriptors)
        # Create our example bulk endpoint.
        stream_in_ep = SuperSpeedStreamInEndpoint(
            endpoint_number=self.BULK_ENDPOINT_NUMBER,
            max_packet_size=self.MAX_BULK_PACKET_SIZE
        )
        usb.add_endpoint(stream_in_ep)
        # Create a simple, monotonically-increasing data stream, and connect that up to
        # to our streaming endpoint.
        counter   = Signal(16)
        stream_in = stream_in_ep.stream
        # Always provide our counter as the input to our stream; it will be consumed
        # whenever our stream endpoint can accept it.
        m.d.comb += [
            stream_in.data    .eq(counter),
            # all four byte lanes valid
            stream_in.valid   .eq(0b1111)
        ]
        # Increment our counter whenever our endpoint is accepting data.
        with m.If(stream_in.ready):
            m.d.ss += counter.eq(counter + 1)
        # Return our elaborated module.
        return m
if __name__ == "__main__":
    # Build (and optionally program) the example via LUNA's standard CLI.
    top_level_cli(USBSuperSpeedExample)
|
from typing import Dict
def save_row(text: str, n2: int, p2: int, smile_dict: Dict[str, str], line: str) -> None:
    """
    Task no. 3: append to a separate file each line that contains the
    particle "не" or "ни" together with smileys of both polarities.

    :param text: full text of the line being analysed
    :param n2: number of negative smileys found in the line
    :param p2: number of positive smileys found in the line
    :param smile_dict: known smileys keyed by "positive"/"negative"
        (NOTE(review): the values are concatenated with ``+`` and fed to
        ``"\\t".join`` -- they appear to be lists of str, so the declared
        ``Dict[str, str]`` annotation looks wrong; confirm upstream)
    :param line: original CSV line to write out
    :return: None
    """
    text_set = set(text.lower().split(" "))
    # only lines with at least one positive AND one negative smiley qualify
    if n2 > 0 and p2 > 0:
        smiles = "\t".join(smile_dict["positive"] + smile_dict["negative"])
        # BUG FIX: open with an explicit encoding -- the file receives
        # Cyrillic text and smileys, which can fail under a non-UTF-8
        # platform default encoding (e.g. cp1252).
        with open("files_result/not_token.csv", "a", encoding="utf-8") as file3:
            # "не" + "ни" + any smile
            if "не" in text_set and "ни" in text_set:
                file3.write(f'{line}"|"не,ни"|"{smiles}\n')
            # "не" + any smile
            elif "не" in text_set:
                file3.write(f'{line}"|"не"|"{smiles}\n')
            # "ни" + any smile
            elif "ни" in text_set:
                file3.write(f'{line}"|"ни"|"{smiles}\n')
|
# =============================================================================
# Copyright (c) 2001-2018 FLIR Systems, Inc. All Rights Reserved.
#
# This software is the confidential and proprietary information of FLIR
# Integrated Imaging Solutions, Inc. ("Confidential Information"). You
# shall not disclose such Confidential Information and shall use it only in
# accordance with the terms of the license agreement you entered into
# with FLIR Integrated Imaging Solutions, Inc. (FLIR).
#
# FLIR MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE SUITABILITY OF THE
# SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT. FLIR SHALL NOT BE LIABLE FOR ANY DAMAGES
# SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING
# THIS SOFTWARE OR ITS DERIVATIVES.
# =============================================================================
import PyCapture2
def print_build_info():
    """Print the version of the PyCapture2 library currently in use."""
    version = PyCapture2.getLibraryVersion()
    print('PyCapture2 library version: %d %d %d %d'
          % (version[0], version[1], version[2], version[3]))
    print()
def print_camera_info(cam):
    """
    Print identifying information for the given camera.

    BUG FIX: the original passed each value as a second argument to print()
    (e.g. ``print('Serial number - %d', cam_info.serialNumber)``), so the
    %d/%s placeholders were printed literally and never substituted. The
    values are now interpolated with the % operator.

    :param cam: connected PyCapture2.Camera (anything with getCameraInfo())
    """
    cam_info = cam.getCameraInfo()
    print('\n*** CAMERA INFORMATION ***\n')
    print('Serial number - %d' % cam_info.serialNumber)
    print('Camera model - %s' % cam_info.modelName)
    print('Camera vendor - %s' % cam_info.vendorName)
    print('Sensor - %s' % cam_info.sensorInfo)
    print('Resolution - %s' % cam_info.sensorResolution)
    print('Firmware version - %s' % cam_info.firmwareVersion)
    print('Firmware build time - %s' % cam_info.firmwareBuildTime)
    print()
def save_video_helper(cam, file_format, filename, framerate):
    """
    Grab 200 frames from *cam* and append them to a video file.

    :param cam:         started PyCapture2.Camera to grab frames from
    :param file_format: 'AVI', 'MJPG' or 'H264' (anything else aborts)
    :param filename:    output file name as bytes (the C API expects bytes)
    :param framerate:   playback frame rate for the output video
    """
    num_images = 200
    video = PyCapture2.FlyCapture2Video()
    for i in range(num_images):
        print('index ', i)
        try:
            print("before retrievebuffer")
            image = cam.retrieveBuffer()
            print("after retrievebuffer")
        except PyCapture2.Fc2error as fc2Err:
            # a dropped frame is not fatal; keep grabbing
            print('Error retrieving buffer : %s' % fc2Err)
            continue
        print('Grabbed image {}'.format(i))
        # the container is opened lazily on the first grabbed frame, because
        # H264 needs the image dimensions
        if (i == 0):
            if file_format == 'AVI':
                video.AVIOpen(filename, framerate)
            elif file_format == 'MJPG':
                video.MJPGOpen(filename, framerate, 75)   # 75 = JPEG quality
            elif file_format == 'H264':
                video.H264Open(filename, framerate, image.getCols(), image.getRows(), 1000000)
            else:
                print('Specified format is not available.')
                return
        video.append(image)
        print('Appended image %d...' % i)
    print('Appended {} images to {} file: {}...'.format(num_images, file_format, filename))
    video.close()
#
# Example Main
#
# Print PyCapture2 Library Information
print_build_info()
# Ensure at least one camera is connected before continuing.
bus = PyCapture2.BusManager()
num_cams = bus.getNumOfCameras()
print('Number of cameras detected: %d' % num_cams)
if not num_cams:
    print('Insufficient number of cameras. Exiting...')
    exit()
# Connect to the first camera on the bus and start streaming.
cam = PyCapture2.Camera()
cam.connect(bus.getCameraFromIndex(0))
print_camera_info(cam)
cam.startCapture()
# Frame-rate detection from the camera is disabled; a fixed rate is used.
# print('Detecting frame rate from Camera')
# fRateProp = cam.getProperty(PyCapture2.PROPERTY_TYPE.FRAME_RATE)
# framerate = fRateProp.absValue
framerate = 10
file_format = 'H264'
filename = 'SaveImageToAviEx_{}.avi'.format(file_format)
# The underlying C API expects a bytes filename, hence encode('utf-8').
save_video_helper(cam, file_format, filename.encode('utf-8'), framerate)
print('Stopping capture...')
cam.stopCapture()
cam.disconnect()
input('Done! Press Enter to exit...\n')
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'lotto.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated UI for lotto.ui.

    WARNING: generated code — change lotto.ui and regenerate rather than
    editing the widget definitions below.
    """

    def setupUi(self, MainWindow):
        """Create and position every widget on an 800x600 main window."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        # Global stylesheet: bold 15px text for every widget.
        MainWindow.setStyleSheet("* {\n"
                                 " font-size: 15px;\n"
                                 " font-weight: bold;\n"
                                 "}")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Label above the first row (text "Onsdags Lotto:" set in retranslateUi).
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(60, 60, 151, 16))
        self.label.setObjectName("label")
        # First row: six line edits in a horizontal layout.
        self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(60, 140, 331, 41))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.lineEdit_4 = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.horizontalLayout_2.addWidget(self.lineEdit_4)
        self.lineEdit_3 = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.horizontalLayout_2.addWidget(self.lineEdit_3)
        self.lineEdit_6 = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
        self.lineEdit_6.setObjectName("lineEdit_6")
        self.horizontalLayout_2.addWidget(self.lineEdit_6)
        self.lineEdit_5 = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
        self.lineEdit_5.setObjectName("lineEdit_5")
        self.horizontalLayout_2.addWidget(self.lineEdit_5)
        self.lineEdit_2 = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.horizontalLayout_2.addWidget(self.lineEdit_2)
        self.lineEdit = QtWidgets.QLineEdit(self.horizontalLayoutWidget)
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout_2.addWidget(self.lineEdit)
        # Second row: seven line edits in a horizontal layout.
        self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
        self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(60, 390, 391, 41))
        self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
        self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.lineEdit_13 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)
        self.lineEdit_13.setObjectName("lineEdit_13")
        self.horizontalLayout_3.addWidget(self.lineEdit_13)
        self.lineEdit_12 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)
        self.lineEdit_12.setObjectName("lineEdit_12")
        self.horizontalLayout_3.addWidget(self.lineEdit_12)
        self.lineEdit_11 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)
        self.lineEdit_11.setObjectName("lineEdit_11")
        self.horizontalLayout_3.addWidget(self.lineEdit_11)
        self.lineEdit_10 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)
        self.lineEdit_10.setObjectName("lineEdit_10")
        self.horizontalLayout_3.addWidget(self.lineEdit_10)
        self.lineEdit_9 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)
        self.lineEdit_9.setObjectName("lineEdit_9")
        self.horizontalLayout_3.addWidget(self.lineEdit_9)
        self.lineEdit_8 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)
        self.lineEdit_8.setObjectName("lineEdit_8")
        self.horizontalLayout_3.addWidget(self.lineEdit_8)
        self.lineEdit_7 = QtWidgets.QLineEdit(self.horizontalLayoutWidget_2)
        self.lineEdit_7.setObjectName("lineEdit_7")
        self.horizontalLayout_3.addWidget(self.lineEdit_7)
        # Label above the second row (text "Lørdags Lotto:" set in retranslateUi).
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(60, 310, 151, 16))
        self.label_2.setObjectName("label_2")
        # One "Tjek Tal" button per row.
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(60, 90, 91, 41))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(60, 340, 91, 41))
        self.pushButton_2.setObjectName("pushButton_2")
        # Result labels start out empty.
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(60, 200, 601, 21))
        self.label_3.setText("")
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(60, 450, 601, 21))
        self.label_4.setText("")
        self.label_4.setObjectName("label_4")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set every user-visible string (kept translatable via QCoreApplication)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "Onsdags Lotto:"))
        self.label_2.setText(_translate("MainWindow", "Lørdags Lotto:"))
        self.pushButton.setText(_translate("MainWindow", "Tjek Tal"))
        self.pushButton_2.setText(_translate("MainWindow", "Tjek Tal"))
|
class Cup1:
    """A cup whose colour and contents are plain public attributes."""

    def __init__(self):
        # Both attributes are public and start out unset.
        self.color = None
        self.content = None

    def fill(self, beverage):
        """Put `beverage` into the cup."""
        self.content = beverage

    def empty(self):
        """Discard whatever the cup currently holds."""
        self.content = None

    def __str__(self):
        # Raises TypeError when either attribute is still None, as before.
        return " ".join([self.color, self.content])
# Demonstrate direct attribute access on the fully public Cup1.
cup1 = Cup1()
cup1.color = "red"
cup1.content = "tea"
print(cup1)
cup1.empty()
cup1.content = "coffee"
print(cup1)
class Cup2:
    """A cup whose contents use the single-underscore "protected" convention."""

    def __init__(self):
        self.color = None  # public attribute
        # A single leading underscore means "internal, please don't touch" by
        # convention only — Python does not enforce it.
        self._content = None

    def fill(self, bevarage):
        """Store `bevarage` as the cup's contents."""
        self._content = bevarage

    def empty(self):
        """Remove the cup's contents."""
        self._content = None

    def __str__(self):
        # Raises TypeError when either value is still None, as before.
        return " ".join([self.color, self._content])
# The single underscore is only a convention: _content is still reachable
# from the outside.
cup2 = Cup2()
cup2.color = "blue"
cup2._content = "tea"
print(cup2)
class Cup3:
    """A cup that keeps its contents in a name-mangled "private" attribute."""

    def __init__(self, color):
        self._color = color  # protected by convention
        # The double underscore triggers name mangling to _Cup3__content.
        self.__content = None

    def fill(self, bevarage):
        """Store `bevarage` in the cup."""
        self.__content = bevarage

    def empty(self):
        """Remove the cup's contents."""
        self.__content = None

    def __str__(self):
        # Raises TypeError when the cup is empty (None), as before.
        return " ".join([self._color, self.__content])
cup3 = Cup3("blue")
# Name mangling: the "private" attribute must be reached as _Cup3__content.
#cup3._Cup3__content = "tea"
cup3._Cup3__content = "çay"
print(cup3)
import re
from rest_framework import serializers
def correctness_struct_serial(value):
    """Validate a device serial of the form 'G123' / 'M123' / 'S123'.

    Raises:
        serializers.ValidationError: when `value` does not match the pattern.
    """
    # The trailing $ rejects extra characters (e.g. 'G123X'), which the
    # original unanchored pattern silently accepted.
    pattern_object = re.compile(r'^[GMS][0-9]{3}$')
    is_match = pattern_object.match(value)
    if not is_match:
        # Bug fix: the original constructed the ValidationError but never
        # raised it, so invalid serials passed validation silently.
        raise serializers.ValidationError("El serial del dispositivo no es valido")
|
def power(x, n):
    """Return x multiplied by itself n times (n <= 0 yields 1)."""
    result = 1
    for _ in range(n):
        result *= x
    # Returning here exits the function with the accumulated product.
    return result
# Passing values to functions - When we call a function we have to pass values for the arguments and this is done exactly the same way as assigning a value to a name
print(power(3, 5)) # Like an implicit assignment x = 3 and n = 5; prints 243
# Same rules apply for mutable and immutable values
# Immutable values will not be affected at calling point
# Mutable values will be affected
def update(list, i, v):
    """Set list[i] = v when i is a valid non-negative index; return success as bool.

    Note: the parameter named `list` shadows the builtin; the name is kept
    for interface compatibility with existing callers.
    """
    if 0 <= i < len(list):
        list[i] = v
        return True
    # Out of range: the caller's list is untouched.  Rebinding the local
    # name `v` has no effect outside this function (kept from the original
    # to illustrate exactly that).
    v = v + 1
    return False
ns = [3, 11, 12]
z = 8
print(update(ns, 2, z))  # True: index 2 is valid, ns becomes [3, 11, 8]
print(ns) # If we pass through parameter a value which is mutable it can get updated in the function and this is sometimes called a side effect.
print(update(ns, 4, z))  # False: index 4 is out of range
print(z) # If we pass through parameter a value which is immutable then the value doesn't change no matter what we do inside the function
# Return value may be ignored. If there is no return, function ends when last statement is reached. For example, a function which displays an error or warning message. Such a function just has to display a message and not compute or return anything.
# Scope of names
# Names within a function have local scope i.e. names within a function are disjoint from names outside a function.
def stupid(x):
    """Return x unchanged; the local n exists only to demonstrate local scope."""
    n = 17  # purely local — never touches any module-level n
    return x
n = 7
print(stupid(28))  # 28: the argument comes back unchanged
print(n)  # still 7: the n inside stupid() is a separate, local name
# A function must be defined before it is invoked
def f(x):
    """Delegate to g with the successor of x; g only needs to exist at call time."""
    return g(1 + x)
def g(y):
    """Return y plus three."""
    return 3 + y
print(f(77))  # 81: f(77) -> g(78) -> 81
# If we define function g after invoking function f we will get an error saying NameError: name 'g' is not defined
# A function can call itself - recursion
def factorial(n):
    """Return n! computed recursively; any n <= 0 yields 1 (original base case)."""
    if n > 0:
        return n * factorial(n - 1)
    return 1
print(factorial(5))  # 120
# Functions are a good way to organise code in logical chunks
# Passing arguments to a function is like assigning values to names
# Only mutable values can be updated
# Names in functions have local scope
# Functions must be defined before use
# Recursion - a function can call itself
# Drive a turtle from a command file: one "COMMAND:ARG[:ARG]" line per step.
with open(r"C:\Users\admin\OneDrive\デスクトップ\python1\07\20k1026-06-turtle.txt") as file:
    import turtle
    t = turtle.Pen()
    for line in file:
        # data[1:] keep their trailing newline; float() tolerates the whitespace.
        data = line.split(":")
        if data[0] == "FORWARD":
            t.forward(float(data[1]))
        if data[0] == "LEFT":
            t.left(float(data[1]))
        if data[0] == "RIGHT":
            t.right(float(data[1]))
        if data[0] == "UP":
            # Pen up: subsequent moves don't draw.
            t.up()
        if data[0] == "DOWN":
            t.down()
        if data[0] == "GOTO":
            # GOTO carries two arguments: x and y.
            t.goto(float(data[1]),float(data[2]))
# 
from pathlib import Path
import pandas as pd
import shutil
import os
WORKING_FILES = Path(r"working_files")
HTML_1 = r'<html><body>'
HTML_2 = r'</body></html>'
PARENT = WORKING_FILES.parent
RESULT = PARENT / 'result'

# Recreate the output folder from scratch on every run.
if RESULT.exists():
    shutil.rmtree(RESULT)
os.mkdir(RESULT)

for filepath in WORKING_FILES.rglob("*.xlsx"):
    # One sub-folder per workbook, named after the file without its extension.
    sub_folder_name = filepath.stem
    sub_folder_path = RESULT / sub_folder_name
    if not sub_folder_path.exists():
        os.mkdir(sub_folder_path)
    # Bug fix: pandas' keyword is `keep_default_na`; the original
    # `use_default_na` raises TypeError on read_excel.
    df = pd.read_excel(filepath, dtype=object, keep_default_na=False)
    for index, row in df.iterrows():
        new_name = 'new_name'
        suffix = 'suffix'
        english_html = row['HTML']
        file_num = index + 2  # +2: 1-based row numbering plus the header row
        filename = "_".join([str(file_num), new_name, suffix]) + '.doc'
        # Bug fix: renamed from `filepath` so the outer rglob loop variable
        # is no longer clobbered inside the row loop.
        out_path = sub_folder_path / filename
        with open(out_path, 'w', encoding="utf-8") as f:
            f.write(HTML_1)
            f.write(english_html)
            f.write(HTML_2)
|
import sqlite3
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMainWindow, QTableWidgetItem, QWidget
from PyQt5.QtWidgets import QMessageBox
from main_design import Ui_MainWindow
from addEditCoffeeForm import Ui_Form
class CafeCoffee(QMainWindow, Ui_MainWindow):
    """Main window listing coffees from data/coffee.sqlite in a table."""

    def __init__(self):
        super().__init__()
        # Id of the coffee row being edited; overwritten in widget_show(0).
        self.id = 1
        self.setupUi(self)
        self.initUI()

    def initUI(self):
        """Set the window title, connect the buttons, and load the table."""
        self.setWindowTitle('Кафе «Кофе»')
        # widget_show(1) opens the "add" mode, widget_show(0) the "edit" mode.
        self.add_btn.clicked.connect(lambda: self.widget_show(1))
        # Editing requires a selected row; otherwise show an error box.
        self.edit_btn.clicked.connect(lambda: self.widget_show(0) if
                                      self.coffee_table.selectedItems()
                                      else QMessageBox.critical(self,
                                      "Ошибка ", "Выберите элемент", QMessageBox.Ok))
        self.update_btn.clicked.connect(self.initialization)
        self.initialization()

    def initialization(self):
        """(Re)load every coffee row from the database into the table widget."""
        connection = sqlite3.connect("./data/coffee.sqlite")
        cursor = connection.cursor()
        self.data = [list(i) for i in cursor.execute("SELECT * FROM coffee")]
        length = len(self.data)
        self.coffee_table.setRowCount(length)
        for i in range(length):
            self.coffee_table.setItem(i, 0, QTableWidgetItem(str(self.data[i][0])))
            # NOTE(review): ids are f-string-interpolated into the SQL text.
            # They come from this database here, but parameterized queries
            # would still be safer.
            sql_request = f"SELECT DISTINCT sorts.title FROM coffee LEFT JOIN sorts ON " \
                          f"sorts.id = coffee.sort_id WHERE sorts.id = {self.data[i][1]}"
            # str(*i) unpacks each 1-tuple row; the outer str(*) unpacks the
            # 1-element list (an empty result yields the empty string).
            sort = str(*[str(*i) for i in cursor.execute(sql_request)])
            sql_request = f"SELECT DISTINCT degrees.title FROM coffee LEFT JOIN degrees ON " \
                          f"degrees.id = coffee.degree_roasting_id WHERE degrees.id = " \
                          f"{int(self.data[i][2])}"
            degree = str(*[str(*i) for i in cursor.execute(sql_request)])
            sql_request = f"SELECT DISTINCT types.title FROM coffee LEFT JOIN types ON " \
                          f"types.id = coffee.type_id WHERE types.id = {self.data[i][3]}"
            type_coffee = str(*[str(*i) for i in cursor.execute(sql_request)])
            self.coffee_table.setItem(i, 1, QTableWidgetItem(sort))
            self.coffee_table.setItem(i, 2, QTableWidgetItem(degree))
            self.coffee_table.setItem(i, 3, QTableWidgetItem(type_coffee))
            self.coffee_table.setItem(i, 4, QTableWidgetItem(str(self.data[i][4])))
            self.coffee_table.setItem(i, 5, QTableWidgetItem(f"{self.data[i][5]}₽"))
            self.coffee_table.setItem(i, 6, QTableWidgetItem(f"{self.data[i][6]}ml"))

    def widget_show(self, t):
        """Open AddEditCoffee: t == 0 edits the selected row, t == 1 adds a new one."""
        if t == 0:
            self.id = self.data[self.coffee_table.currentRow()][0]
        self.widget = AddEditCoffee(t, self.id)
        self.widget.show()
class AddEditCoffee(QWidget, Ui_Form):
    """Two-tab widget: one tab adds a new coffee, the other edits an existing one."""

    def __init__(self, type_, id_):
        super().__init__()
        # type_: 0 -> edit mode, 1 -> add mode; id_: id of the coffee being edited.
        self.type = type_
        self.id = id_
        self.connection = sqlite3.connect("data/coffee.sqlite")
        self.cursor = self.connection.cursor()
        self.setupUi(self)
        self.initUI()

    def initUI(self):
        """Set the window title, connect the buttons, hide the unused tab."""
        self.setWindowTitle('Виджет добавления/изменения кофе')
        self.add_btn.clicked.connect(self.check_info)
        self.edit_btn.clicked.connect(self.edit_coffee)
        self.hide_tabs()

    def check_info(self):
        """Validate the add-form fields before inserting a row."""
        if self.description_line.text() and self.price_line.text() and self.volume_line.text():
            # Price and volume must be plain digit strings.
            if self.price_line.text().isdigit() and self.volume_line.text().isdigit():
                self.add_coffee()
            else:
                QMessageBox.critical(self, "Ошибка ", "Цена и объём указываются числами",
                                     QMessageBox.Ok)
        else:
            QMessageBox.critical(self, "Ошибка ", "Неверные данные", QMessageBox.Ok)

    def hide_tabs(self):
        """Keep only the tab matching the current mode (0 = edit, 1 = add)."""
        if self.type == 0:
            self.tab_widget.removeTab(0)
            self.edit_tab_init()
        else:
            self.tab_widget.removeTab(1)
            self.add_tab_init()

    def add_tab_init(self):
        """Fill the add-tab combo boxes with all sorts, degrees and types."""
        sorts = [str(*i) for i in self.cursor.execute("SELECT title FROM sorts")]
        for item in sorts:
            self.sort_box.addItem(item)
        degrees = [str(*i) for i in self.cursor.execute("SELECT title FROM degrees")]
        for item in degrees:
            self.degree_box.addItem(item)
        types = [str(*i) for i in self.cursor.execute("SELECT title FROM types")]
        for item in types:
            self.type_box.addItem(item)

    def edit_tab_init(self):
        """Fill the edit-tab widgets and preselect the stored values for self.id."""
        # NOTE(review): self.id is interpolated into the SQL text; it originates
        # from the database, but parameterized queries would still be safer.
        sorts = [str(*i) for i in self.cursor.execute("SELECT title FROM sorts")]
        cur_index = int(*[str(*i) for i in self.cursor.execute("SELECT sorts.id FROM "
                                                               "coffee LEFT JOIN sorts ON "
                                                               "sorts.id = coffee.sort_id "
                                                               f"WHERE coffee.id = {self.id}")])
        for item in sorts:
            self.sort_box_edit.addItem(item)
        # Combo-box indexes are 0-based while database ids are 1-based.
        self.sort_box_edit.setCurrentIndex(cur_index - 1)
        degrees = [str(*i) for i in self.cursor.execute("SELECT title FROM degrees")]
        cur_index = int(*[str(*i) for i in self.cursor.execute("SELECT degrees.id FROM "
                                                               "coffee LEFT JOIN degrees ON "
                                                               "degrees.id = "
                                                               "coffee.degree_roasting_id "
                                                               f"WHERE coffee.id = {self.id}")])
        for item in degrees:
            self.degree_box_edit.addItem(item)
        self.degree_box_edit.setCurrentIndex(cur_index - 1)
        types = [str(*i) for i in self.cursor.execute("SELECT title FROM types")]
        cur_index = int(*[str(*i) for i in self.cursor.execute("SELECT types.id FROM "
                                                               "coffee LEFT JOIN types ON "
                                                               "types.id = coffee.type_id "
                                                               f"WHERE coffee.id = {self.id}")])
        for item in types:
            self.type_box_edit.addItem(item)
        self.type_box_edit.setCurrentIndex(cur_index - 1)
        description = str(*[str(*i) for i in self.cursor.execute("SELECT flavor_description "
                                                                 f"FROM coffee WHERE id = "
                                                                 f"{self.id}")])
        self.description_line_edit.setText(description)
        price = str(*[str(*i) for i in self.cursor.execute("SELECT price "
                                                           f"FROM coffee WHERE id = "
                                                           f"{self.id}")])
        self.price_line_edit.setText(price)
        volume = str(*[str(*i) for i in self.cursor.execute("SELECT volume "
                                                            f"FROM coffee WHERE id = "
                                                            f"{self.id}")])
        self.volume_line_edit.setText(volume)

    def add_coffee(self):
        """Insert a new coffee row built from the add-tab widgets."""
        # New id = current maximum id + 1.
        sql_request = "SELECT id FROM coffee WHERE ID = (SELECT MAX(id) FROM coffee)"
        id_ = int(*[str(*i) for i in self.cursor.execute(sql_request)]) + 1
        sort = self.sort_box.currentIndex() + 1
        degree = self.degree_box.currentIndex() + 1
        # NOTE(review): `type` shadows the builtin; left unchanged here.
        type = self.type_box.currentIndex() + 1
        description = self.description_line.text()
        price = self.price_line.text()
        volume = self.volume_line.text()
        # NOTE(review): user-entered text is interpolated into the SQL
        # statement — SQL-injection prone; should use execute(sql, params).
        sql_request = "INSERT INTO coffee(id,sort_id,degree_roasting_id,type_id,"\
                      f"flavor_description,price,volume) VALUES("\
                      f"{id_},{sort},{degree},{type},'{description}',{price},{volume})"
        self.cursor.execute(sql_request)
        self.connection.commit()
        AddEditCoffee.close(self)

    def edit_coffee(self):
        """Write every edit-tab field back to the row with id == self.id."""
        sort = self.sort_box_edit.currentIndex()
        sql_request = f"UPDATE coffee SET sort_id = {sort + 1} WHERE id = {self.id}"
        self.cursor.execute(sql_request)
        degree = self.degree_box_edit.currentIndex()
        sql_request = f"UPDATE coffee SET degree_roasting_id = {degree + 1} WHERE id = {self.id}"
        self.cursor.execute(sql_request)
        type = self.type_box_edit.currentIndex()
        sql_request = f"UPDATE coffee SET type_id = {type + 1} WHERE id = {self.id}"
        self.cursor.execute(sql_request)
        description = self.description_line_edit.text()
        # NOTE(review): free-text field interpolated into SQL — injection prone.
        sql_request = f"UPDATE coffee SET flavor_description = '{description}' WHERE id = " \
                      f"{self.id}"
        self.cursor.execute(sql_request)
        price = self.price_line_edit.text()
        sql_request = f"UPDATE coffee SET price = {price} WHERE id = {self.id}"
        self.cursor.execute(sql_request)
        volume = self.volume_line_edit.text()
        sql_request = f"UPDATE coffee SET volume = {volume} WHERE id = {self.id}"
        self.cursor.execute(sql_request)
        self.connection.commit()
        AddEditCoffee.close(self)
if __name__ == '__main__':
    # Standard Qt bootstrap: build the app, show the main window, run the loop.
    app = QApplication(sys.argv)
    ex = CafeCoffee()
    ex.show()
    sys.exit(app.exec())
|
from flask import Flask, request, redirect, render_template
from flask_restful import Resource, Api
import json, re, random, string
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker
from db import URLs, Base
# connect to the db
e = create_engine('sqlite:///hackerEarth.db')
Base.metadata.bind = e
app = Flask(__name__)
api = Api(app)
# Host prefix prepended to every generated short URL.
serverName = "deopa.herokuapp.com/"
@app.route('/')
def render_static():
    """Serve the landing page."""
    return render_template('index.html')
# checks whether the long URL is valid or not
# Compiled once at import time instead of on every call (pattern unchanged).
_URL_REGEX = re.compile(
    r'^(?:http|ftp)s?://'  # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or ipv4
    r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or ipv6
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)


def isValidURL(url):
    """Return a match object when `url` looks like a valid http/https/ftp URL, else None."""
    return _URL_REGEX.match(url)
# generates short URL for a valid URL
def generateShortURL():
    """Return a random 8-character alphanumeric slug."""
    alphabet = string.ascii_letters + string.digits
    # NOTE: random, not secrets — slugs are not security tokens here.
    return ''.join(random.choice(alphabet) for _ in range(8))
# save the pair of actual URL and short URL in DB
def saveURL(longURL, shortURL):
    """Persist a (long URL, short URL) pair with a zeroed visit counter."""
    # Fresh session bound to the module-level engine.
    session = sessionmaker(bind=e)()
    # New mappings always start with no recorded visits.
    session.add(URLs(longURL=longURL, shortURL=shortURL, visitCount=0))
    session.commit()
# get data of a short URL
def getURLData(url):
    """Return the URLs row whose shortURL equals `url`, or None when absent."""
    session = sessionmaker(bind=e)()
    return session.query(URLs).filter(URLs.shortURL == url).first()
class createShortURL(Resource):
    """POST endpoint that shortens a single long URL."""

    def post(self):
        longURL = request.form["long_url"]
        # Reject malformed URLs up front.
        if not isValidURL(longURL):
            return {"status": "FAILED", "status_codes": ["INVALID_URLS"]}
        shortURL = generateShortURL()
        saveURL(longURL, shortURL)
        return {"short_url": serverName + shortURL, "status": "OK", "status_codes": []}
class createShortURLs(Resource):
    """POST endpoint that shortens several long URLs in one request."""

    def post(self):
        longURLs = json.loads(request.form["long_urls"])
        # Partition the input; a single bad URL fails the whole batch.
        invalidURLs = [u for u in longURLs if not isValidURL(u)]
        if invalidURLs:
            return {"invalid_urls": invalidURLs, "status": "FAILED",
                    "status_codes": ["INVALID_URLS"]}
        short_urls = {}
        for u in longURLs:
            slug = generateShortURL()
            saveURL(u, slug)
            short_urls[u] = serverName + slug
        return {"short_urls": short_urls, "invalid_urls": [], "status": "OK",
                "status_codes": []}
class getLongURL(Resource):
    """POST endpoint that resolves one short URL back to its long URL."""

    def post(self):
        shortURL = request.form["short_url"]
        # The slug is the last path segment of the submitted short URL.
        hashCode = shortURL.split("/")[-1]
        record = getURLData(hashCode)
        if record is None:
            return {"status": "FAILED", "status_codes": ["SHORT_URLS_NOT_FOUND"]}
        return {"long_url": record.longURL, "status": "OK", "status_codes": []}
class getLongURLs(Resource):
    """POST endpoint that resolves several short URLs in one request."""

    def post(self):
        shortURLs = json.loads(request.form["short_urls"])
        long_urls = {}
        invalidURLs = []
        for url in shortURLs:
            slug = url.split("/")[-1]  # slug is the last path segment
            record = getURLData(slug)
            if record is None:
                invalidURLs.append(url)
            else:
                long_urls[url] = record.longURL
        # Any unknown short URL fails the whole request.
        if invalidURLs:
            return {"invalid_urls": invalidURLs, "status": "FAILED",
                    "status_codes": ["SHORT_URLS_NOT_FOUND"]}
        return {"long_urls": long_urls, "invalid_urls": [], "status": "OK",
                "status_codes": []}
class accessServer(Resource):
    """GET endpoint that redirects a short URL to its target and counts the visit."""

    def get(self, shortURL):
        session = sessionmaker(bind=e)()
        record = session.query(URLs).filter(URLs.shortURL == shortURL).first()
        if record is None:
            return {"status": "FAILED", "status_codes": ["SHORT_URLS_NOT_FOUND"]}
        # Record the visit before redirecting.
        record.visitCount = record.visitCount + 1
        session.commit()
        return redirect(record.longURL, code=302)
class countVisits(Resource):
    """POST endpoint reporting how many times a short URL has been visited."""

    def post(self):
        # The slug is the last path segment of the submitted short URL.
        hashCode = request.form["short_url"].split("/")[-1]
        record = getURLData(hashCode)
        if record is None:
            return {"status": "FAILED", "status_codes": ["SHORT_URLS_NOT_FOUND"]}
        return {"count": record.visitCount, "status": "OK", "status_codes": []}
class CleanURLs(Resource):
    """GET endpoint that deletes every stored URL mapping."""

    def get(self):
        session = sessionmaker(bind=e)()
        session.query(URLs).delete()
        session.commit()
        session.close()
        # Bug fix: status_codes was the literal string "[]" here while every
        # other endpoint returns an actual empty list.
        return {"status": 'OK', "status_codes": []}
# api.add_resource
# create short URL for single URL
api.add_resource(createShortURL, '/fetch/short-url/', methods=['POST'])
# create short URL for Multiple URLs
api.add_resource(createShortURLs, '/fetch/short-urls/', methods=['POST'])
# get actual URL of a single short URL
api.add_resource(getLongURL, '/fetch/long-url/', methods=['POST'])
# get actual URL of Multiple short URLs
api.add_resource(getLongURLs, '/fetch/long-urls/', methods=['POST'])
# get count of number of times a short URL has been accessed
api.add_resource(countVisits, '/fetch/count/')
# truncate all data from DB
api.add_resource(CleanURLs, '/clean-urls/')
# access a short url (catch-all path parameter)
api.add_resource(accessServer, '/<shortURL>/')
if __name__ == '__main__':
    # Debug server is for local development only.
    app.run(debug=True)
import time
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import torchvision
import matplotlib.pyplot as plt
from PIL import Image
import sys
sys.path.append('F:/anaconda3/Lib/site-packages')
import d2lzh_pytorch as d2l
# Prefer the GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
'''
常用的图像增广方法:
我们来读取一张形状为400×500(高和宽分别为400像素和500像素)的图像作为实验的样例。
'''
# Sample image used by every augmentation demo below.
img = Image.open('./img/cat1.jpg')
# plt.imshow(img)
'''
下面定义绘图函数show_images
'''
# This function is also saved in the d2lzh_pytorch package for later reuse.
def show_images(imgs, num_rows, num_cols, scale=2):
    """Display imgs on a num_rows x num_cols grid of axes and return the axes array."""
    figsize = (num_cols * scale, num_rows * scale)
    _, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
    for row in range(num_rows):
        for col in range(num_cols):
            ax = axes[row][col]
            ax.imshow(imgs[row * num_cols + col])
            # Hide both tick axes so only the pictures are visible.
            ax.axes.get_xaxis().set_visible(False)
            ax.axes.get_yaxis().set_visible(False)
    return axes
def apply(img, aug, num_rows=2, num_cols=4, scale=1.5):
    """Run augmentation `aug` on img num_rows*num_cols times and show the grid."""
    augmented = [aug(img) for _ in range(num_rows * num_cols)]
    show_images(augmented, num_rows, num_cols, scale)
'''
翻转和裁剪:
左右翻转图像通常不改变物体的类别。它是最早也是最广泛使用的一种图像增广方法。
下面我们通过torchvision.transforms模块创建RandomHorizontalFlip实例来实现一半概率的图像水平(左右)翻转。
'''
# apply(img, torchvision.transforms.RandomHorizontalFlip())
'''
上下翻转不如左右翻转通用。但是至少对于样例图像,上下翻转不会造成识别障碍。
下面我们创建RandomVerticalFlip实例来实现一半概率的图像垂直(上下)翻转。
'''
# apply(img, torchvision.transforms.RandomVerticalFlip())
'''
在下面的代码里,我们每次随机裁剪出一块面积为原面积10%∼100%的区域,且该区域的宽和高之比随机取自0.5∼2,
然后再将该区域的宽和高分别缩放到200像素。若无特殊说明,本节中a和b之间的随机数指的是从区间[a,b]中随机均匀采样所得到的连续值。
'''
shape_aug = torchvision.transforms.RandomResizedCrop(200, scale=(0.1, 1), ratio=(0.5, 2))
# class torchvision.transforms.RandomResizedCrop(size, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=2)
# Purpose: crop a random area with a random aspect ratio, then resize to `size`.
# Parameters:
#   size: output resolution of the crop
#   scale: sampled crop area as a fraction of the original image (10%-100% here)
#   ratio: sampled aspect-ratio range of the crop (0.5-2 here)
#   interpolation: resampling method used for the final resize
print(shape_aug)
# apply(img, shape_aug)
'''
变化颜色:
另一类增广方法是变化颜色。我们可以从4个方面改变图像的颜色:亮度(brightness)、对比度(contrast)、饱和度(saturation)和色调(hue)。
在下面的例子里,我们将图像的亮度随机变化为原图亮度的50%(1−0.5)∼150%(1+0.5)。
'''
# apply(img, torchvision.transforms.ColorJitter(brightness=0.5))
'''
我们也可以随机变化图像的色调。
'''
# apply(img, torchvision.transforms.ColorJitter(hue=0.5))
'''
类似地,我们也可以随机变化图像的对比度。
'''
# apply(img, torchvision.transforms.ColorJitter(contrast=0.5))
'''
我们也可以同时设置如何随机变化图像的亮度(brightness)、对比度(contrast)、饱和度(saturation)和色调(hue)。
'''
# Jitter all four colour properties at once.
color_aug = torchvision.transforms.ColorJitter(
    brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5)
# apply(img, color_aug)
# transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0)
# Purpose: randomly adjust brightness, contrast, saturation, and hue.
# brightness: a single factor a samples from [max(0, 1-a), 1+a];
#             a pair (a, b) samples from [a, b]
# contrast: same convention as brightness
# saturation: same convention as brightness
# hue: a single factor a samples from [-a, a], with 0 <= a <= 0.5;
#      a pair (a, b) samples from [a, b], with -0.5 <= a <= b <= 0.5
'''
叠加多个图像增广方法:
实际应用中我们会将多个图像增广方法叠加使用。我们可以通过Compose实例将上面定义的多个图像增广方法叠加起来,再应用到每张图像之上。
'''
# Chain flip, colour jitter and random crop; Compose applies them in order.
augs = torchvision.transforms.Compose([
    torchvision.transforms.RandomHorizontalFlip(),
    color_aug,
    shape_aug]
)
apply(img, augs)
'''
使用图像增广训练模型:
下面我们来看一个将图像增广应用在实际训练中的例子。这里我们使用CIFAR-10数据集,而不是之前我们一直使用的Fashion-MNIST数据集。
这是因为Fashion-MNIST数据集中物体的位置和尺寸都已经经过归一化处理,而CIFAR-10数据集中物体的颜色和大小区别更加显著。
下面展示了CIFAR-10数据集中前32张训练图像。
'''
all_imges = torchvision.datasets.CIFAR10(train=True, root="./Datasets/CIFAR-10", download=True)
# Every element of all_imges is an (image, label) pair; show the first 32 images.
show_images([all_imges[i][0] for i in range(32)], 4, 8, scale=0.8)
'''
为了在预测时得到确定的结果,我们通常只将图像增广应用在训练样本上,而不在预测时使用含随机操作的图像增广。
在这里我们只使用最简单的随机左右翻转。此外,我们使用ToTensor将小批量图像转成PyTorch需要的格式,
即形状为(批量大小, 通道数, 高, 宽)、值域在0到1之间且类型为32位浮点数。
'''
# Training pipeline: random horizontal flip, then conversion to a float32
# tensor with values in [0, 1].
flip_aug = torchvision.transforms.Compose([
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor()]
)
# Evaluation pipeline: tensor conversion only — no randomness at test time.
no_aug = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()]
)
'''
接下来我们定义一个辅助函数来方便读取图像并应用图像增广。有关DataLoader的详细介绍,可参考更早的3.5节图像分类数据集(Fashion-MNIST)。
'''
num_workers = 0
def load_cifar10(is_train, augs, batch_size, root="./Datasets/CIFAR-10"):
    """Build a DataLoader over CIFAR-10 applying transform `augs` per image."""
    ds = torchvision.datasets.CIFAR10(root=root, train=is_train, transform=augs, download=True)
    # Shuffle only the training split.
    return DataLoader(ds, batch_size=batch_size, shuffle=is_train, num_workers=num_workers)
'''
使用图像增广训练模型
'''
# This function is also saved in the d2lzh_pytorch package for later reuse.
def train(train_iter, test_iter, net, loss, optimizer, device, num_epochs):
    """Optimize `net` on train_iter, printing loss/accuracy stats per epoch."""
    net = net.to(device)
    print("training on ", device)
    batch_count = 0
    for epoch in range(num_epochs):
        # Per-epoch accumulators: summed loss, correct count, sample count, start time.
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        for X, y in train_iter:
            X = X.to(device)
            y = y.to(device)
            y_hat = net(X)
            # print(y_hat, y_hat.size()) # 256*10
            # print('-'*100)
            # print(y, y.shape) # 256
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_l_sum += l.cpu().item()
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
            # argmax(dim=1) gives each row's predicted class; comparing with y
            # yields booleans whose sum() is the number of correct predictions.
            n += y.shape[0]
            batch_count += 1
        test_acc = d2l.evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
              % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
'''
然后就可以定义train_with_data_aug函数使用图像增广来训练模型了。该函数使用Adam算法作为训练使用的优化算法,
然后将图像增广应用于训练数据集之上,最后调用刚才定义的train函数训练并评价模型
'''
def train_with_data_aug(train_augs, test_augs, lr=0.001):
    """Train ResNet-18 on CIFAR-10 with separate train/test augmentation pipelines."""
    batch_size = 256
    net = d2l.resnet18(10)  # 10 output classes for CIFAR-10
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    loss = torch.nn.CrossEntropyLoss()
    train_iter = load_cifar10(True, train_augs, batch_size)
    test_iter = load_cifar10(False, test_augs, batch_size)
    train(train_iter, test_iter, net, loss, optimizer, device, num_epochs=10)
'''
下面使用随机左右翻转的图像增广来训练模型。
'''
# Train with random horizontal flips applied to the training set only.
train_with_data_aug(flip_aug, no_aug)
# a = torch.tensor([1, 2, 3])
# b = torch.tensor((1, 2, 2))
# print((a == b)) # tensor([ True, True, False])
# print((a == b).sum()) # tensor(2)
'''
图像增广基于现有训练数据生成随机图像从而应对过拟合。
为了在预测时得到确定的结果,通常只将图像增广应用在训练样本上,而不在预测时使用含随机操作的图像增广。
可以从torchvision的transforms模块中获取有关图片增广的类。
''' |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:hua
from flask import Blueprint, render_template, redirect
# Blueprint grouping the /user endpoints; registered with the app elsewhere.
user = Blueprint('user',__name__)
# Bug fix: Flask's route option is `methods`, not `method` — the original
# raised "TypeError: unexpected keyword argument 'method'" at registration.
@user.route('/index', methods=['GET'])
def index():
    """Return the user index placeholder response."""
    return 'user/index'
@user.route('/add')
def add():
    """Return the user-add placeholder response."""
    return 'user_add'
@user.route('/show')
def show():
    """Return the user-show placeholder response."""
    return 'user_show'
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load project cloudsql data into Inventory."""
import json
from dateutil import parser as dateutil_parser
from google.cloud.security.common.data_access import errors as dao_errors
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util import parser
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import base_pipeline
# Module-level logger for this pipeline.
LOGGER = log_util.get_logger(__name__)
class LoadProjectsCloudsqlPipeline(base_pipeline.BasePipeline):
    """Pipeline to load project CloudSql data into Inventory."""

    PROJECTS_RESOURCE_NAME = 'project_iam_policies'
    RESOURCE_NAME = 'cloudsql'
    RESOURCE_NAME_INSTANCES = 'cloudsql_instances'
    RESOURCE_NAME_IPADDRESSES = 'cloudsql_ipaddresses'
    RESOURCE_NAME_AUTHORIZEDNETWORKS = (  # pylint: disable=invalid-name
        'cloudsql_ipconfiguration_authorizednetworks')

    MYSQL_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'

    # Sentinel stored when a timestamp is absent or unparseable (kept at the
    # historical value the loaders already write, for consistency).
    DEFAULT_TIMESTAMP = '1972-01-01 00:00:00'

    @staticmethod
    def _transform_data(cloudsql_instances_map):
        """Yield an iterator of loadable instances.

        Args:
            cloudsql_instances_map (iterable): Instances as per-project
                dictionary.
                Example: {'project_number': 11111,
                          'instances': instances_dict}

        Yields:
            iterable: cloudsql, as a per-cloudsql dictionary.
        """
        for instances_map in cloudsql_instances_map:
            for item in instances_map['instances']:
                # Hoist the nested sub-dicts once per instance instead of
                # re-fetching them for every one of the ~25 fields below.
                settings = item.get('settings', {})
                failover = item.get('failoverReplica', {})
                on_premises = item.get('onPremisesConfiguration', {})
                backup_config = settings.get('backupConfiguration', {})
                ip_config = settings.get('ipConfiguration', {})
                location_pref = settings.get('locationPreference', {})
                yield {
                    'project_number': instances_map['project_number'],
                    'name': item.get('name'),
                    'project': item.get('project'),
                    'backend_type': item.get('backendType'),
                    'connection_name': item.get('connectionName'),
                    'current_disk_size': int(item.get('currentDiskSize', 0)),
                    'database_version': item.get('databaseVersion'),
                    'failover_replica_available': failover.get('available'),
                    'failover_replica_name': failover.get('name'),
                    'instance_type': item.get('instanceType'),
                    'ipv6_address': item.get('ipv6Address'),
                    'kind': item.get('kind'),
                    'master_instance_name': item.get('masterInstanceName'),
                    'max_disk_size': int(item.get('maxDiskSize', 0)),
                    'on_premises_configuration_host_port':
                        on_premises.get('hostPort'),
                    'on_premises_configuration_kind': on_premises.get('kind'),
                    'region': item.get('region'),
                    'replica_configuration':
                        json.dumps(item.get('replicaConfiguration')),
                    'replica_names': json.dumps(item.get('replicaNames')),
                    'self_link': item.get('selfLink'),
                    'server_ca_cert': json.dumps(item.get('serverCaCert')),
                    'service_account_email_address':
                        item.get('serviceAccountEmailAddress'),
                    'settings_activation_policy':
                        settings.get('activationPolicy'),
                    'settings_authorized_gae_applications':
                        json.dumps(settings.get('authorizedGaeApplications')),
                    'settings_availability_type':
                        settings.get('availabilityType'),
                    'settings_backup_configuration_binary_log_enabled':
                        backup_config.get('binaryLogEnabled'),
                    'settings_backup_configuration_enabled':
                        backup_config.get('enabled'),
                    'settings_backup_configuration_kind':
                        backup_config.get('kind'),
                    'settings_backup_configuration_start_time':
                        backup_config.get('startTime'),
                    'settings_crash_safe_replication_enabled':
                        settings.get('crashSafeReplicationEnabled'),
                    'settings_data_disk_size_gb':
                        int(settings.get('dataDiskSizeGb', 0)),
                    'settings_data_disk_type': settings.get('dataDiskType'),
                    'settings_database_flags':
                        json.dumps(settings.get('databaseFlags')),
                    'settings_database_replication_enabled':
                        settings.get('databaseReplicationEnabled', {}),
                    'settings_ip_configuration_ipv4_enabled':
                        ip_config.get('ipv4Enabled', {}),
                    'settings_ip_configuration_require_ssl':
                        ip_config.get('requireSsl', {}),
                    'settings_kind': settings.get('kind'),
                    'settings_labels': json.dumps(settings.get('labels')),
                    'settings_location_preference_follow_gae_application':
                        location_pref.get('followGaeApplication'),
                    'settings_location_preference_kind':
                        location_pref.get('kind'),
                    'settings_location_preference_zone':
                        location_pref.get('zone'),
                    'settings_maintenance_window':
                        json.dumps(settings.get('maintenanceWindow')),
                    'settings_pricing_plan': settings.get('pricingPlan'),
                    'settings_replication_type':
                        settings.get('replicationType'),
                    'settings_settings_version':
                        int(settings.get('settingsVersion', 0)),
                    'settings_storage_auto_resize':
                        settings.get('storageAutoResize'),
                    'settings_storage_auto_resize_limit':
                        int(settings.get('storageAutoResizeLimit', 0)),
                    'settings_tier': settings.get('tier'),
                    'state': item.get('state'),
                    'suspension_reason':
                        json.dumps(item.get('suspensionReason')),
                    'raw_cloudsql_instance': parser.json_stringify(item)
                }

    def _format_timestamp(self, timestamp, field_description):
        """Convert an API timestamp string to MYSQL_DATETIME_FORMAT.

        Shared by the authorized-networks and ip-addresses transforms,
        which previously duplicated this parse/format/fallback logic.

        Args:
            timestamp (str): Timestamp string to parse, or None.
            field_description (str): What is being parsed, for the error log.
                FIX: the original logged 'Unable to parse timeCreated' in
                both callers even though they parse expirationTime and
                timeToRetire respectively.

        Returns:
            str: Formatted timestamp, or DEFAULT_TIMESTAMP when the value
                is missing or unparseable.
        """
        if timestamp is None:
            return self.DEFAULT_TIMESTAMP
        try:
            parsed_time = dateutil_parser.parse(timestamp)
            return parsed_time.strftime(self.MYSQL_DATETIME_FORMAT)
        except (TypeError, ValueError, AttributeError) as e:
            LOGGER.error('Unable to parse %s: %s\n%s',
                         field_description, timestamp, e)
            return self.DEFAULT_TIMESTAMP

    def _transform_authorizednetworks(self, cloudsql_instances_map):
        """Yield an iterator of loadable authorized networks of cloudsql
        instances.

        Args:
            cloudsql_instances_map (iterable): instances as per-project
                dictionary.
                Example: {'project_number': 11111,
                          'instances': instances_dict}

        Yields:
            iterable: authorized network dictionary.
        """
        for instances_map in cloudsql_instances_map:
            for item in instances_map['instances']:
                authorizednetworks = item.get('settings', {})\
                    .get('ipConfiguration', {}).get('authorizedNetworks', [{}])
                for network in authorizednetworks:
                    yield {
                        'project_number': instances_map['project_number'],
                        'instance_name': item.get('name'),
                        'kind': network.get('kind'),
                        'name': network.get('name'),
                        'value': network.get('value'),
                        'expiration_time': self._format_timestamp(
                            network.get('expirationTime'),
                            'expirationTime from authorizednetworks'),
                    }

    def _transform_ipaddresses(self, cloudsql_instances_map):
        """Yield an iterator of loadable ipAddresses of cloudsql instances.

        Args:
            cloudsql_instances_map (iterable): Instances as per-project
                dictionary.
                Example: {'project_number': 11111,
                          'instances': instances_dict}

        Yields:
            iterable: ipAddresses dictionary.
        """
        for instances_map in cloudsql_instances_map:
            for item in instances_map['instances']:
                ipaddresses = item.get('ipAddresses', [{}])
                for ipaddress in ipaddresses:
                    yield {
                        'project_number': instances_map['project_number'],
                        'instance_name': item.get('name'),
                        'ip_address': ipaddress.get('ipAddress'),
                        'type': ipaddress.get('type'),
                        'time_to_retire': self._format_timestamp(
                            ipaddress.get('timeToRetire'),
                            'timeToRetire from ipaddresses'),
                    }

    # pylint: disable=arguments-differ
    def _transform(self, cloudsql_instances_map):
        """Returns a dictionary of generators for the different resource types.

        Args:
            cloudsql_instances_map (iterable): instances as per-project
                dictionary.
                Example: {'project_number': 11111,
                          'instances': instances_dict}

        Returns:
            dict: iterables as a per resource type
        """
        data_dict = {}
        data_dict[self.RESOURCE_NAME_INSTANCES] = \
            self._transform_data(cloudsql_instances_map)
        data_dict[self.RESOURCE_NAME_AUTHORIZEDNETWORKS] = \
            self._transform_authorizednetworks(cloudsql_instances_map)
        data_dict[self.RESOURCE_NAME_IPADDRESSES] = \
            self._transform_ipaddresses(cloudsql_instances_map)
        return data_dict

    def _retrieve(self):
        """Retrieve the project cloudsql instances from GCP.

        Returns:
            list: Instances as per-project dictionary.
                Example: [{project_number: project_number,
                          instances: instances_dict}]

        Raises:
            LoadDataPipelineException: An error with loading data has occurred.
        """
        # Get the projects for which we will retrieve the instances.
        try:
            project_numbers = self.dao.get_project_numbers(
                self.PROJECTS_RESOURCE_NAME, self.cycle_timestamp)
        except dao_errors.MySQLError as e:
            raise inventory_errors.LoadDataPipelineError(e)
        instances_maps = []
        for project_number in project_numbers:
            instances = self.safe_api_call('get_instances', project_number)
            if instances:
                instances_maps.append({'project_number': project_number,
                                       'instances': instances})
        return instances_maps

    def _get_loaded_count(self):
        """Store in self.count how many instances have been loaded.

        Errors are logged rather than raised so a failed count does not
        abort a pipeline run that already loaded its data.
        """
        try:
            self.count = self.dao.select_record_count(
                self.RESOURCE_NAME_INSTANCES,
                self.cycle_timestamp)
        except dao_errors.MySQLError as e:
            LOGGER.error('Unable to retrieve record count for %s_%s:\n%s',
                         self.RESOURCE_NAME_INSTANCES, self.cycle_timestamp, e)

    def run(self):
        """Runs the load Cloudsql data pipeline: retrieve, transform, load."""
        instances_maps = self._retrieve()
        loadable_instances_dict = self._transform(instances_maps)
        self._load(self.RESOURCE_NAME_INSTANCES,
                   loadable_instances_dict[self.RESOURCE_NAME_INSTANCES])
        self._load(self.RESOURCE_NAME_IPADDRESSES,
                   loadable_instances_dict[self.RESOURCE_NAME_IPADDRESSES])
        self._load(self.RESOURCE_NAME_AUTHORIZEDNETWORKS,
                   loadable_instances_dict[
                       self.RESOURCE_NAME_AUTHORIZEDNETWORKS
                   ])
        self._get_loaded_count()
|
"""This file should contain all tests that need access to the internet (apart
from the ones in test_datasets_download.py)
We want to bundle all internet-related tests in one file, so the file can be
cleanly ignored in FB internal test infra.
"""
import os
from urllib.error import URLError
import pytest
import torchvision.datasets.utils as utils
class TestDatasetUtils:
    """Internet-dependent checks for torchvision.datasets.utils.download_url."""

    def _download_or_skip(self, url, tmpdir):
        # Shared helper: a network failure skips the test instead of failing it;
        # a successful download must leave at least one file behind.
        try:
            utils.download_url(url, tmpdir)
        except URLError:
            pytest.skip(f"could not download test file '{url}'")
        assert len(os.listdir(tmpdir)) != 0

    def test_download_url(self, tmpdir):
        self._download_or_skip("http://github.com/pytorch/vision/archive/master.zip", tmpdir)

    def test_download_url_retry_http(self, tmpdir):
        self._download_or_skip("https://github.com/pytorch/vision/archive/master.zip", tmpdir)

    def test_download_url_dont_exist(self, tmpdir):
        bad_url = "http://github.com/pytorch/vision/archive/this_doesnt_exist.zip"
        with pytest.raises(URLError):
            utils.download_url(bad_url, tmpdir)

    def test_download_url_dispatch_download_from_google_drive(self, mocker, tmpdir):
        # A Google-Drive URL must be routed to download_file_from_google_drive
        # with the extracted file id.
        file_id = "1GO-BHUYRuvzr1Gtp2_fqXRsr9TIeYbhV"
        url = f"https://drive.google.com/file/d/{file_id}/view"
        filename = "filename"
        md5 = "md5"
        mocked = mocker.patch("torchvision.datasets.utils.download_file_from_google_drive")
        utils.download_url(url, tmpdir, filename, md5)
        mocked.assert_called_once_with(file_id, tmpdir, filename, md5)
if __name__ == "__main__":
    # Allow running this test module directly by delegating to pytest.
    pytest.main([__file__])
|
"""
module for base plugin class, utilities, etc.
"""
from multiprocessing import Process, Pipe
import time
import monitor
class Plugin(object):
    """Base plugin (Python 2 module): runs self.run in a child process and
    talks to it over a multiprocessing Pipe."""
    def __init__(self):
        # Monitor consulted by run() to decide when to shut down.
        self.monitor = monitor.Monitor()
        # Parent end of the pipe; populated by start().
        self.pipe = None
    def start(self):
        """Spawn the worker process; keep the parent pipe end on self.pipe."""
        self.pipe, child = Pipe()
        self.proc = Process(target=self.run, args=(child,))
        self.proc.start()
    def send(self, msg):
        """Send *msg* to the worker over the pipe (picklable objects only)."""
        self.pipe.send(msg)
    def join(self):
        """Block until the worker process exits."""
        self.proc.join()
    def run(self, pipe):
        """Worker loop: print every received message until the monitor
        reports it is time to stop.

        NOTE(review): check_ages() semantics come from the external monitor
        module — confirm it returns truthy when shutdown is wanted.
        """
        while True:
            obj = pipe.recv()
            print "Got", obj
            #time.sleep(10)
            if self.monitor.check_ages():
                print "Shutting down"
                break
if __name__ == '__main__':
    # Demo driver (Python 2): start a worker and feed it three messages.
    p = Plugin()
    p.start()
    p.send("hello")
    time.sleep(10)
    p.send("there")
    time.sleep(10)
    p.send("world")
    p.join()
|
'''n = int(input())
arr = list(map(int,input().strip().split()))[:n]
u = len(arr)
for i in range(2):
for j in range(i+1,3,+1):
if arr[i] != arr[j]:
arr.remove(arr[i])
print(arr)''' |
# Python 2 demo — just like a set in math: a unique collection of items,
# no duplicated items.
a= [1,2,3,4,5,6,1,1,1]
sA = set(a)  # duplicates collapse: {1, 2, 3, 4, 5, 6}
print sA
sB = set([4,5,6,2,7])
print sA - sB # difference: elements of sA not in sB
print sA & sB #intersection
print sA | sB #union
import time
from scapy.all import *
probe = False
def arp_display(pkt):
if probe:
if pkt[ARP].op == 1: #who-has (request)
if pkt[ARP].psrc == '0.0.0.0': # ARP Probe
print "ARP Probe from: " + pkt[ARP].hwsrc
if pkt[ARP].hwsrc == "00:bb:3a:41:4e:7c":
print time.ctime(), "Pushed Gerber"
os.system("coffee private/ddp_client/dash.coffee")
# Sniff ARP traffic indefinitely, handing each packet to arp_display;
# sniff()'s return value (its capture summary) is printed on exit.
print sniff(prn=arp_display, filter="arp", store=0) #, count=10)
|
#-*- coding: utf-8 -*-
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm).

    Raises ZeroDivisionError if b is 0, matching the original behavior.
    """
    while a % b:
        a, b = b, a % b
    return b
if __name__ == '__main__':
    # Python 2 driver: first line is the number of test cases, then one
    # "a b" pair per line.
    for testcase in range(input()):
        values = raw_input().split()
        print gcd(int(values[0]), int(values[1]))
|
## Sample Input
# the first line contains two space-separated integers denoting the respective values of
# n the number of integers in the array
# and d the number of rotations to perform
## 5 4
## 1 2 3 4 5
## Expected output for a left rotation of 4
# 5 1 2 3 4
from enum import Enum
import sys
class ArrayRotator:
    """Left-rotates an integer array in place using recursive block swaps."""
    def __init__(self):
        # Parsed integers and their count.
        self.data = []
        self.size = 0
    def populate(self, size, input):
        """Parse the first `size` space-separated integers of `input`
        into self.data and record the size."""
        data = input.split(' ')
        for i in range(0, int(size)):
            self.data.append(int(data[i]))
        self.size = int(size)
    def rotate(self, start, len, rotations):
        """Left-rotate self.data[start:start+len] by `rotations` places.

        Works by swapping the smaller of the two segments (prefix of size
        `rotations` vs. the remainder) into place, then recursing on the
        still-unrotated part. No-op when rotations is a multiple of len.
        NOTE(review): `len`/`input` shadow builtins; kept since parameter
        names are part of the call interface.
        """
        if (rotations%len > 0):
            start2 = start + len - rotations
            len2 = rotations
            if (2*rotations > len):
                # The tail segment is the smaller one; swap that instead.
                start2 = start + rotations
                len2 = len - rotations
            self.swap(start, start2, len2)
            if (2*rotations <= len):
                len = len - rotations
            else:
                start = start + len - rotations
                tmp = rotations
                rotations = 2 * rotations - len
                len = tmp
            if (rotations < len):
                self.rotate(start, len, rotations)
    def swap(self, ind1, ind2, len):
        """Element-wise swap of the `len`-long segments at ind1 and ind2."""
        #print('swap', ind1, ind2, len)
        for i in range(0, len):
            tmp = self.data[ind1 + i]
            self.data[ind1 + i] = self.data[ind2 + i]
            self.data[ind2 + i] = tmp
def main():
    """Read "n d" and the array from stdin, rotate left by d, print result."""
    header = sys.stdin.readline().strip('\n')
    counts = header.split(' ')
    values = sys.stdin.readline().strip('\n')
    rotator = ArrayRotator()
    rotator.populate(counts[0], values)
    rotator.rotate(0, int(counts[0]), int(counts[1]))
    print (*rotator.data, sep=' ')
if __name__ == "__main__":
    # Script entry point: consumes the two stdin lines described above.
    main()
|
from fotutils.forms import ModelFormWithSlugBase
from vars.models import Var, Device
class VarForm(ModelFormWithSlugBase):
    """ModelForm for Var; slug behavior is inherited from ModelFormWithSlugBase."""
    class Meta(ModelFormWithSlugBase.Meta):
        model = Var
        # NOTE(review): no `fields`/`exclude` set here — modern Django raises
        # ImproperlyConfigured unless the inherited Meta supplies one; confirm.
class DeviceForm(ModelFormWithSlugBase):
    """ModelForm for Device; slug behavior inherited from ModelFormWithSlugBase."""
    class Meta(ModelFormWithSlugBase.Meta):
        model = Device
def filter_string(string):
    """Collect every digit character of *string* (in order) and return the
    resulting number as an int. Raises ValueError when no digit is present."""
    digits = [ch for ch in string if ch.isdigit()]
    return int("".join(digits))
|
# -*- coding:utf-8 -*-
"""
This python file is used to transfer the words in the corpus to vectors, and save the word2vec model under the path 'w2v_model'.
"""
from gensim.models.word2vec import Word2Vec
import pickle
import os
import gc
import sys
"""
DirofCorpus class
-----------------------------
This class is used to make a generator to produce sentence for word2vec training
# Arguments
dirname: The src of corpus files
"""
class DirofCorpus(object):
    """Iterable corpus feeding sentences to word2vec training.

    Walks each directory in *dirname*; for every sub-directory, loads each
    .pkl file — whose element [0] is a list of token sequences — and yields
    the token sequences one at a time.

    Args:
        dirname: list of corpus root directories.
    """
    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for d in self.dirname:
            for fn in os.listdir(d):
                if not os.path.isdir(d + '/' + fn):
                    continue
                for filename in os.listdir(os.path.join(d, fn)):
                    if not filename.endswith('.pkl'):
                        continue
                    # FIX: the original passed open(...) straight to
                    # pickle.load and never closed the handle; use a
                    # context manager so it is released deterministically.
                    with open(os.path.join(d, fn, filename), 'rb') as pkl_file:
                        samples = pickle.load(pkl_file)[0]
                    for sample in samples:
                        yield sample
                    # Free the (potentially large) sample list eagerly.
                    del samples
                    gc.collect()
'''
generate_w2vmodel function
-----------------------------
This function is used to learn vectors from the corpus, and save the model
# Arguments
decTokenFlawPath: String type, the src of corpus file
w2vModelPath: String type, the src of model file
'''
def generate_w2vModel(decTokenFlawPath, w2vModelPath):
    """Train a skip-gram Word2Vec model on the corpus and save it to disk.

    Args:
        decTokenFlawPath: list of corpus directories fed to DirofCorpus.
        w2vModelPath: path where the trained model is written.

    NOTE(review): `size` and `iter` are gensim 3.x keyword names; gensim 4+
    renamed them to `vector_size`/`epochs` — confirm the pinned version.
    """
    print("training...")
    model = Word2Vec(sentences=DirofCorpus(decTokenFlawPath), size=30, alpha=0.01, window=5, min_count=0,
                     max_vocab_size=None, sample=0.001, seed=1, workers=1, min_alpha=0.0001, sg=1, hs=0, negative=10,
                     iter=5)  # min_count was 0 when this program was downloaded from github
    model.save(w2vModelPath)
def generate_corpus_v2(w2vModelPath, samples):
    """Replace every token in *samples* with its trained word2vec vector.

    Args:
        w2vModelPath: path of a saved gensim Word2Vec model.
        samples: iterable of token sequences.

    Returns:
        list: one list of word vectors per input sample.
    """
    model = Word2Vec.load(w2vModelPath)
    print("begin generate input...")
    vectorized = []
    for sample in samples:
        vectorized.append([model[word] for word in sample])
    print("generate input success...")
    return vectorized
def get_input_dl(corpusPath, w2v_model_path, vectorPath):
    """Vectorize every corpus .pkl under *corpusPath* and mirror the
    directory layout under *vectorPath*, skipping folders already present."""
    for sub_name in os.listdir(corpusPath):
        sub_dir = corpusPath + os.path.sep + sub_name
        if not os.path.isdir(sub_dir):
            continue
        if sub_name in os.listdir(vectorPath):
            # Already vectorized on a previous run.
            continue
        target_dir = os.path.join(vectorPath, sub_name)
        if not os.path.exists(target_dir):
            os.mkdir(target_dir)
        for file_name in os.listdir(sub_dir):
            source_path = os.path.join(corpusPath, sub_name, file_name)
            with open(source_path, 'rb') as src:
                data = pickle.load(src)
            # Slot 0 holds the token sequences; replace them with vectors.
            data[0] = generate_corpus_v2(w2v_model_path, data[0])
            target_path = os.path.join(vectorPath, sub_name, file_name)
            with open(target_path, 'wb') as dst:
                pickle.dump(data, dst, protocol=pickle.HIGHEST_PROTOCOL)
def get_all_dl(vectorPath, dlCorpusPath):
    """Merge every vectorized .pkl under *vectorPath* into one test.pkl
    written to *dlCorpusPath*.

    Each input file holds (at least) six parallel lists; slots 0-5 are
    concatenated across all files and slot 6 collects the file names.
    Nothing is written when no sample was found.
    """
    num = 1  # number of merge passes (kept from the original driver)
    test_set = [[], [], [], [], [], [], []]
    for _ in range(num):
        for folder in os.listdir(vectorPath):
            if not os.path.isdir(vectorPath + os.path.sep + folder):
                continue
            for filename in os.listdir(vectorPath + os.path.sep + folder):
                print(filename)
                if not filename.endswith(".pkl"):
                    continue
                # FIX: the original opened the file without ever closing it;
                # a context manager releases the handle deterministically.
                with open(vectorPath + os.path.sep + folder + os.path.sep
                          + filename, 'rb') as f:
                    data = pickle.load(f)
                for n in range(6):
                    test_set[n] = test_set[n] + data[n]
                test_set[-1].append(filename)
        if test_set[0] == []:
            continue
        with open(dlCorpusPath + os.path.sep + "test.pkl", "wb") as f_train:
            pickle.dump(test_set, f_train, protocol=pickle.HIGHEST_PROTOCOL)
        # Free the merged data eagerly; it can be large.
        del test_set
        gc.collect()
# argv[1]: 待检测文件夹路径
# argv[2]: 已经准备好的corpus
# basePath = sys.argv[1]
# dec_tokenFlaw_path = [basePath + os.path.sep + 'corpus', sys.argv[2]]
# w2v_model_path = basePath + os.path.sep + 'model'
# print(dec_tokenFlaw_path)
# print(w2v_model_path)
# generate_w2vModel(dec_tokenFlaw_path, w2v_model_path)
#
# corpusPath = basePath + os.path.sep + 'corpus'
# vectorPath = basePath + os.path.sep + 'vector'
# print(corpusPath)
# print(vectorPath)
# if __name__ == '__main__':
# corpusPath = "/Users/ke/Documents/snail/graduate/platform/serverTest/Test/test_v2/corpus"
# vectorPath = "/Users/ke/Documents/snail/graduate/platform/serverTest/Test/test_v2/vector"
# w2v_model_path = "/Users/ke/Documents/snail/graduate/platform/serverTest/Test_v2/test/model"
# get_input_dl(corpusPath, w2v_model_path, vectorPath)
# dl_path = "/Users/ke/Documents/snail/graduate/platform/serverTest/Test/test/dlCorpus"
# get_all_dl(vectorPath, dl_path)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.views.generic import TemplateView, ListView, CreateView, UpdateView
from django.shortcuts import redirect, resolve_url
from django.contrib import messages
from django.conf import settings
from npcms.models import Section, SECTION_MODULE_CHOICES
class HomeView(ListView):
template_name = 'npcms/dashboard.html'
model = Section
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
context['module_choices'] = SECTION_MODULE_CHOICES
return context
def get(self, request, *args, **kwargs):
print settings.MESSAGE_STORAGE
return super(HomeView, self).get(request, args, kwargs)
class SectionCreateView(CreateView):
    """Creates a Section from a POSTed form; plain GETs are bounced back
    to the dashboard (presumably the create form lives there — confirm)."""
    model = Section
    fields = ['name', 'title', 'module', 'order']
    def get_success_url(self):
        """Return to the dashboard after a successful create."""
        return resolve_url('npcms:dashboard')
    def get(self, request, *args, **kwargs):
        # No standalone create page: redirect GET requests to the dashboard.
        return redirect('npcms:dashboard')
    def form_invalid(self, form):
        """Flash the validation errors and return to the dashboard."""
        messages.warning(self.request, form.errors)
        return redirect('npcms:dashboard')
    def form_valid(self, form):
        """Save the section and flash a success message ('created successfully')."""
        messages.success(self.request, '创建成功')
        return super(SectionCreateView, self).form_valid(form)
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import pickle
# pickle模块是对 python对象 进行 序列化/反序列化 的 二进制协议
# pickle模块有两个过程
# picking:序列化,将 python对象 转换为 字节流
# unpicking:反序列化,将 字节流 转换为 python对象
# pickle使用的数据格式仅用于python
# pickle可以直接表示大部分python数据类型,包括自定义类型
# 序列化和反序列化
# dumps(obj, protocol=None, *, fix_imports=True)
# loads(bytes_object, *, fix_imports=True, encoding='ASCII', errors='strict')
# 文件读写
# dump(obj, file, protocol=None, *, fix_imports=True)
# pickle.(file, protocol).dump(obj)
# load(file, *, fix_imports=True, encoding='ASCII', errors='strict')
# ---------------------------------------- 内置数据 ----------------------------------------
# Sample dict of built-in types used for the demos below.
dic_a = {'name': '小明', 'age': 14, 'score': 88}
# dumps / loads: round-trip through an in-memory bytes object.
my_encode = pickle.dumps(dic_a)
print('pickle.dumps序列化后的结果:\n\t', my_encode)
print('\t序列化后的数据类型:', type(my_encode))
my_decode = pickle.loads(my_encode)
print('pickle.loads反序列化后的结果:\n\t', my_decode)
print('\t反序列化后的数据类型:', type(my_decode))
# dump / load: round-trip through a binary file.
with open('fileRes\\pickle.pkl', 'wb') as file:
    print('正在写入文件pickle.pkl。。。')
    pickle.dump(dic_a, file)
    print('文件写入完毕!')
with open('fileRes\\pickle.pkl', 'rb') as file:
    print('正在读取文件pickle.pkl。。。')
    bin_content = pickle.load(file)
    print('文件读取完毕!内容为:\n\t', bin_content)
# ---------------------------------------- 自定义数据 ----------------------------------------
class Student(object):
    """Plain data holder used to demonstrate pickling a user-defined class."""
    def __init__(self, name, age, score):
        self.name = name
        self.age = age
        self.score = score
stu1 = Student('小黄', 30, 53)
# dumps / loads with a custom class instance.
my_encode = pickle.dumps(stu1)
print('pickle.dumps序列化后的结果:\n\t', my_encode)
print('\t序列化后的数据类型:', type(my_encode))
my_decode = pickle.loads(my_encode)
print('pickle.loads反序列化后的结果:\n\t', my_decode)
print('\t反序列化后的数据类型:', type(my_decode))
# dump / load: write and read back multiple objects in sequence.
# NOTE(review): 'rb+' rewrites from offset 0 over the previous file, so
# stale trailing bytes may remain — confirm this is intended (vs 'wb').
with open('fileRes\\pickle.pkl', 'rb+') as file:
    print('正在写入文件pickle.pkl。。。')
    pickle.dump(stu1, file)
    pickle.dump(dic_a, file)
    print('文件写入完毕!')
with open('fileRes\\pickle.pkl', 'rb') as file:
    print('正在读取文件pickle.pkl。。。')
    bin_content = pickle.load(file)
    print(type(bin_content))
    print('文件读取完毕!内容为:\n\t', bin_content.__dict__)
    bin_content = pickle.load(file)
    print(type(bin_content))
    print('文件读取完毕!内容为:\n\t', bin_content)
|
from django.db import models
from django.db.models.base import Model
# Create your models here.
class Product(models.Model):
    """A catalogue product with stock tracking and an uploaded image file."""
    name = models.CharField(max_length=255)
    # NOTE(review): FloatField for money is lossy; DecimalField is the usual
    # choice — switching requires a schema migration, so it is only flagged.
    price = models.FloatField()
    stock = models.IntegerField()
    status = models.CharField(max_length=100)
    image= models.FileField(upload_to="documents")
class Subscription(models.Model):
    """A subscription keyed by a unique e-mail address."""
    email = models.EmailField(max_length = 254, null=False, blank=False, unique=True)
    def __str__(self):
        """Display the subscriber's e-mail address."""
        return self.email
|
from django.shortcuts import render
from django.views import View
from db.login_mixin import LoginRequiredMixin
from interview.models import Interview
# Create your views here.
class OfferView(LoginRequiredMixin,View):
    '''Offer submission page.'''
    def get(self,request,interview_id):
        """Render the offer page for the given interview.

        Raises Interview.DoesNotExist for an unknown id.
        """
        interview = Interview.objects.get(id=interview_id)
        # NOTE(review): assumes the interviewed user has at least one resume;
        # IndexError otherwise — confirm upstream guarantees this.
        resume = interview.user.resume_set.all()[0]
        return render(request, 'offer.html',{"interview":interview,"resume":resume})
class OfferDetailView(LoginRequiredMixin,View):
    '''Recruiting-requirement (offer detail) page.'''
    def get(self,request):
        """Render the static offer-detail template."""
        return render(request, 'offer_detail.html')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **********************************************************
# * Author : xoyabc
# * Email : xoyabc@qq.com
# * Last modified : 2018-07-03 23:11
# * Filename : host.py
# * Description :
# * ********************************************************
import json
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from lxml import etree
info_list = []
def load_json_from_file(f):
    """Parse the JSON document in the open file *f* and return its 'Data' list."""
    return json.load(f)['Data']
def get_element_of_span_or_code(text):
    """Return the text of the first td//code or span element in *text*,
    with commas stripped; the string "None" when the element is empty."""
    html = etree.HTML(text)
    node_text = html.xpath('//td//code|//span')[0].text
    if node_text is None:
        return "None"
    return node_text.replace(',', '')
def get_element_of_td(text):
    """Return the text of the first <td> in *text*; fall back to the raw
    input when parsing yields no document (AttributeError path)."""
    html = etree.HTML(text)
    try:
        return html.xpath('//td')[0].text
    except AttributeError:
        #print "{0} AttributeError" .format(text)
        return text
def line_prepender(filename, line):
    """Insert *line* (trailing CR/LF stripped) as the new first line of
    *filename*, keeping the existing content below it."""
    with open(filename, 'r+') as handle:
        body = handle.read()
        handle.seek(0, 0)
        handle.write('%s\n%s' % (line.rstrip('\r\n'), body))
def write_vm_info_to_file(f):
    """Extract VM fields from the JSON dump in *f* and write one
    space-separated line per VM to the hard-coded file 'result'.
    (Python 2: uses print statements; appends to the module-level info_list.)
    """
    data_list = load_json_from_file(f)
    for machine in data_list:
        # get the value of the machine dict
        host = machine['host_on']
        lan_ip = machine['management_ip']
        service = machine['service']
        hostname = machine['hostname']
        asset_type = machine['asset_type']
        ram = machine['ram']
        cpu_core_count = machine['cpu_core_count']
        disk = machine['disk']
        # get the real value of every item (strip the HTML wrappers)
        r_hostname = get_element_of_span_or_code(hostname)
        r_lan_ip = get_element_of_span_or_code(lan_ip)
        r_service = get_element_of_span_or_code(service)
        r_ram = get_element_of_td(ram)
        r_cpu_core_count = get_element_of_td(cpu_core_count)
        r_disk = get_element_of_td(disk)
        r_host = get_element_of_td(host)
        r_vm_info = '{0} {1} {2} {3} {4} {5} {6}\n' .format(r_hostname,r_lan_ip,r_service,r_ram,r_cpu_core_count,r_disk,r_host)
        info_list.append(r_vm_info)
        print r_vm_info
    # write to result file
    # NOTE(review): `f` here shadows the function parameter — intentional?
    with open ('result', 'w') as f:
        f.writelines(info_list)
if __name__=='__main__':
    # Read the exported inventory, dump it to 'result', then prepend the
    # column header line (Chinese column names).
    # NOTE(review): the input file handle is never closed here.
    f = open("test.json")
    head_instruction = "主机名 内网IP 服务 内存(M) CPU核数 硬盘(G) 宿主机IP"
    write_vm_info_to_file(f)
    line_prepender('result', head_instruction)
|
import numpy as np
import math
from math import *
import matplotlib.pyplot as plt
import ROOT
from ROOT import gROOT
from math import *
from array import array
from scipy import stats
from scipy.stats import norm
import matplotlib.mlab as mlab
from mpl_toolkits.mplot3d import Axes3D as ax3
# --- Python 2 analysis script (print statements / xrange). ---
# Open the processed cosmic-ray file and fetch the photon tree.
inFile=ROOT.TFile("proc_cry_total12.root")
# inFile=ROOT.TFile("proc_cry_total9.root")
# inFile=ROOT.TFile("cry_0.root.pro")
tree=ROOT.TTree()
tree=inFile.Get("photon_Data")
# y=np.zeros(1,dtype=float)
# One-element arrays act as C-style buffers for SetBranchAddress.
x=array("f",[0])
y=array("f",[0])
z=array("f",[0])
x0=array("f",[0])
y0=array("f",[0])
z0=array("f",[0])
x1=array("f",[0])
y1=array("f",[0])
z1=array("f",[0])
left=array("i",[0])
right=array("i",[0])
t=array("f",[0])
e=array("f",[0])
pid=array("i",[0])
vol=array("i",[0])
tree.SetBranchAddress("x",x)
tree.SetBranchAddress("y",y)
tree.SetBranchAddress("z",z)
tree.SetBranchAddress("x0",x0)
tree.SetBranchAddress("y0",y0)
tree.SetBranchAddress("z0",z0)
tree.SetBranchAddress("x1",x1)
tree.SetBranchAddress("y1",y1)
tree.SetBranchAddress("z1",z1)
tree.SetBranchAddress("t",t)
tree.SetBranchAddress("left",left)
tree.SetBranchAddress("right",right)
tree.SetBranchAddress("pid",pid)
tree.SetBranchAddress("vol",vol)
entries=int(tree.GetEntries())
# entries=26255
# Per-event accumulators filled by the selection loop below.
px=[]
py=[]
pz=[]
px0=[]
py0=[]
pz0=[]
px1=[]
py1=[]
pz1=[]
dx=[]
dy=[]
dz=[]
leftList=[]
rightList=[]
tim=[]
volList=[]
cellLen=950
eList=[]
rList=[]
slopeList=[]
zCutList=[]
ratList=[]
totalList=[]
# Event selection: keep in-time, in-volume hits from muons (|pid| == 13)
# with finite endpoints and positive signals on both readouts.
for i in xrange(0,entries):
    tree.GetEntry(i)
    # if abs(t[0])!=0 and abs(pid[0])==13:
    if abs(t[0])<30 and abs(t[0])!=0 and abs(x[0])<(80) and abs(z[0])<(80) and abs(y[0])<(500.0) and not (math.isnan(x0[0]) or math.isnan(x1[0])) and abs(pid[0])==13 :
        if (z0[0]-z1[0])>0 and right[0]>0 and left[0]>0 :
            # if y[0]>(-57*t[0]-100) and y[0]<(-57*t[0]+100):
            # if y[0]<(-57*t[0]-100) or y[0]>(-57*t[0]+100):
            leftList.append(left[0])
            rightList.append(right[0])
            rat=float(left[0])/float(right[0])
            total=left[0]+right[0]
            totalList.append(total)
            lr=log10(rat)
            ratList.append(lr)
            px.append(x[0])
            py.append(y[0])
            pz.append(z[0])
            px0.append(x0[0])
            py0.append(y0[0])
            pz0.append(z0[0])
            px1.append(x1[0])
            py1.append(y1[0])
            pz1.append(z1[0])
            dx.append(x0[0]-x1[0])
            dy.append(y0[0]-y1[0])
            dz.append(z0[0]-z1[0])
            tim.append(t[0])
            volList.append(vol[0])
            eList.append(e[0])
print "number of muon events "+str(len(tim))
####################Slope Est##########################
# First analysis pass (before cuts): regressions and histograms.
print "Slope Est"
time=np.array(tim)
posX=np.array(px)
posY=np.array(py)
posZ=np.array(pz)
energy=np.array(eList)
logRat=np.array(ratList)
slope, intercept, r_value, p_value, std_err = stats.linregress(time,posY)
# sqrt(12 * var) is the full width of a uniform distribution with that variance.
dY=sqrt(12*posY.var())
dt=sqrt(12*time.var())
print "b-a "+str(dt)
print "mean "+str(time.mean())
print "std "+str(time.std())
print "dY "+str(dY)
print "mean "+str(posY.mean())
print "std "+str(posY.std())
est=-dY/dt
print "slope est "+str(est)
slopeList.append(-slope)
bias=100*(slope-est)/slope
print "bias "+str(bias)
minT=time.min()
maxT=time.max()
ran=10
minT=-ran
maxT=ran
tRange=range(int(minT),int(maxT))
posRange=[]
posRangeEst=[]
t0=time.mean()
for i in tRange:
    posRange.append(slope*float(i)+intercept)
    posRangeEst.append(est*(float(i)-t0))
####################Time v Pos##########################
print
print "time v pos"
print "slope "+str(slope)
print "intercept "+str(intercept)
print "r "+str(r_value)
print "p "+str(p_value)
print "std "+str(std_err)
print "dt mean " +str(time.mean())
print "dt std " +str(time.std())
print
# plt.plot(time,py,'.')
# plt.plot(tRange,posRange,'o',color='r',)
# plt.plot(tRange,posRangeEst,'*',color='c')
# plt.ylabel("postion along the cell(mm)")
# plt.xlabel("delta time(ns)")
# plt.title("Postion along the Cell vs Time")
# plt.show()
####################Pos V Rat###########################
print "pos v rat"
slope, intercept, r_value, p_value, std_err = stats.linregress(logRat,posY)
print "slope "+str(slope)
print "intercept "+str(intercept)
print "r "+str(r_value)
print "p "+str(p_value)
print "std "+str(std_err)
# plt.plot(logRat,posY,'.')
# plt.ylabel("postion along the cell(mm)")
# plt.xlabel("log ratio of signals")
# plt.title("log10(S0/S1) vs Pos")
# plt.show()
print
####################Time v Rat##########################
print "time v rat"
slope, intercept, r_value, p_value, std_err = stats.linregress(time,logRat)
print "slope "+str(slope)
print "intercept "+str(intercept)
print "r "+str(r_value)
print "p "+str(p_value)
print "std "+str(std_err)
# RMS of residuals around the time-vs-ratio fit; reused below for the cuts.
n=len(ratList)
stdSum=0
for i in xrange(0,len(ratList)):
    ratEst=slope*time[i]+intercept
    stdSum+=(ratList[i]-ratEst)**2
stdErr=sqrt(stdSum/len(ratList))
ran=10
minT=-ran
maxT=ran
tRange=range(int(minT),int(maxT))
logRange=[]
for i in tRange:
    logRange.append(slope*float(i)+intercept)
# plt.figure()
# plt.plot(tRange,logRange,'*',color='c')
# plt.plot(time,logRat,'.')
# plt.errorbar(tRange, logRange, yerr=1*stdErr, fmt='o',color='r',ecolor='r',capthick=2)
# plt.ylabel("log ratio of signals")
# plt.xlabel("delta time(ns)")
# plt.title("log10(S0/S1) vs Time")
# plt.show()
print
#############time hist***********
print "time "
print "time mean: "+str(time.mean())
print "time std: "+str(time.std())
# n, bins, patches=plt.hist(time,bins=31,range=[-15,15])
# plt.title("Delta Time (ns)")
# plt.show()
print
############# posY hist***********
print "postion"
print "posY mean: "+str(posY.mean())
print "posY std: "+str(posY.std())
print
# n, bins, patches=plt.hist(posY,bins=30)
# plt.title("Muon Position (cm)")
# plt.show()
#############logRat hist***********
print "log ratio"
print "logRat mean: "+str(logRat.mean())
print "logRat std: "+str(logRat.std())
print
# n, bins, patches=plt.hist(logRat,bins=30)
# plt.title("log ratio of signals")
# plt.show()
#############energy hist***********
print "energy "
print "energy mean: "+str(energy.mean())
print "energy std: "+str(energy.std())
print
# plt.show()
# Gaussian fit overlaid on the (normalized) energy histogram.
mu , sigma=norm.fit(eList)
n, bins, patches=plt.hist(energy,normed=True,bins=100,range=[0,500])
gaus = mlab.normpdf( bins, mu, sigma)
plt.title("Energy (Mev)")
plt.plot(bins, gaus, 'r--', linewidth=2)
plt.show()
print "mu "+str(mu)
print "mean "+str(energy.mean())
print "sigma "+ str(sigma)
print
print "###################################"
print
print
print "###################################"
print "Cut calculation"
#################cuts#########################
# Flag events whose log-ratio deviates more than 1*stdErr from the fit or
# whose energy exceeds 75 MeV, then delete them from every parallel list.
delList=[]
slope, intercept, r_value, p_value, std_err = stats.linregress(time,logRat)
print "std err"+str(stdErr)
for i in xrange(0,len(ratList)):
    rat=ratList[i]
    t=time[i]
    e=eList[i]
    ratEst=slope*t+intercept
    eCut=e>75
    ratCut=abs(rat-ratEst)>1*stdErr
    cutList=[ratCut,eCut]
    for cut in cutList:
        if cut :
            if i not in delList:
                delList.append(i)
print "deleted events "+str(len(delList))
# Delete from the back so earlier indices stay valid.
for i in reversed(delList):
    del leftList[i]
    del rightList[i]
    del ratList[i]
    del totalList[i]
    del px[i]
    del py[i]
    del pz[i]
    del px0[i]
    del py0[i]
    del pz0[i]
    del px1[i]
    del py1[i]
    del pz1[i]
    del dx[i]
    del dy[i]
    del dz[i]
    del tim[i]
    del volList[i]
    del eList[i]
# Bin total signal by position along the cell (100 bins of 10 mm).
posBin=[]
for i in xrange(0,100):
    posBin.append([])
for i in xrange(0,len(totalList)):
    y=py[i]+500
    binNum=y/10
    posBin[int(binNum)].append(totalList[i])
sTotal=[]
yIndex=[]
# NOTE(review): this shadows the builtins min/max from here on.
min=time.min()
max=time.max()
interval=max-min
for i in xrange(0,100):
    pb= posBin[i]
    sTotal.append(np.array(pb).mean())
    yIndex.append(min+i*interval/100)
print "number of muon events cut "+str(len(tim))
####################Slope Est##########################
# Second analysis pass: same regressions/histograms on the cut data.
print "Slope Est"
time=np.array(tim)
posX=np.array(px)
posY=np.array(py)
posZ=np.array(pz)
energy=np.array(eList)
logRat=np.array(ratList)
slope, intercept, r_value, p_value, std_err = stats.linregress(time,posY)
dY=sqrt(12*posY.var())
dt=sqrt(12*time.var())
print "b-a "+str(dt)
print "std "+str(time.std())
print "dY "+str(dY)
print "std "+str(posY.std())
est=-dY/dt
print "slope est "+str(est)
slopeList.append(-slope)
bias=100*(slope-est)/slope
print "bias "+str(bias)
minT=time.min()
maxT=time.max()
ran=10
minT=-ran
maxT=ran
tRange=range(int(minT),int(maxT))
posRange=[]
posRangeEst=[]
t0=time.mean()
for i in tRange:
    posRange.append(slope*float(i)+intercept)
    posRangeEst.append(est*(float(i)-t0))
####################Time v Pos##########################
print
print "time v pos"
print "slope "+str(slope)
print "intercept "+str(intercept)
print "r "+str(r_value)
print "p "+str(p_value)
print "std "+str(std_err)
print "dt mean " +str(time.mean())
print "dt std " +str(time.std())
print
# plt.plot(time,py,'.')
# plt.plot(tRange,posRange,'o',color='r',)
# plt.plot(tRange,posRangeEst,'*',color='c')
# plt.ylabel("postion along the cell(mm)")
# plt.xlabel("delta time(ns)")
# plt.title("Postion along the Cell vs Time")
# plt.show()
####################Pos V Rat###########################
print "pos v rat"
slope, intercept, r_value, p_value, std_err = stats.linregress(logRat,posY)
print "slope "+str(slope)
print "intercept "+str(intercept)
print "r "+str(r_value)
print "p "+str(p_value)
print "std "+str(std_err)
# plt.plot(logRat,posY,'.')
# plt.ylabel("postion along the cell(mm)")
# plt.xlabel("log ratio of signals")
# plt.title("log10(S0/S1) vs Pos")
# plt.show()
print
####################Time v Rat##########################
print "time v rat"
slope, intercept, r_value, p_value, std_err = stats.linregress(time,logRat)
print "slope "+str(slope)
print "intercept "+str(intercept)
print "r "+str(r_value)
print "p "+str(p_value)
print "std "+str(std_err)
# plt.plot(time,logRat,'.')
# plt.ylabel("log ratio of signals")
# plt.xlabel("delta time(ns)")
# plt.title("log10(S0/S1) vs Time")
# plt.show()
print
#############time hist***********
print "time "
print "time mean: "+str(time.mean())
print "time std: "+str(time.std())
# n, bins, patches=plt.hist(time,bins=31,range=[-15,15])
# plt.title("Delta Time (ns)")
# plt.show()
print
############# posY hist***********
print "postion"
print "posY mean: "+str(posY.mean())
print "posY std: "+str(posY.std())
print
# n, bins, patches=plt.hist(posY,bins=30)
# plt.title("Muon Position (cm)")
# plt.show()
#############logRat hist***********
print "log ratio"
print "logRat mean: "+str(logRat.mean())
print "logRat std: "+str(logRat.std())
print
# n, bins, patches=plt.hist(logRat,bins=30)
# plt.title("log ratio of signals")
# plt.show()
#############energy hist***********
print "energy "
print "energy mean: "+str(energy.mean())
print "energy std: "+str(energy.std())
print
# Gaussian fit overlaid on the post-cut energy histogram.
n, bins, patches=plt.hist(energy,normed=True,bins=100,range=[0,500])
plt.title("Energy (Mev)")
mu , sigma=norm.fit(eList)
gaus = mlab.normpdf( bins, mu, sigma)
plt.plot(bins, gaus, 'r--', linewidth=2)
print "mu "+str(mu)
print "mean "+str(energy.mean())
print "sigma "+ str(sigma)
plt.show()
print
|
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 20 20:39:09 2015
@author: lenovo
"""
'''
题目内容:
一个斐波那契数列的前10项为:1, 2, 3, 5, 8, 13, 21, 34, 55, 89,
对于一个最大项的值不超过n的斐波那契数列,求值为偶数的项的和。
输入格式:
一个正整数n,如100。
输出格式:
值为偶数的项的和,如 2 + 8 + 34 = 44。
输入样例:
100
输出样例:
44
'''
#n = int(raw_input())
#count = 0
#if n < 2:
# pass
#elif n == 2:
# count = 2
#else:
# count = 2
# n1 = 1
# n2 = 2
# fi = n1 + n2
# while fi < n:
# fi = n1 + n2
# if fi >n:
# break
# else:
# if fi % 2 == 0:
# count += fi
# n1 = n2
# n2 = fi
#print count
'''
题目内容:
若已知1800年1月1日为星期3,
则对于一个给定的年份和月份,输出这个月的最后一天是星期几。
输入格式:
两行整数,分别代表年份和月份
输出格式:
星期数,0代表星期日
输入样例:
2033
12
输出样例:
6
'''
#x = int(raw_input())
#y = int(raw_input())
#n = x - 1800
#days =0
#while n >= 0:
# if x % 4 == 0 and x % 100 != 0 or x % 400 ==0:
# days += 366#x为闰年
# else:
# days += 365
# n = n - 1
#while y > 0:
# if y == 1 or y == 3 or y==5 or y ==7 or y == 8 or y == 10 or y == 12:
# days += 31
# elif y == 4 or y == 6 or y == 9 or y == 11:
# days += 30
# else:
# if x % 4 == 0 and x % 100 != 0 or x % 400 ==0:
# days += 29
# else:
# days += 28
# y = y - 1
#s = (days + 2) % 7
#print s
'''
题目内容:
如在汉诺塔游戏中,我们希望将塔A上的n个盘子,通过塔B移动到塔C,
则对于任意输入的n,给出移动的步骤。
输入格式:
一个正整数n
输出格式:
移动的步骤
输入样例:
2
输出样例:
Move 1 from A to B
Move 2 from A to C
Move 1 from B to C
'''
x =int(raw_input())
def hanoi(n,A,B,C):
if n == 1:
print "Move disk",n,"from",A,"to",C
else:
hanoi(n-1,A,C,B)
print "Move disk",n,"from",A,"to",C
hanoi(n-1,B,A,C)
hanoi(x,'A','B','C') |
# -*- coding: utf-8 -*-
import argparse
import logging
import random
import sys
import time
from enum import Enum
from enum import unique
from pathlib import Path
import requests
@unique
class DeviceBrand(Enum):
    """Manufacturers appearing in BrowserStack device names."""
    Samsung = 'Samsung'
    Google = 'Google'
    OnePlus = 'OnePlus'
    Xiaomi = 'Xiaomi'
    Vivo = 'Vivo'
    Oppo = 'Oppo'
    Motorola = 'Motorola'
    Huawei = 'Huawei'
    # NOTE(review): member name has a typo ("Sansung"); kept because the
    # member name is part of the public interface and @unique forbids an alias.
    SansungTablet = 'Samsung Galaxy Tab'
    @staticmethod
    def to_brand(brand_str: str):
        """Return the DeviceBrand whose value equals brand_str; raise when unknown."""
        for member in DeviceBrand:
            if member.value == brand_str:
                return member
        raise Exception(f'Unknown brand {brand_str}')
@unique
class TestState(Enum):
    """Lifecycle states BrowserStack reports for a session or build."""
    Running = 'running'
    Failed = 'failed'
    Error = 'error'
    TimedOut = 'timed out'
    Passed = 'passed'
    Queued = 'queued'
    Skipped = 'skipped'
    @staticmethod
    def to_state(state_str: str):
        """Map a (case-insensitive) state string to a TestState.

        A missing/empty string means the task has not reported yet -> Running.
        Raises for unrecognized states.
        FIX: declared @staticmethod -- the original was a plain function that
        only happened to work when accessed via the class.
        """
        if not state_str:
            return TestState.Running
        state_str = state_str.lower()
        for state in TestState:
            if state.value == state_str:
                return state
        raise Exception(f'Unknown test state {state_str}')
    def is_complete(self):
        """True when the state is terminal (polling can stop)."""
        return self in (TestState.Failed, TestState.Error, TestState.TimedOut,
                        TestState.Passed, TestState.Skipped)
    def is_success(self):
        """True when the terminal state counts as a pass (Skipped included)."""
        return self in (TestState.Passed, TestState.Skipped)
# Android version string -> API level, matching the '-<version>' suffix of
# BrowserStack device names (e.g. '13.0' -> 33).
# NOTE(review): API 32 (12L) has no version string of its own here.
API_MAP = {
    '13.0': 33,
    '12.0': 31,
    '11.0': 30,
    '10.0': 29,
    '9.0': 28,
    '8.1': 27,
    '8.0': 26,
    '7.1': 25,
    '7.1.1': 25,
    '7.0': 24,
    '6.0': 23,
    '5.1': 22,
    '5.0': 21
}
# Module-level configuration; overwritten from CLI options in
# execute_test_command().
MAX_PARALLEL = 1
# Seconds between build-state polls.
BUILD_QUERY_INTERVAL = 15
TEST_PROJECT = 'fakelinker'
DEVICE_LOG_ENABLE = True
def api_string_to_level(api_str) -> int:
    """Translate an Android version string to its API level; 0 when unknown."""
    return API_MAP.get(api_str, 0)
class DeviceInfo:
    """One BrowserStack device, named 'Brand Model-OSVersion'
    (e.g. 'Google Pixel 7-13.0').

    Equality and hashing use the full device name only, so instances are
    usable in sets and as dict keys.
    """
    def __init__(self, device: str) -> None:
        words = device.split(' ')
        self.brand = DeviceBrand.to_brand(words[0])  # first word = manufacturer
        self.name = device
        # The trailing '-<version>' of the last word carries the OS version.
        version = words[-1].split('-')[-1]
        self.api = api_string_to_level(version)
    def is_device(self, name, version):
        """True when this instance denotes exactly '<name>-<version>'."""
        return f'{name}-{version}' == self.name
    def __eq__(self, other):
        return isinstance(other, DeviceInfo) and other.name == self.name
    def __hash__(self):
        return hash(self.name)
class BrowserStackDevice:
    """Catalog of the BrowserStack real devices, grouped by Android API level.

    The lists mirror BrowserStack's public device pool; random_device /
    random_devices pick test targets from them.
    """
    # Fallback device when no API level matches.
    default_device = DeviceInfo('Google Pixel 7 Pro-13.0')
    def __init__(self) -> None:
        self.android_13 = []
        self.android_13.append(DeviceInfo('Google Pixel 7 Pro-13.0'))
        self.android_13.append(DeviceInfo('Google Pixel 7-13.0'))
        self.android_13.append(DeviceInfo('Google Pixel 6 Pro-13.0'))
        self.android_12 = []
        self.android_12.append(DeviceInfo('Samsung Galaxy S22 Ultra-12.0'))
        self.android_12.append(DeviceInfo('Samsung Galaxy S22 Plus-12.0'))
        self.android_12.append(DeviceInfo('Samsung Galaxy S22-12.0'))
        self.android_12.append(DeviceInfo('Samsung Galaxy S21-12.0'))
        self.android_12.append(DeviceInfo('Google Pixel 6 Pro-12.0'))
        self.android_12.append(DeviceInfo('Google Pixel 6-12.0'))
        self.android_12.append(DeviceInfo('Google Pixel 5-12.0'))
        self.android_12.append(DeviceInfo('Samsung Galaxy Tab S8-12.0'))
        self.android_11 = []
        self.android_11.append(DeviceInfo('Samsung Galaxy S21 Ultra-11.0'))
        self.android_11.append(DeviceInfo('Samsung Galaxy S21-11.0'))
        self.android_11.append(DeviceInfo('Samsung Galaxy S21 Plus-11.0'))
        self.android_11.append(DeviceInfo('Samsung Galaxy M52-11.0'))
        self.android_11.append(DeviceInfo('Samsung Galaxy M32-11.0'))
        self.android_11.append(DeviceInfo('Samsung Galaxy A52-11.0'))
        self.android_11.append(DeviceInfo('Google Pixel 5-11.0'))
        self.android_11.append(DeviceInfo('Google Pixel 4-11.0'))
        self.android_11.append(DeviceInfo('Xiaomi Redmi Note 11-11.0'))
        self.android_11.append(DeviceInfo('Vivo Y21-11.0'))
        self.android_11.append(DeviceInfo('Vivo V21-11.0'))
        self.android_11.append(DeviceInfo('Oppo Reno 6-11.0'))
        self.android_11.append(DeviceInfo('Oppo A96-11.0'))
        self.android_11.append(DeviceInfo('Motorola Moto G71 5G-11.0'))
        self.android_11.append(DeviceInfo('Samsung Galaxy Tab S7-11.0'))
        self.android_10 = []
        self.android_10.append(DeviceInfo('Samsung Galaxy S20-10.0'))
        self.android_10.append(DeviceInfo('Samsung Galaxy S20 Plus-10.0'))
        self.android_10.append(DeviceInfo('Samsung Galaxy S20 Ultra-10.0'))
        self.android_10.append(DeviceInfo('Samsung Galaxy Note 20 Ultra-10.0'))
        self.android_10.append(DeviceInfo('Samsung Galaxy Note 20-10.0'))
        self.android_10.append(DeviceInfo('Samsung Galaxy A51-10.0'))
        self.android_10.append(DeviceInfo('Samsung Galaxy A11-10.0'))
        self.android_10.append(DeviceInfo('Google Pixel 4 XL-10.0'))
        self.android_10.append(DeviceInfo('Google Pixel 4-10.0'))
        self.android_10.append(DeviceInfo('Google Pixel 3-10.0'))
        self.android_10.append(DeviceInfo('OnePlus 8-10.0'))
        self.android_10.append(DeviceInfo('OnePlus 7T-10.0'))
        self.android_10.append(DeviceInfo('Xiaomi Redmi Note 9-10.0'))
        self.android_10.append(DeviceInfo('Oppo Reno 3 Pro-10.0'))
        self.android_10.append(DeviceInfo('Motorola Moto G9 Play-10.0'))
        self.android_10.append(DeviceInfo('Samsung Galaxy Tab S7-10.0'))
        self.android_9 = []
        self.android_9.append(DeviceInfo('Samsung Galaxy S9 Plus-9.0'))
        self.android_9.append(DeviceInfo('Samsung Galaxy S10e-9.0'))
        self.android_9.append(DeviceInfo('Samsung Galaxy S10 Plus-9.0'))
        self.android_9.append(DeviceInfo('Samsung Galaxy S10-9.0'))
        self.android_9.append(DeviceInfo('Samsung Galaxy Note 10 Plus-9.0'))
        self.android_9.append(DeviceInfo('Samsung Galaxy Note 10-9.0'))
        self.android_9.append(DeviceInfo('Samsung Galaxy A10-9.0'))
        self.android_9.append(DeviceInfo('Google Pixel 3a XL-9.0'))
        self.android_9.append(DeviceInfo('Google Pixel 3a-9.0'))
        self.android_9.append(DeviceInfo('Google Pixel 3 XL-9.0'))
        self.android_9.append(DeviceInfo('Google Pixel 3-9.0'))
        self.android_9.append(DeviceInfo('Google Pixel 2-9.0'))
        self.android_9.append(DeviceInfo('OnePlus 7-9.0'))
        self.android_9.append(DeviceInfo('OnePlus 6T-9.0'))
        self.android_9.append(DeviceInfo('Xiaomi Redmi Note 8-9.0'))
        self.android_9.append(DeviceInfo('Xiaomi Redmi Note 7-9.0'))
        self.android_9.append(DeviceInfo('Motorola Moto G7 Play-9.0'))
        self.android_9.append(DeviceInfo('Huawei P30-9.0'))
        self.android_9.append(DeviceInfo('Samsung Galaxy Tab S6-9.0'))
        self.android_9.append(DeviceInfo('Samsung Galaxy Tab S5e-9.0'))
        self.android_8_1 = []
        self.android_8_1.append(DeviceInfo('Samsung Galaxy Note 9-8.1'))
        self.android_8_1.append(DeviceInfo('Samsung Galaxy J7 Prime-8.1'))
        self.android_8_1.append(DeviceInfo('Samsung Galaxy Tab S4-8.1'))
        self.android_8_0 = []
        self.android_8_0.append(DeviceInfo('Samsung Galaxy S9 Plus-8.0'))
        self.android_8_0.append(DeviceInfo('Samsung Galaxy S9-8.0'))
        self.android_8_0.append(DeviceInfo('Google Pixel 2-8.0'))
        self.android_7_1 = []
        self.android_7_1.append(DeviceInfo('Samsung Galaxy Note 8-7.1'))
        self.android_7_1.append(DeviceInfo('Samsung Galaxy A8-7.1'))
        self.android_7_1.append(DeviceInfo('Google Pixel-7.1'))
        self.android_7_0 = []
        self.android_7_0.append(DeviceInfo('Samsung Galaxy S8 Plus-7.0'))
        self.android_7_0.append(DeviceInfo('Samsung Galaxy S8-7.0'))
        self.android_6 = []
        self.android_6.append(DeviceInfo('Samsung Galaxy S7-6.0'))
        self.android_6.append(DeviceInfo('Google Nexus 6-6.0'))
        # No devices are listed for API 22 (5.1).
        self.android_5_1 = []
        self.android_5_0 = []
        self.android_5_0.append(DeviceInfo('Samsung Galaxy S6-5.0'))
        # API level -> device pool.  API 32 (12L) reuses the Android 12 pool.
        self.devices = {
            33: self.android_13,
            32: self.android_12,
            31: self.android_12,
            30: self.android_11,
            29: self.android_10,
            28: self.android_9,
            27: self.android_8_1,
            26: self.android_8_0,
            25: self.android_7_1,
            24: self.android_7_0,
            23: self.android_6,
            22: self.android_5_1,
            21: self.android_5_0
        }
    def random_device(self, api_level=33):
        """Pick one random device for api_level (Android 13 pool as fallback)."""
        devices = self.devices.get(api_level, self.android_13)
        return random.choice(devices)
    def random_devices(self, apis: list[int] = [33]):
        """Pick one random device per requested API level (de-duplicated).

        NOTE(review): mutable default argument; harmless because `apis` is
        only iterated, never mutated.
        """
        result = set()
        for level in apis:
            devices = self.devices.get(level)
            if devices:
                result.add(random.choice(devices))
        return list(result)
    def find_device(self, info: DeviceInfo) -> DeviceInfo:
        """Return `info` when it names a cataloged device, else None."""
        devices: list[DeviceInfo] = self.devices.get(info.api, [])
        if devices.count(info) != 0:
            return info
        return None
class BrowserStackResponse:
    """Wrapper around a requests.Response that eagerly decodes the JSON body.

    Raises requests.exceptions.RequestException when the HTTP status is an
    error, after logging the decoded body.
    """
    def __init__(self, res: requests.Response) -> None:
        self.res = res
        if not res.encoding:
            res.encoding = 'utf-8'  # some endpoints omit the charset
        try:
            self.value: dict = res.json()
        except requests.exceptions.JSONDecodeError as e:
            logging.error('The request returns a non-json format %s', e)
            self.value = res.content  # fall back to the raw body bytes
        if not res.ok:
            # BUG FIX: this logged `self.content`, an attribute that does not
            # exist on this class, so the error path itself raised
            # AttributeError instead of the intended RequestException.
            logging.error('request error: %s', self.value)
            raise requests.exceptions.RequestException(
                f'The request returned an error code: {res.status_code}')
        logging.debug('request url:%s, response:\n%s', res.url, self.value)
    def parse_value(self, name: str, default_value=None):
        """Return self.value[name], or default_value when absent."""
        return self.value.get(name, default_value)
class BrowserStackBean:
    """Base class for API beans: typed accessors over a decoded JSON dict."""
    def __init__(self, res: BrowserStackResponse | dict) -> None:
        # Accept either a wrapped response or an already-decoded dict.
        if isinstance(res, BrowserStackResponse):
            self.data = res.value
        else:
            self.data = res
    def parse_value(self, name: str, default_value=None):
        """Return self.data[name], or default_value when absent."""
        return self.data.get(name, default_value)
    def parse_time(self, name: str):
        """Parse a timestamp field into epoch seconds; 'now' when missing."""
        value = self.parse_value(name)
        if value:
            try:
                s_time = time.strptime(value, '%Y-%m-%d %H:%M:%S %Z')
            except ValueError:
                # Some endpoints use a numeric UTC offset instead of a zone name.
                s_time = time.strptime(value, '%Y-%m-%d %H:%M:%S %z')
            return int(time.mktime(s_time))
        return int(time.time())
    def parse_state(self, name: str):
        """Parse a TestState field; missing/empty means still running."""
        val = self.parse_value(name)
        if not val:
            return TestState.Running
        return TestState.to_state(val)
    def parse_int(self, name: str, default_value=-1):
        """Parse an integer field.

        BUG FIX: the original fell back to default_value for ANY falsy value,
        silently turning a legitimate 0 (e.g. a zero duration) into -1.
        Only a missing (None) or empty-string value falls back now.
        """
        val = self.parse_value(name, default_value)
        if val is None or val == '':
            return default_value
        return int(val)
class AppBean(BrowserStackBean):
    """Metadata of an uploaded test application (apk)."""
    def __init__(self, res) -> None:
        super().__init__(res)
        # Plain string fields are copied verbatim ...
        for field in ('app_name', 'app_url', 'app_version', 'app_id',
                      'custom_id', 'shareable_id'):
            setattr(self, field, self.parse_value(field))
        # ... timestamps are normalized to epoch seconds.
        self.uploaded_at = self.parse_time('uploaded_at')
        self.expiry = self.parse_time('expiry')
    def get_app_url(self):
        """Prefer the custom id, then the shareable id, then the raw app url."""
        for candidate in (self.custom_id, self.shareable_id):
            if candidate:
                return candidate
        return self.app_url
class TestSuiteBean(BrowserStackBean):
    """Metadata of an uploaded espresso test suite."""
    def __init__(self, res) -> None:
        super().__init__(res)
        # Plain string fields are copied verbatim ...
        for field in ('test_suite_name', 'test_suite_url', 'test_suite_id',
                      'custom_id', 'shareable_id', 'framework'):
            setattr(self, field, self.parse_value(field))
        # ... timestamps are normalized to epoch seconds.
        self.uploaded_at = self.parse_time('uploaded_at')
        self.expiry = self.parse_time('expiry')
    def get_test_suite_url(self):
        """Prefer the custom id, then the shareable id, then the raw url."""
        for candidate in (self.custom_id, self.shareable_id):
            if candidate:
                return candidate
        return self.test_suite_url
class SessionBean(BrowserStackBean):
    """A single device session within a build."""
    def __init__(self, res) -> None:
        super().__init__(res)
        self.id = self.parse_value('id')
        self.testcases = self.parse_value('testcases')
        self.start_time = self.parse_time('start_time')
        self.duration = self.parse_int('duration')
        self.status = self.parse_state('status')
    def is_success(self):
        """Delegate to this session's TestState (Passed or Skipped)."""
        return self.status.is_success()
class BuildBean(BrowserStackBean):
    """A build: one espresso test run, possibly across several devices.

    Two response shapes exist: a freshly-created build answers with
    {'build_id', 'message'}; a build-status query answers with
    {'id', 'framework', 'devices', ...}.  Only the latter carries the
    `devices` attribute.
    """
    def __init__(self, res) -> None:
        super().__init__(res)
        self.build_id = self.parse_value('build_id')
        self.message = self.parse_value('message')
        if not self.build_id:
            self.id = self.parse_value('id')
            self.start_time = self.parse_value('start_time')
            self.framework = self.parse_value('framework')
            if self.framework:
                self.duration = self.parse_int('duration')
                self.status = self.parse_state('status')
                self.input_capabilities = self.parse_value(
                    'input_capabilities')
                self.start_time = self.parse_time('start_time')
                self.app_details = self.parse_value('app_details')
                self.test_suite_details = self.parse_value(
                    'test_suite_details')
                self.devices: list = self.parse_value('devices')
    def parse_session(self, device: DeviceInfo = None) -> list[SessionBean]:
        """Return the sessions of `device` (or of the first listed device when None)."""
        # BUG FIX: `self.devices` only exists on status beans -- use getattr so
        # creation beans return [] instead of raising AttributeError.
        devices = getattr(self, 'devices', None)
        if not devices:
            return []
        device_info: dict = None
        for info in devices:
            if not device or device.is_device(info.get('device'), info.get('os_version')):
                device_info = info
                break
        if not device_info:
            return []
        # 'sessions' may be absent -- treat as empty.
        return [SessionBean(session) for session in device_info.get('sessions') or []]
    def parse_failed_device(self) -> list[DeviceInfo]:
        """Return every device that had at least one non-successful session.

        BUG FIX: the original iterated self.devices unguarded and crashed
        (AttributeError/TypeError) on creation beans or a missing device list.
        """
        result = []
        for info in getattr(self, 'devices', None) or []:
            name = info.get('device')
            version = info.get('os_version')
            for session_info in info.get('sessions') or []:
                if not SessionBean(session_info).is_success():
                    result.append(DeviceInfo(f'{name}-{version}'))
                    break
        return result
class BrowserStack:
    """Minimal authenticated REST client for the BrowserStack cloud API."""
    _cloud_api_url = 'https://api-cloud.browserstack.com/'
    def __init__(self, auth: str) -> None:
        # `auth` is "user:access_key".
        parts = auth.split(':')
        self.user = parts[0]
        self.password = parts[1]
    def post(self, url: str, files: dict = None, json_data: dict = None, headers: dict = None):
        """POST to `url` (relative to the cloud API root) and wrap the response."""
        res = requests.post(BrowserStack._cloud_api_url + url, auth=(self.user, self.password),
                            json=json_data, headers=headers, files=files)
        logging.debug(
            f'post request url: {url}\n\tfiles: {files}\n\tjson content: {json_data}\n\theaders:{headers}')
        return BrowserStackResponse(res)
    def _format_url(self, url, app_id, test_suite_id, build_id, session_id):
        # Substitute the resource ids into the endpoint template.
        full_url = BrowserStack._cloud_api_url + url
        return full_url.format(app_id=app_id, test_suite_id=test_suite_id,
                               build_id=build_id, session_id=session_id)
    def get(self, url: str, app_id: str = '', test_suite_id: str = '', build_id: str = '', session_id: str = ''):
        """GET an endpoint template filled with the given resource ids."""
        url = self._format_url(url, app_id, test_suite_id, build_id, session_id)
        logging.debug(f'get request url:{url}')
        return BrowserStackResponse(requests.get(url, auth=(self.user, self.password)))
    def delete(self, url: str, app_id: str = '', test_suite_id: str = '', build_id: str = '', session_id: str = ''):
        """DELETE an endpoint template filled with the given resource ids."""
        url = self._format_url(url, app_id, test_suite_id, build_id, session_id)
        logging.debug(f'delete request url:{url}')
        return BrowserStackResponse(requests.delete(url, auth=(self.user, self.password)))
class BrowserStackApp(BrowserStack):
    """Espresso app (apk) management endpoints."""
    # POST
    _upload_app_url = 'app-automate/espresso/v2/app'
    # GET
    _list_upload_app_url = 'app-automate/espresso/v2/apps'
    # GET
    _app_detail_get_url = 'app-automate/espresso/v2/apps/{app_id}'
    # DELETE
    _delete_app_url = 'app-automate/espresso/v2/apps/{app_id}'
    def __init__(self, auth: str, app_id=None, custom_id=None) -> None:
        super().__init__(auth)
        self.app_id = app_id
        self.custom_id = custom_id
    def _get_app_id(self, aid):
        # Fall back to the instance default; unwrap a bean when one was passed.
        app_id = aid if aid else self.app_id
        if isinstance(app_id, AppBean):
            return app_id.app_id
        return app_id
    def _get_custom_id(self, custom_id):
        id = custom_id if custom_id else self.custom_id
        if isinstance(id, AppBean):
            return id.custom_id
        return id
    def update_app(self, file: Path, custom_id: AppBean | str = None) -> AppBean:
        """Upload an apk (optionally under a custom id) and return its bean."""
        if not file.is_file():
            raise FileExistsError(f'update file not exist {file}')
        files = {
            'file': file.open('rb')
        }
        custom_id = self._get_custom_id(custom_id)
        if custom_id:
            files['custom_id'] = (None, custom_id)
        return AppBean(self.post(self._upload_app_url, files=files))
    def list_upload_app(self) -> list[AppBean]:
        """List all uploaded apps (server returns most recent first)."""
        res = self.get(self._list_upload_app_url)
        return [AppBean(data) for data in res.parse_value('apps')]
    def get_last_uplad_app(self) -> AppBean:
        """Return the most recently uploaded app, or None.

        (Method name -- including its typo -- kept for compatibility.)
        """
        apps = self.list_upload_app()
        if apps:
            return apps[0]
        return None
    def find_custom_id_app(self, custom_id: str) -> AppBean:
        """Return the newest uploaded app registered under `custom_id`, or None.

        BUG FIX: the original ended with `return app` -- the last app listed,
        whatever its custom id -- instead of the newest match `result`
        (compare BrowserStackTestSuite.find_custom_id_test_suite).
        """
        result = None
        for app in self.list_upload_app():
            if app.custom_id == custom_id:
                if not result or app.uploaded_at > result.uploaded_at:
                    result = app
        return result
    def get_app_details(self, app_id: AppBean | str = None) -> AppBean:
        """Fetch the details bean of one uploaded app."""
        res = self.get(self._app_detail_get_url,
                       app_id=self._get_app_id(app_id))
        return AppBean(res.parse_value('app'))
    def delete_app(self, app_id: AppBean | str = None) -> bool:
        """Delete one uploaded app; True on success, logged False on failure."""
        try:
            res = self.delete(self._delete_app_url,
                              app_id=self._get_app_id(app_id))
            # Only the presence of 'success' matters (was `!= None`).
            return res.parse_value('success') is not None
        except requests.exceptions.RequestException as e:
            logging.error('Delete app failed app_id: %s, error: %s', app_id, e)
            return False
    def delete_recent_app(self):
        """Delete every uploaded app (dropped the stray f-prefix on the lazy-% log string)."""
        for app in self.list_upload_app():
            logging.info('delete recent app %s result: %s',
                         app.app_id, self.delete_app(app))
class BrowserStackTestSuite(BrowserStack):
    """Espresso test-suite management endpoints."""
    # POST
    _upload_test_suite_url = 'app-automate/espresso/v2/test-suite'
    # GET
    _list_test_suites_url = 'app-automate/espresso/v2/test-suites'
    # GET
    _test_suite_get_url = 'app-automate/espresso/v2/test-suites/{test_suite_id}'
    # DELETE
    _delete_test_suite_url = 'app-automate/espresso/v2/test-suites/{test_suite_id}'
    def __init__(self, auth: str, test_suite_id=None, custom_id=None) -> None:
        super().__init__(auth)
        self.test_suite_id = test_suite_id
        self.custom_id = custom_id
    def _get_test_suite_id(self, tid):
        # Fall back to the instance default; unwrap a bean when one was passed.
        suite = tid if tid else self.test_suite_id
        if isinstance(suite, TestSuiteBean):
            return suite.test_suite_id
        return suite
    def _get_custom_id(self, custom_id):
        id = custom_id if custom_id else self.custom_id
        if isinstance(id, TestSuiteBean):
            return id.custom_id
        return id
    def upload_test_suite(self, file: Path, custom_id: TestSuiteBean | str = None):
        """Upload a test-suite apk (optionally under a custom id)."""
        if not file.is_file():
            raise FileExistsError(f'update file not exist {file}')
        files = {
            'file': file.open('rb')
        }
        custom_id = self._get_custom_id(custom_id)
        if custom_id:
            files['custom_id'] = (None, custom_id)
        return TestSuiteBean(self.post(self._upload_test_suite_url, files=files))
    def list_test_suites(self) -> list[TestSuiteBean]:
        """List uploaded suites (server returns most recent first)."""
        res = self.get(self._list_test_suites_url)
        return [TestSuiteBean(suite) for suite in res.parse_value('test_suites')]
    def get_last_test_suite(self) -> TestSuiteBean:
        """Return the most recently uploaded suite, or None."""
        suites = self.list_test_suites()
        if len(suites) > 0:
            return suites[0]
        return None
    def find_custom_id_test_suite(self, custom_id) -> TestSuiteBean:
        """Return the newest suite registered under `custom_id`, or None."""
        result = None
        for suite in self.list_test_suites():
            if suite.custom_id == custom_id:
                if not result or suite.uploaded_at > result.uploaded_at:
                    result = suite
        return result
    def get_test_suite_details(self, test_suite_id: TestSuiteBean | str = None) -> TestSuiteBean:
        """Fetch the details bean of one uploaded suite."""
        res = self.get(self._test_suite_get_url,
                       test_suite_id=self._get_test_suite_id(test_suite_id))
        return TestSuiteBean(res.parse_value('test_suite'))
    def delete_test_suite(self, test_suite_id: TestSuiteBean | str = None) -> bool:
        """Delete one suite; True on success, logged False on failure."""
        try:
            res = self.delete(self._delete_test_suite_url,
                              test_suite_id=self._get_test_suite_id(test_suite_id))
            # Only the presence of 'success' matters (was `!= None`).
            return res.parse_value('success') is not None
        except requests.exceptions.RequestException as e:
            logging.error(
                'Delete test suite failed id: %s, error: %s', test_suite_id, e)
            return False
    def delete_recent_test_suite(self):
        """Delete every uploaded test suite."""
        for suite in self.list_test_suites():
            logging.info('delete test suite %s result %s',
                         suite.test_suite_id, self.delete_test_suite(suite))
class BrowserStackBuild(BrowserStack):
    """Espresso build endpoints: start test runs and poll them to completion."""
    # POST
    _espresso_build_url = 'app-automate/espresso/v2/build'
    # GET
    _build_state_get_url = 'app-automate/espresso/v2/builds/{build_id}'
    # GET
    _list_recent_builds_url = 'app-automate/espresso/v2/builds'
    def __init__(self, auth: str, build_id=None, app_url=None, test_suite_url=None, devices: list[DeviceInfo] = None) -> None:
        super().__init__(auth)
        self.build_id = build_id
        self.test_suite_url = test_suite_url
        self.app_url = app_url
        # BUG FIX: the `devices` parameter was accepted but discarded --
        # self.devices was always [].  Copy the list to avoid aliasing the
        # caller's list; the default is now None instead of a shared [].
        self.devices = list(devices) if devices else []
    def _get_build_id(self, bid):
        build = bid if bid else self.build_id
        if isinstance(build, BuildBean):
            # A status bean carries `id`; a creation bean carries `build_id`.
            return build.build_id if build.build_id else build.id
        return build
    def _get_app_url(self, url):
        app = url if url else self.app_url
        if isinstance(app, AppBean):
            return app.get_app_url()
        return app
    def _get_test_suite_url(self, url):
        suite = url if url else self.test_suite_url
        if isinstance(suite, TestSuiteBean):
            return suite.get_test_suite_url()
        return suite
    def _get_devices(self, ds: list[DeviceInfo]):
        # Empty/None argument falls back to the instance's device list.
        device_list = ds
        if not ds or len(ds) == 0:
            device_list = self.devices
        return [device.name for device in device_list]
    def espresso_build(self, app_url: AppBean | str = None, test_suite_url: TestSuiteBean | str = None, devices: list[DeviceInfo] = None) -> BuildBean:
        """Start an espresso build on `devices` and return its creation bean.

        `devices` defaults to [BrowserStackDevice.default_device] (expressed
        via None to avoid a shared mutable default argument).
        """
        if devices is None:
            devices = [BrowserStackDevice.default_device]
        data = {
            'app': self._get_app_url(app_url),
            'testSuite': self._get_test_suite_url(test_suite_url),
            'devices': self._get_devices(devices)
        }
        if TEST_PROJECT:
            data['project'] = TEST_PROJECT
        if DEVICE_LOG_ENABLE:
            data['deviceLogs'] = True
        return BuildBean(self.post(self._espresso_build_url, json_data=data))
    def get_build_state(self, build_id: BuildBean | str = None) -> BuildBean:
        """Fetch the current state bean of a build."""
        return BuildBean(self.get(self._build_state_get_url, build_id=self._get_build_id(build_id)))
    def list_recent_builds(self) -> list[BuildBean]:
        """List the most recent builds, newest first (server ordering)."""
        res = self.get(self._list_recent_builds_url)
        return [BuildBean(build) for build in res.value]
    def get_last_build_task(self) -> BuildBean:
        """Return the most recent build, or None when there is none."""
        tasks = self.list_recent_builds()
        if len(tasks) > 0:
            return tasks[0]
        return None
    @staticmethod
    def split_device_chunks(devices, n):
        """Yield successive n-sized chunks from lst."""
        for i in range(0, len(devices), n):
            yield devices[i:i + n]
    def build_device_test(self, app_url: AppBean | str = None, test_suite_url: TestSuiteBean | str = None,
                          devices: list[DeviceInfo] = None) -> list[BuildBean]:
        """Run the suite over `devices`, MAX_PARALLEL * 2 devices per build,
        polling each build to completion; returns the final state beans.

        BUG FIX: the default was a bare DeviceInfo (not a list), which
        split_device_chunks could not slice; it is now a one-element list.
        The polling loop is delegated to get_build_test instead of being
        duplicated here.
        """
        if devices is None:
            devices = [BrowserStackDevice.default_device]
        build_result = []
        for split_devices in BrowserStackBuild.split_device_chunks(devices, MAX_PARALLEL * 2):
            build_bean = self.espresso_build(
                app_url, test_suite_url, split_devices)
            build_result.append(self.get_build_test(build_bean))
        return build_result
    def get_build_test(self, build_bean: BuildBean):
        """Poll `build_bean` every BUILD_QUERY_INTERVAL seconds until it is in
        a terminal state; log the outcome and return the final bean."""
        build_state = None
        while True:
            build_state = self.get_build_state(build_bean)
            if build_state.status.is_complete():
                break
            logging.info('The test task is being executed: %s',
                         build_state.status)
            time.sleep(BUILD_QUERY_INTERVAL)
        if build_state.status.is_success():
            logging.info('test app all pass')
        else:
            logging.info('test app exist error')
        return build_state
class BrowserStackSession(BrowserStack):
    """Per-session endpoints: details, JUnit report and code coverage.

    NOTE(review): the '_juint_...' names keep the original misspelling of
    "junit" for backward compatibility.
    """
    # GET
    _session_details_get_url = 'app-automate/espresso/v2/builds/{build_id}/sessions/{session_id}'
    # GET
    _juint_report_get_url = 'app-automate/espresso/v2/builds/{build_id}/sessions/{session_id}/report'
    # GET
    _code_coverage_get_url = 'app-automate/espresso/v2/builds/{build_id}/sessions/{session_id}/coverage'
    def __init__(self, auth: str, build_id=None, session_id=None) -> None:
        super().__init__(auth)
        self.build_id: str = build_id
        self.session_id: str = session_id
    def _get_session_id(self, sid):
        # Fall back to the instance default; unwrap a bean when one was passed.
        chosen = sid if sid else self.session_id
        return chosen.id if isinstance(chosen, SessionBean) else chosen
    def _get_build_id(self, bid):
        chosen = bid if bid else self.build_id
        if isinstance(chosen, BuildBean):
            # A status bean carries `id`; a creation bean carries `build_id`.
            return chosen.build_id if chosen.build_id else chosen.id
        return chosen
    def _get(self, url, build_id=None, session_id=None):
        return self.get(url, build_id=self._get_build_id(build_id), session_id=self._get_session_id(session_id))
    def get_session_details(self, build_id: BuildBean | str = None, session_id: SessionBean | str = None):
        """Fetch one session's detail bean."""
        return SessionBean(self._get(self._session_details_get_url, build_id, session_id))
    def get_juint_report(self, build_id: BuildBean | str = None, session_id: SessionBean | str = None) -> str:
        """Fetch the JUnit report of one session."""
        return self._get(self._juint_report_get_url, build_id, session_id)
    def get_code_coverage(self, build_id: BuildBean | str = None, session_id: SessionBean | str = None):
        """Fetch the code-coverage report of one session."""
        return self._get(self._code_coverage_get_url, build_id, session_id)
def get_last_build_test_task(user: str) -> BuildBean:
    """Poll the most recent build to completion and return its final BuildBean.

    Returns None when there is no recent build.  BUG FIX: the original
    returned the bare int 12 here, violating the declared return type and
    crashing callers that use the result as a bean.
    Network errors are logged and re-raised.
    """
    try:
        build = BrowserStackBuild(user)
        task = build.get_last_build_task()
        if not task:
            logging.error(
                'There is no build test task, please create it and try again')
            return None
        return build.get_build_test(task)
    except requests.exceptions.RequestException as e:
        logging.error('get last recent build info error: %s', e)
        raise e
def execute_remove_command(args):
    """Handle the `remove` subcommand: delete uploaded apks and/or suites.

    NOTE(review): the parser also accepts -b/--build, but no build-removal
    API is wired up here, so that flag is currently ignored.
    """
    if args.apk:
        BrowserStackApp(args.user).delete_recent_app()
    if args.suite:
        BrowserStackTestSuite(args.user).delete_recent_test_suite()
    return 0
def parse_test_devices(api: list[int], all_api: bool, names: list[str]) -> list[DeviceInfo]:
    """Resolve the set of devices to test.

    Priority: explicit device `names` > one random device per API level in
    `api` (expanded to every known level when `all_api`).
    BUG FIX: the all-API list contained level 25 twice, which could pick a
    second random API-25 device; the identical elif/else branches were merged.
    """
    device = BrowserStackDevice()
    if names:
        devices = set()
        for name in names:
            info = device.find_device(DeviceInfo(name))
            if info:
                devices.add(info)
            else:
                logging.error(
                    'The specified device name was not found: %s', name)
        return list(devices)
    if all_api:
        api = [33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21]
    return device.random_devices(api)
def parse_test_app(user: str, custom_id: str, apk: Path) -> AppBean:
    """Pick the app to test: freshly-uploaded apk > existing custom id >
    most recent upload.  Returns None when nothing matches."""
    app = BrowserStackApp(user)
    if apk and apk.is_file():
        return app.update_app(apk, custom_id)
    if custom_id:
        return app.find_custom_id_app(custom_id)
    return app.get_last_uplad_app()
def parse_test_suite(user: str, custom_id: str, suite_path: Path) -> TestSuiteBean:
    """Pick the suite to run: freshly-uploaded suite > existing custom id >
    most recent upload.  Returns None when nothing matches."""
    suite = BrowserStackTestSuite(user)
    if suite_path and suite_path.is_file():
        return suite.upload_test_suite(suite_path, custom_id)
    if custom_id:
        return suite.find_custom_id_test_suite(custom_id)
    return suite.get_last_test_suite()
def execute_test_command(args):
    """Handle the `test` subcommand; returns a process exit code (0 = success)."""
    # Propagate CLI tuning knobs into the module-level configuration.
    global MAX_PARALLEL
    MAX_PARALLEL = args.max_parallel
    global BUILD_QUERY_INTERVAL
    BUILD_QUERY_INTERVAL = args.query_interval
    if args.is_32bit:
        # 32-bit builds are tracked under '32'-suffixed ids/projects.
        if args.apk_custom_id:
            args.apk_custom_id += '32'
        if args.project:
            args.project += '32'
    if args.project:
        global TEST_PROJECT
        TEST_PROJECT = args.project
    global DEVICE_LOG_ENABLE
    DEVICE_LOG_ENABLE = args.device_log
    if args.get_last_build:
        get_last_build_test_task(args.user)
        return 0
    if args.build_last_faild:
        task = get_last_build_test_task(args.user)
        # BUG FIX: guard against "no recent build" -- the original called
        # parse_failed_device() on whatever came back and crashed when the
        # lookup did not yield a build bean.
        if not isinstance(task, BuildBean):
            logging.error('There is no recent build to retry')
            return 12
        test_devices = task.parse_failed_device()
    else:
        test_devices = parse_test_devices(args.api, args.all_api, args.devices)
    if not test_devices:
        logging.error('There is no device to test')
        return 10
    app_bean = parse_test_app(args.user, args.apk_custom_id, args.apk)
    if not app_bean:
        logging.error(
            'The test apk does not exist or the upload path `%s` does not exist, please upload and try again', args.apk)
        return 11
    suite_bean = parse_test_suite(
        args.user, args.test_suite_custom_id, args.test_suite)
    if not suite_bean:
        logging.error(
            'The test suite does not exist or the upload path `%s` does not exist, please upload and try again', args.test_suite)
        return 12
    try:
        build = BrowserStackBuild(args.user)
        success = True
        for state in build.build_device_test(app_bean, suite_bean, test_devices):
            if state.status.is_success():
                logging.info('test build %s all passed', state.id)
            else:
                logging.error('test build %s has errors', state.id)
                success = False
        return 0 if success else 13
    except requests.exceptions.RequestException as e:
        logging.error('test app error: %s', e)
        raise e
class PathAction(argparse._StoreAction):
    """argparse action that resolves its value(s) to absolute Path objects.

    With must_exist=True (the default), a path that does not exist is
    rejected as an argument error.
    """
    def __init__(self, option_strings, dest, must_exist=True, nargs=None, **kwargs) -> None:
        self._must_exist = must_exist
        super().__init__(option_strings, dest, nargs, **kwargs)
    def check_value(self, value, option_string):
        """Resolve one raw string; raise argparse.ArgumentError when invalid."""
        path = Path(value).resolve()
        if self._must_exist and not path.exists():
            name = option_string if option_string else self.dest.upper()
            raise argparse.ArgumentError(
                self, f'input path does not exist: {name} {path}')
        return path
    def __call__(self, parser, namespace, values, option_string=None) -> None:
        # FIX: identity check (`is None`) instead of `== None`.
        if values is None or len(values) < 1:
            raise argparse.ArgumentError(
                self, f'The required path parameter does not exist: {option_string}')
        if isinstance(values, list):
            paths = [self.check_value(x, option_string) for x in values]
        else:
            paths = self.check_value(values, option_string)
        setattr(namespace, self.dest, paths)
class FileAction(PathAction):
    """PathAction variant that additionally requires the path to be a regular file."""
    def check_value(self, value, option_string):
        p = Path(value).resolve()
        # Reject a path that exists but is not a file, and -- when existence
        # is mandatory -- a path that is not (yet) a file at all.
        invalid = (p.exists() and not p.is_file()) or (self._must_exist and not p.is_file())
        if invalid:
            name = option_string if option_string else self.dest.upper()
            raise argparse.ArgumentError(
                self, f'Invalid file path input: {name} {p}')
        return p
class DirectoryAction(PathAction):
    """PathAction variant that requires the path to be a directory.

    BUG FIX: this class derived from argparse._StoreAction, so check_value()
    was never invoked and self._must_exist (set only by PathAction.__init__)
    did not exist; it must extend PathAction, like FileAction does.
    """
    def check_value(self, value, option_string):
        p = Path(value).resolve()
        if (p.exists() and not p.is_dir()) or (self._must_exist and not p.is_dir()):
            name = option_string if option_string else self.dest.upper()
            raise argparse.ArgumentError(
                self, f'Invalid directory path input: {name} {p}')
        return p
class RequiredAction(argparse._StoreAction):
    """Store action carrying a list of other actions whose `required` flag
    its subclasses toggle (see TrueRequiredAction / FalseRequiredAction).

    Always behaves as nargs='?' with const=True, i.e. a bare flag stores
    True.  The `nargs` parameter is accepted for signature compatibility
    only; this action always uses '?'.
    """
    def __init__(self, option_strings, dest, required_actions: list[argparse.Action] = None, nargs='?', **kwargs) -> None:
        # FIX: avoid a shared mutable default ([]); the redundant __call__
        # that only delegated to super() was removed.
        self._actions = required_actions if required_actions is not None else []
        super().__init__(option_strings, dest, nargs='?', const=True, **kwargs)
    def get_requires(self):
        """Return the actions whose `required` flag this action controls."""
        return self._actions
class TrueRequiredAction(RequiredAction):
    """When present on the command line, marks its linked actions as required."""
    def __call__(self, parser, namespace, values, option_string=None) -> None:
        for linked in self.get_requires():
            linked.required = True
        super().__call__(parser, namespace, values, option_string)
class FalseRequiredAction(RequiredAction):
    """When present on the command line, marks its linked actions as optional."""
    def __call__(self, parser, namespace, values, option_string=None):
        for linked in self.get_requires():
            linked.required = False
        super().__call__(parser, namespace, values, option_string)
def parse_argument():
    """Build the CLI (remove/test subcommands) and parse sys.argv.

    Returns the parsed namespace: `func` holds the chosen subcommand handler
    and `print_help` is attached for the no-subcommand case in main().
    """
    parser = argparse.ArgumentParser('fakelinker_browserstack_test', description='Automated testing of the fakelinker project',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i', '--log', help='Show more log',
                        action='store_true')
    subparser = parser.add_subparsers(description='test subcommand')
    # --- `remove` subcommand: clean up uploaded artifacts ---
    remove_cmd = subparser.add_parser(
        'remove', help='Remove app/test suite/build etc.')
    remove_cmd.add_argument(
        '-u', '--user', help='Specify BrowserStack access key', required=True)
    remove_cmd.add_argument(
        '-a', '--apk', help='Remove test apk', action='store_true')
    remove_cmd.add_argument(
        '-s', '--suite', help='Remove test suite', action='store_true')
    # NOTE(review): -b/--build is parsed but execute_remove_command ignores it.
    remove_cmd.add_argument(
        '-b', '--build', help='Remove test build', action='store_true')
    remove_cmd.set_defaults(func=execute_remove_command)
    # --- `test` subcommand: upload artifacts and run builds ---
    test_cmd = subparser.add_parser('test', help='Run test tasks')
    test_cmd.add_argument(
        '-u', '--user', help='Specify BrowserStack access key', required=True)
    test_cmd.add_argument('--apk', help='apk to be tested', action=FileAction)
    test_cmd.add_argument(
        '--is-32bit', help='Specifies that the test apk is a 32-bit program', action='store_true')
    test_cmd.add_argument('--apk-custom-id', help='Specify apk custom id',
                          type=str, default='FakelinkerTestApp')
    test_cmd.add_argument(
        '--test-suite', help='specify test suite', action=FileAction)
    test_cmd.add_argument('--test-suite-custom-id', help='Specify test suite custom id',
                          type=str, default='FakelinkerTestSuite')
    test_cmd.add_argument(
        '--api', help='Specify the api level of the test', type=int, nargs='+', default=33)
    test_cmd.add_argument(
        '--all-api', help='Execute one test per api level', action='store_true')
    test_cmd.add_argument(
        '--devices', help='Specify the name of the device to test', type=str, nargs='+')
    test_cmd.add_argument(
        '--max-parallel', help='The maximum number of parallel tests', type=int, default=5)
    test_cmd.add_argument('--query-interval',
                          help='Specifies the time interval for query build test tasks', type=int, default=15)
    # NOTE(review): --repeat-last is parsed but never read by
    # execute_test_command -- confirm whether it should gate apk/suite upload.
    test_cmd.add_argument(
        '--repeat-last', help='Test again with recent test apk and test suite', action='store_true')
    test_cmd.add_argument(
        '--get-last-build', help='Get recent build test information', action='store_true')
    # NOTE(review): flag name misspells "failed"; renaming would break
    # existing scripts, so it is kept as-is.
    test_cmd.add_argument('--build-last-faild',
                          help='Select the device that failed the upload test to test again', action='store_true')
    test_cmd.add_argument(
        '--project', help='Set test project name', type=str, default='fakelinker')
    test_cmd.add_argument(
        '--device-log', help='Open test task log', action='store_true')
    test_cmd.set_defaults(func=execute_test_command)
    args = parser.parse_args()
    args.print_help = parser.print_help
    if args.log:
        logging.basicConfig(level=logging.DEBUG)
    return args
def main():
    """Entry point: parse CLI arguments and run the selected subcommand.

    Exits with the subcommand's return code; prints usage and exits with
    status 1 when no subcommand was selected.
    """
    args = parse_argument()
    func = getattr(args, 'func', None)
    if func is None:
        args.print_help()
        sys.exit(1)
    sys.exit(func(args))
if __name__ == '__main__':
    # Let Ctrl-C terminate the tool quietly instead of dumping a traceback.
    try:
        main()
    except KeyboardInterrupt:
        pass
|
def bald(s):
    """Shave the head `s` and classify how unkempt it was.

    Replaces every stray hair '/' with '-' and returns a two-element list:
    the shaved string and a verdict based on the number of hairs found
    (0 Clean!, 1 Unicorn!, 2 Homer!, 3-5 Careless!, >5 Hobo!).
    """
    stray = s.count('/')
    if stray > 5:
        verdict = "Hobo!"
    elif stray >= 3:
        verdict = "Careless!"
    else:
        verdict = ("Clean!", "Unicorn!", "Homer!")[stray]
    return [s.replace('/', '-'), verdict]
'''
Being a bald man myself, I know the feeling of needing to keep it clean shaven.
Nothing worse than a stray hair waving in the wind.
You will be given a string(x). Clean shaved head is shown as "-" and stray hairs
are shown as "/". Your task is to check the head for stray hairs and get rid of them.
You should return the original string, but with any stray hairs removed.
Keep count of them though, as there is a second element you need to return:
0 hairs --> "Clean!"
1 hair --> "Unicorn!"
2 hairs --> "Homer!"
3-5 hairs --> "Careless!"
>5 hairs --> "Hobo!"
So for this head: "------/------" you should return:
["-------------", "Unicorn!"]
'''
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import ast
import builtins
import itertools
import logging
import os.path
import sys
import typing
from dataclasses import dataclass
from pathlib import PurePath
from typing import Any, Sequence, cast
from pants.build_graph.address import (
Address,
AddressInput,
BuildFileAddress,
BuildFileAddressRequest,
MaybeAddress,
ResolveError,
)
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.env_vars import CompleteEnvironmentVars, EnvironmentVars, EnvironmentVarsRequest
from pants.engine.fs import DigestContents, FileContent, GlobMatchErrorBehavior, PathGlobs, Paths
from pants.engine.internals.defaults import BuildFileDefaults, BuildFileDefaultsParserState
from pants.engine.internals.dep_rules import (
BuildFileDependencyRules,
DependencyRuleApplication,
MaybeBuildFileDependencyRulesImplementation,
)
from pants.engine.internals.mapper import AddressFamily, AddressMap
from pants.engine.internals.parser import (
BuildFilePreludeSymbols,
BuildFileSymbolsInfo,
Parser,
error_on_imports,
)
from pants.engine.internals.session import SessionValues
from pants.engine.internals.synthetic_targets import (
SyntheticAddressMaps,
SyntheticAddressMapsRequest,
)
from pants.engine.internals.target_adaptor import TargetAdaptor, TargetAdaptorRequest
from pants.engine.rules import Get, MultiGet, QueryRule, collect_rules, rule
from pants.engine.target import (
DependenciesRuleApplication,
DependenciesRuleApplicationRequest,
RegisteredTargetTypes,
)
from pants.engine.unions import UnionMembership
from pants.init.bootstrap_scheduler import BootstrapStatus
from pants.option.global_options import GlobalOptions
from pants.util.frozendict import FrozenDict
from pants.util.strutil import softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class BuildFileOptions:
    """Global options controlling which BUILD files are discovered and parsed."""

    # Filename globs recognized as BUILD files.
    patterns: tuple[str, ...]
    # Path patterns whose BUILD files are ignored entirely.
    ignores: tuple[str, ...] = ()
    # Globs for prelude files whose symbols are exposed to BUILD files.
    prelude_globs: tuple[str, ...] = ()
@rule
def extract_build_file_options(
    global_options: GlobalOptions,
    bootstrap_status: BootstrapStatus,
) -> BuildFileOptions:
    """Project the subset of global options relevant to BUILD file handling.

    Prelude globs are suppressed while bootstrapping is still in progress.
    """
    prelude_globs: tuple[str, ...] = ()
    if not bootstrap_status.in_progress:
        prelude_globs = global_options.build_file_prelude_globs
    return BuildFileOptions(
        patterns=global_options.build_patterns,
        ignores=global_options.build_ignore,
        prelude_globs=prelude_globs,
    )
@rule(desc="Expand macros")
async def evaluate_preludes(
    build_file_options: BuildFileOptions,
    parser: Parser,
) -> BuildFilePreludeSymbols:
    """Execute all prelude files and collect the symbols (macros) they define.

    Preludes are exec'd against a shared globals mapping seeded with builtin
    error types, `typing` names, and all registered BUILD-file symbols; the
    names each prelude defines land in `locals` and become the prelude symbols.
    """
    prelude_digest_contents = await Get(
        DigestContents,
        PathGlobs(
            build_file_options.prelude_globs,
            # Missing prelude globs are not an error.
            glob_match_error_behavior=GlobMatchErrorBehavior.ignore,
        ),
    )
    globals: dict[str, Any] = {
        **{name: getattr(builtins, name) for name in dir(builtins) if name.endswith("Error")},
        **{name: getattr(typing, name) for name in typing.__all__},
        # Ensure the globals for each prelude includes the builtin symbols (E.g. `python_sources`)
        # and any build file aliases (e.g. from plugins)
        **parser.symbols,
    }
    locals: dict[str, Any] = {}
    env_vars: set[str] = set()
    for file_content in prelude_digest_contents:
        try:
            file_content_str = file_content.content.decode()
            content = compile(file_content_str, file_content.path, "exec", dont_inherit=True)
            exec(content, globals, locals)
        except Exception as e:
            raise Exception(f"Error parsing prelude file {file_content.path}: {e}")
        # Reject `import` statements in prelude files.
        error_on_imports(file_content_str, file_content.path)
        # Track env vars the prelude reads via `env()` so they can be provided later.
        env_vars.update(BUILDFileEnvVarExtractor.get_env_vars(file_content))
    # __builtins__ is a dict, so isn't hashable, and can't be put in a FrozenDict.
    # Fortunately, we don't care about it - preludes should not be able to override builtins, so we just pop it out.
    # TODO: Give a nice error message if a prelude tries to set a expose a non-hashable value.
    locals.pop("__builtins__", None)
    # Ensure preludes can reference each other by populating the shared globals object with references
    # to the other symbols
    globals.update(locals)
    return BuildFilePreludeSymbols.create(locals, env_vars)
@rule
async def get_all_build_file_symbols_info(
    parser: Parser, prelude_symbols: BuildFilePreludeSymbols
) -> BuildFileSymbolsInfo:
    """Merge the parser's builtin symbol info with prelude-provided symbol info."""
    builtin_info = parser.symbols_info.info.values()
    prelude_info = prelude_symbols.info.values()
    return BuildFileSymbolsInfo.from_info(builtin_info, prelude_info)
@rule
async def maybe_resolve_address(address_input: AddressInput) -> MaybeAddress:
    """Resolve an AddressInput to an Address, or to a ResolveError if its path does not exist.

    A file on disk resolves as a file address, a directory as a directory
    address (files win when the glob matches both); an empty path component
    means an address in the build root.
    """
    path = address_input.path_component
    if not path:
        # An address in the root directory.
        return MaybeAddress(address_input.dir_to_address())
    paths = await Get(Paths, PathGlobs(globs=(path,)))
    if paths.files:
        return MaybeAddress(address_input.file_to_address())
    if paths.dirs:
        return MaybeAddress(address_input.dir_to_address())
    # Nothing on disk matched: report a resolution error instead of raising here.
    spec = path if not address_input.target_component else f"{path}:{address_input.target_component}"
    return MaybeAddress(
        ResolveError(
            softwrap(
                f"""
                The file or directory '{address_input.path_component}' does not exist on disk in
                the workspace, so the address '{spec}' from {address_input.description_of_origin}
                cannot be resolved.
                """
            )
        )
    )
@rule
async def resolve_address(maybe_address: MaybeAddress) -> Address:
    """Unwrap a MaybeAddress, raising the stored ResolveError on failure."""
    val = maybe_address.val
    if isinstance(val, ResolveError):
        raise val
    return val
@dataclass(frozen=True)
class AddressFamilyDir(EngineAwareParameter):
    """The directory to find addresses for.
    This does _not_ recurse into subdirectories.
    """
    # Path of the directory, relative to the build root.
    path: str
    def debug_hint(self) -> str:
        """Show the directory path in engine debug/trace output."""
        return self.path
@dataclass(frozen=True)
class OptionalAddressFamily:
    """The AddressFamily for a directory, when that directory has any BUILD files.

    `address_family` is None when no BUILD files exist for `path`.
    """

    # Path of the directory, relative to the build root.
    path: str
    address_family: AddressFamily | None = None

    def ensure(self) -> AddressFamily:
        """Return the wrapped AddressFamily, raising ResolveError when absent."""
        family = self.address_family
        if family is None:
            raise ResolveError(f"Directory '{self.path}' does not contain any BUILD files.")
        return family
@rule
async def ensure_address_family(request: OptionalAddressFamily) -> AddressFamily:
    """Unwrap an OptionalAddressFamily, raising ResolveError if the directory had no BUILD files."""
    return request.ensure()
class BUILDFileEnvVarExtractor(ast.NodeVisitor):
    """AST visitor that collects env var names referenced via `env(...)` calls.

    Only calls where `env` is a bare name and the first argument is a constant
    string are recorded; non-constant names are warned about instead.
    """

    def __init__(self, filename: str):
        super().__init__()
        # Env var names collected so far.
        self.env_vars: set[str] = set()
        # Used only for warning messages.
        self.filename = filename

    @classmethod
    def get_env_vars(cls, file_content: FileContent) -> Sequence[str]:
        """Parse `file_content` and return the env var names its `env()` calls reference."""
        obj = cls(file_content.path)
        obj.visit(ast.parse(file_content.content, file_content.path))
        return tuple(obj.env_vars)

    def visit_Call(self, node: ast.Call):
        is_env = isinstance(node.func, ast.Name) and node.func.id == "env"
        for arg in node.args:
            if not is_env:
                # Not an env() call (or past its first arg): recurse into nested expressions.
                self.visit(arg)
                continue
            # Only first arg may be checked as env name
            is_env = False
            if sys.version_info[0:2] < (3, 8):
                # Pre-3.8, string constants are represented as `ast.Str`.
                value = arg.s if isinstance(arg, ast.Str) else None
            else:
                value = arg.value if isinstance(arg, ast.Constant) else None
            if value:
                # Found env name in this call, we're done here.
                self.env_vars.add(value)
                return
            else:
                logger.warning(
                    f"{self.filename}:{arg.lineno}: Only constant string values as variable name to "
                    f"`env()` is currently supported. This `env()` call will always result in "
                    "the default value only."
                )
        for kwarg in node.keywords:
            self.visit(kwarg)
@rule(desc="Search for addresses in BUILD files")
async def parse_address_family(
    parser: Parser,
    bootstrap_status: BootstrapStatus,
    build_file_options: BuildFileOptions,
    prelude_symbols: BuildFilePreludeSymbols,
    directory: AddressFamilyDir,
    registered_target_types: RegisteredTargetTypes,
    union_membership: UnionMembership,
    maybe_build_file_dependency_rules_implementation: MaybeBuildFileDependencyRulesImplementation,
    session_values: SessionValues,
) -> OptionalAddressFamily:
    """Given an AddressMapper and a directory, return an AddressFamily.

    The AddressFamily may be empty, but it will not be None.

    Parses every BUILD file in `directory` (non-recursively), merges in any
    synthetic targets for that directory, and inherits defaults/dependency
    rules from the closest ancestor directory that has an AddressFamily.
    """
    # Fetch this directory's BUILD files and its synthetic targets in parallel.
    digest_contents, all_synthetic_address_maps = await MultiGet(
        Get(
            DigestContents,
            PathGlobs(
                globs=(
                    *(os.path.join(directory.path, p) for p in build_file_options.patterns),
                    *(f"!{p}" for p in build_file_options.ignores),
                )
            ),
        ),
        Get(SyntheticAddressMaps, SyntheticAddressMapsRequest(directory.path)),
    )
    synthetic_address_maps = tuple(itertools.chain(all_synthetic_address_maps))
    if not digest_contents and not synthetic_address_maps:
        # No BUILD files and no synthetic targets: address_family stays None.
        return OptionalAddressFamily(directory.path)
    # Defaults and dependency rules start empty and are inherited from the
    # nearest ancestor directory that produced an AddressFamily.
    defaults = BuildFileDefaults({})
    dependents_rules: BuildFileDependencyRules | None = None
    dependencies_rules: BuildFileDependencyRules | None = None
    parent_dirs = tuple(PurePath(directory.path).parents)
    if parent_dirs:
        maybe_parents = await MultiGet(
            Get(OptionalAddressFamily, AddressFamilyDir(str(parent_dir)))
            for parent_dir in parent_dirs
        )
        for maybe_parent in maybe_parents:
            if maybe_parent.address_family is not None:
                family = maybe_parent.address_family
                defaults = family.defaults
                dependents_rules = family.dependents_rules
                dependencies_rules = family.dependencies_rules
                break
    defaults_parser_state = BuildFileDefaultsParserState.create(
        directory.path, defaults, registered_target_types, union_membership
    )
    build_file_dependency_rules_class = (
        maybe_build_file_dependency_rules_implementation.build_file_dependency_rules_class
    )
    if build_file_dependency_rules_class is not None:
        dependents_rules_parser_state = build_file_dependency_rules_class.create_parser_state(
            directory.path,
            dependents_rules,
        )
        dependencies_rules_parser_state = build_file_dependency_rules_class.create_parser_state(
            directory.path,
            dependencies_rules,
        )
    else:
        # No dependency-rules implementation registered.
        dependents_rules_parser_state = None
        dependencies_rules_parser_state = None
    def _extract_env_vars(
        file_content: FileContent, extra_env: Sequence[str], env: CompleteEnvironmentVars
    ) -> Get[EnvironmentVars]:
        """For BUILD file env vars, we only ever consult the local systems env."""
        env_vars = (*BUILDFileEnvVarExtractor.get_env_vars(file_content), *extra_env)
        return Get(
            EnvironmentVars,
            {
                EnvironmentVarsRequest(env_vars): EnvironmentVarsRequest,
                env: CompleteEnvironmentVars,
            },
        )
    # Resolve, per BUILD file, the env vars it (plus the preludes) reference.
    all_env_vars = await MultiGet(
        _extract_env_vars(
            fc, prelude_symbols.referenced_env_vars, session_values[CompleteEnvironmentVars]
        )
        for fc in digest_contents
    )
    # Parse each BUILD file into an AddressMap of its declared targets.
    address_maps = [
        AddressMap.parse(
            fc.path,
            fc.content.decode(),
            parser,
            prelude_symbols,
            env_vars,
            bootstrap_status.in_progress,
            defaults_parser_state,
            dependents_rules_parser_state,
            dependencies_rules_parser_state,
        )
        for fc, env_vars in zip(digest_contents, all_env_vars)
    ]
    # Freeze defaults and dependency rules
    frozen_defaults = defaults_parser_state.get_frozen_defaults()
    frozen_dependents_rules = cast(
        "BuildFileDependencyRules | None",
        dependents_rules_parser_state
        and dependents_rules_parser_state.get_frozen_dependency_rules(),
    )
    frozen_dependencies_rules = cast(
        "BuildFileDependencyRules | None",
        dependencies_rules_parser_state
        and dependencies_rules_parser_state.get_frozen_dependency_rules(),
    )
    # Process synthetic targets.
    for address_map in address_maps:
        for synthetic in synthetic_address_maps:
            synthetic.process_declared_targets(address_map)
            synthetic.apply_defaults(frozen_defaults)
    return OptionalAddressFamily(
        directory.path,
        AddressFamily.create(
            spec_path=directory.path,
            address_maps=(*address_maps, *synthetic_address_maps),
            defaults=frozen_defaults,
            dependents_rules=frozen_dependents_rules,
            dependencies_rules=frozen_dependencies_rules,
        ),
    )
@rule
async def find_build_file(request: BuildFileAddressRequest) -> BuildFileAddress:
    """Locate the BUILD file in which `request.address` was declared.

    Raises ResolveError (with name suggestions) when the target is not present
    in the directory's AddressFamily.
    """
    address = request.address
    address_family = await Get(AddressFamily, AddressFamilyDir(address.spec_path))
    # Generated targets are declared via their generator, so look that up instead.
    owning_address = address.maybe_convert_to_target_generator()
    if address_family.get_target_adaptor(owning_address) is None:
        raise ResolveError.did_you_mean(
            owning_address,
            description_of_origin=request.description_of_origin,
            known_names=address_family.target_names,
            namespace=address_family.namespace,
        )
    bfa = next(
        build_file_address
        for build_file_address in address_family.build_file_addresses
        if build_file_address.address == owning_address
    )
    # For a generated target, report the original address paired with the generator's BUILD file.
    return BuildFileAddress(address, bfa.rel_path) if address.is_generated_target else bfa
def _get_target_adaptor(
address: Address, address_family: AddressFamily, description_of_origin: str
) -> TargetAdaptor:
target_adaptor = address_family.get_target_adaptor(address)
if target_adaptor is None:
raise ResolveError.did_you_mean(
address,
description_of_origin=description_of_origin,
known_names=address_family.target_names,
namespace=address_family.namespace,
)
return target_adaptor
@rule
async def find_target_adaptor(request: TargetAdaptorRequest) -> TargetAdaptor:
    """Hydrate a TargetAdaptor so that it may be converted into the Target API.

    Only non-generated addresses are valid here: generated targets have no
    declaration of their own in a BUILD file.
    """
    address = request.address
    if address.is_generated_target:
        raise AssertionError(
            "Generated targets are not defined in BUILD files, and so do not have "
            f"TargetAdaptors: {request}"
        )
    family = await Get(AddressFamily, AddressFamilyDir(address.spec_path))
    return _get_target_adaptor(address, family, request.description_of_origin)
def _rules_path(address: Address) -> str:
if address.is_file_target and os.path.sep in address.relative_file_path: # type: ignore[operator]
# The file is in a subdirectory of spec_path
return os.path.dirname(address.filename)
else:
return address.spec_path
async def _get_target_family_and_adaptor_for_dep_rules(
    *addresses: Address, description_of_origin: str
) -> tuple[tuple[AddressFamily, TargetAdaptor], ...]:
    """For each address, return (family providing its dependency rules, its TargetAdaptor).

    The rules family comes from `_rules_path(address)` when that directory has
    BUILD files, falling back to the spec_path's family; the adaptor is always
    looked up in the spec_path's family.
    """
    # Fetch up to 2 sets of address families per address, as we want the rules from the directory
    # the file is in rather than the directory where the target generator was declared, if not the
    # same.
    rules_paths = set(
        itertools.chain.from_iterable(
            {address.spec_path, _rules_path(address)} for address in addresses
        )
    )
    maybe_address_families = await MultiGet(
        Get(OptionalAddressFamily, AddressFamilyDir(rules_path)) for rules_path in rules_paths
    )
    maybe_families = {maybe.path: maybe for maybe in maybe_address_families}
    return tuple(
        (
            (
                maybe_families[_rules_path(address)].address_family
                or maybe_families[address.spec_path].ensure()
            ),
            _get_target_adaptor(
                address,
                maybe_families[address.spec_path].ensure(),
                description_of_origin,
            ),
        )
        for address in addresses
    )
@rule
async def get_dependencies_rule_application(
    request: DependenciesRuleApplicationRequest,
    maybe_build_file_rules_implementation: MaybeBuildFileDependencyRulesImplementation,
) -> DependenciesRuleApplication:
    """Check every dependency of `request.address` against the declared dependency rules."""
    build_file_dependency_rules_class = (
        maybe_build_file_rules_implementation.build_file_dependency_rules_class
    )
    if build_file_dependency_rules_class is None:
        # No dependency-rules backend is enabled: everything is allowed.
        return DependenciesRuleApplication.allow_all()
    # The first entry is the origin target itself; the rest line up 1:1 with
    # request.dependencies.
    (
        origin_rules_family,
        origin_target,
    ), *dependencies_family_adaptor = await _get_target_family_and_adaptor_for_dep_rules(
        request.address,
        *request.dependencies,
        description_of_origin=request.description_of_origin,
    )
    dependencies_rule: dict[Address, DependencyRuleApplication] = {}
    for dependency_address, (dependency_rules_family, dependency_target) in zip(
        request.dependencies, dependencies_family_adaptor
    ):
        dependencies_rule[
            dependency_address
        ] = build_file_dependency_rules_class.check_dependency_rules(
            origin_address=request.address,
            origin_adaptor=origin_target,
            dependencies_rules=origin_rules_family.dependencies_rules,
            dependency_address=dependency_address,
            dependency_adaptor=dependency_target,
            dependents_rules=dependency_rules_family.dependents_rules,
        )
    return DependenciesRuleApplication(request.address, FrozenDict(dependencies_rule))
def rules():
    """Return this module's rules plus the explicit QueryRule for `BuildFileSymbolsInfo`.

    `BuildFileSymbolsInfo` is consumed by the `HelpInfoExtracter` via the
    scheduler session's `product_request()` directly, so it needs an explicit
    QueryRule declaring it as a valid entry point into the rule graph.
    """
    return tuple(collect_rules()) + (QueryRule(BuildFileSymbolsInfo, ()),)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.