max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
particle_environments/mager/observation.py | rallen10/ergo_particle_gym | 0 | 12773551 | <reponame>rallen10/ergo_particle_gym
import numpy as np
from copy import deepcopy
from bisect import bisect, insort
from particle_environments.common import distance, delta_pos
from particle_environments.mager.world import SensingLimitedMortalAgent
def format_observation(observe, objects, num_observations, observation_size, sort_key=None):
    """
    Formats a list of observations into a flat, fixed-length 1-d list.

    :param observe: function that computes an observation given an object from objects
    :param objects: list of objects to observe
    :param num_observations: # of observations; zero-padding or truncation is applied to meet this
    :param observation_size: length of each observation array/list/tuple
    :param sort_key: function of an object returning a value to sort by (ascending).
        NOTE: it is called with the object only (the old docstring incorrectly
        said it received an (object, observation) pair).
    :return: a flat list of formatted observation values
    """
    assert callable(observe)
    assert num_observations >= 1
    # TODO: handle case where no objects are present (e.g. all other agents terminated)
    #assert len(objects) > 0

    def _flatten(observation, accumulator):
        # Append one observation to the flat accumulator; sequence-valued
        # observations must match the declared observation_size.
        if isinstance(observation, (list, tuple)):
            assert len(observation) == observation_size
            accumulator += list(observation)
        else:
            accumulator.append(observation)

    # Observe every object first (preserving the original call order), then
    # optionally sort the (object, observation) pairs by the caller's key.
    pairs = [(obj, observe(obj)) for obj in objects]
    if sort_key is not None:
        pairs.sort(key=lambda pair: sort_key(pair[0]))

    observations = []
    for _, observation in pairs:
        _flatten(observation, observations)

    # padding/truncation to the fixed output length
    missing_objects = num_observations - len(objects)
    if missing_objects > 0:
        # TODO: I think non-communication should send None instead of zero, because zero has real meaning
        # however this causes a problem with action function
        observations += [0] * observation_size * missing_objects
        return observations
    return observations[:num_observations * observation_size]
def agent_histogram_observation(cur_agent, agents, obs_distance, n_radial_bins, n_angular_bins):
    ''' generate observation histogram of agents and list of terminated agents

    :param cur_agent: the observing agent (never counted in its own histogram)
    :param agents: iterable of agents to bin around cur_agent
    :param obs_distance: sensing radius; live agents beyond it are ignored
    :param n_radial_bins: number of log-spaced distance bins
    :param n_angular_bins: number of equal-width angle bins covering [0, 2*pi)
    :return: (n_radial_bins x n_angular_bins integer count array,
              list of delta positions of terminated agents, sorted by distance)
    '''
    # generate radial histogram bins based on sensing limitations
    # (log-spaced from obs_distance/10 out to obs_distance)
    bin_depth = obs_distance/10.0
    radial_bins = np.logspace(np.log10(bin_depth), np.log10(obs_distance), num=n_radial_bins)
    # generate angular histogram bins; values are the bin centers, offset by
    # half a bin width so that searchsorted maps angles to the nearest sector
    bin_angle = 2.0*np.pi/float(n_angular_bins)
    angular_bins = np.linspace(bin_angle/2.0, 2*np.pi - bin_angle/2.0, num=n_angular_bins)
    agent_histogram_2d = np.array([[0]*n_angular_bins]*n_radial_bins)
    # establish observation of failures; both lists are kept sorted by
    # increasing distance via bisect-insertion below
    observed_terminations_2d = []
    observed_terminations_dists = []
    # count agents in each bin
    for a in agents:
        dist = distance(a, cur_agent)
        # skip if agent is agent (do not observe self)
        if a == cur_agent:
            continue
        # record observed termination, inserted so the list stays ordered by
        # distance. NOTE(review): terminations are recorded before the range
        # check, i.e. even beyond obs_distance -- confirm this is intended.
        if a.terminated:
            insert_index = bisect(observed_terminations_dists, dist)
            observed_terminations_dists.insert(insert_index, dist)
            observed_terminations_2d.insert(insert_index, delta_pos(a, cur_agent))
            continue
        # skip if outside of observation range (or not observable for
        # sensing-limited agents)
        if dist > obs_distance or (
            isinstance(cur_agent, SensingLimitedMortalAgent) and not cur_agent.is_entity_observable(a)):
            continue
        # find radial bin
        rad_bin = np.searchsorted(radial_bins, dist)
        # calculate angle of the other agent relative to cur_agent, in [0, 2*pi)
        dx, dy = delta_pos(a, cur_agent)
        ang = np.arctan2(dy, dx)
        if ang < 0:
            ang += 2*np.pi
        # find angular bin; angles past the last bin center wrap around to bin 0
        ang_bin = np.searchsorted(angular_bins, ang)
        if ang_bin == n_angular_bins:
            ang_bin = 0
        # add count to histogram
        agent_histogram_2d[rad_bin][ang_bin] = agent_histogram_2d[rad_bin][ang_bin] + 1
    return agent_histogram_2d, observed_terminations_2d
def landmark_histogram_observation(cur_agent, landmarks, obs_distance, n_radial_bins, n_angular_bins):
    ''' generate observation histogram of landmarks and list of hazards

    :param cur_agent: the observing agent
    :param landmarks: iterable of landmark entities (MUTATED: see note below)
    :param obs_distance: sensing radius; non-hazard landmarks beyond it are ignored
    :param n_radial_bins: number of log-spaced distance bins
    :param n_angular_bins: number of equal-width angle bins covering [0, 2*pi)
    :return: (n_radial_bins x n_angular_bins integer count array,
              list of delta positions of tagged hazards, sorted by distance)
    '''
    # generate radial histogram bins based on sensing limitations
    # (log-spaced from obs_distance/10 out to obs_distance)
    bin_depth = obs_distance/10.0
    radial_bins = np.logspace(np.log10(bin_depth), np.log10(obs_distance), num=n_radial_bins)
    # generate angular histogram bins; values are the bin centers, offset by
    # half a bin width so that searchsorted maps angles to the nearest sector
    bin_angle = 2.0*np.pi/float(n_angular_bins)
    angular_bins = np.linspace(bin_angle/2.0, 2*np.pi - bin_angle/2.0, num=n_angular_bins)
    landmark_histogram_2d = np.array([[0]*n_angular_bins]*n_radial_bins)
    # establish observation of failures; both lists are kept sorted by
    # increasing distance via bisect-insertion below
    observed_hazards_2d = []
    observed_hazards_dists = []
    # count agents in each bin
    for lm in landmarks:
        dist = distance(lm, cur_agent)
        # check if landmark is giving reward or hazard warning
        # NOTE: This modifies the landmarks list
        if dist < lm.size:
            if lm.is_hazard:
                # touching a hazard tags it and recolors it
                # NOTE(review): 1.1 exceeds the usual [0, 1] color range -- confirm
                lm.hazard_tag = 1.0
                lm.color = np.array([1.1, 0, 0])
        # record observed hazard, inserted so the list stays ordered by
        # distance. NOTE(review): tagged hazards are recorded before the range
        # check, i.e. even beyond obs_distance -- confirm this is intended.
        if lm.hazard_tag > 0.0:
            insert_index = bisect(observed_hazards_dists, dist)
            observed_hazards_dists.insert(insert_index, dist)
            observed_hazards_2d.insert(insert_index, delta_pos(lm, cur_agent))
            continue
        # skip if outside of observation range (or not observable for
        # sensing-limited agents)
        if dist > obs_distance or (
            isinstance(cur_agent, SensingLimitedMortalAgent) and not cur_agent.is_entity_observable(lm)):
            continue
        # find radial bin
        rad_bin = np.searchsorted(radial_bins, dist)
        # calculate angle of the landmark relative to cur_agent, in [0, 2*pi)
        dx, dy = delta_pos(lm, cur_agent)
        ang = np.arctan2(dy, dx)
        if ang < 0:
            ang += 2*np.pi
        # find angular bin; angles past the last bin center wrap around to bin 0
        ang_bin = np.searchsorted(angular_bins, ang)
        if ang_bin == n_angular_bins:
            ang_bin = 0
        # add count to histogram
        landmark_histogram_2d[rad_bin][ang_bin] = landmark_histogram_2d[rad_bin][ang_bin] + 1
    return landmark_histogram_2d, observed_hazards_2d | 2.40625 | 2 |
src/act/arc/aCTCleaner.py | bryngemark/aCT | 0 | 12773552 | <filename>src/act/arc/aCTCleaner.py
# aCTCleaner.py
#
# Cleans jobs from CE and ARC DB
#
import arc
from act.common.aCTProcess import aCTProcess
class aCTCleaner(aCTProcess):
    """aCT process that removes finished jobs from the CE and the ARC DB."""

    def processToClean(self):
        """Clean up to 100 jobs in state 'toclean' for this cluster.

        Jobs are grouped by proxy; for each group the CE is asked to clean
        them, failures are logged, and every job in the batch is deleted from
        the ARC job table regardless of whether the CE clean succeeded.
        """
        jobstoclean = self.db.getArcJobs("arcstate='toclean' and cluster='"+self.cluster+"' limit 100")
        if not jobstoclean:
            return
        self.log.info("Cleaning %d jobs" % sum(len(v) for v in jobstoclean.values()))
        for proxyid, jobs in jobstoclean.items():
            # switch the user config to the credentials owning these jobs
            self.uc.CredentialString(str(self.db.getProxy(proxyid)))
            # j[2] is the arc.Job object in each (id, appjobid, job, created) tuple
            job_supervisor = arc.JobSupervisor(self.uc, [j[2] for j in jobs])
            job_supervisor.Update()
            job_supervisor.Clean()
            notcleaned = job_supervisor.GetIDsNotProcessed()
            for (id, appjobid, job, created) in jobs:
                if job.JobID in notcleaned:
                    self.log.error("%s: Could not clean job %s" % (appjobid, job.JobID))
                # delete from the DB even if the CE-side clean failed
                self.db.deleteArcJob(id)

    def process(self):
        """Single iteration of the aCTProcess main loop."""
        # clean jobs
        self.processToClean()
if __name__ == '__main__':
    # Run the cleaner process to completion, then release its resources.
    cleaner = aCTCleaner()
    cleaner.run()
    cleaner.finish()
| 2.296875 | 2 |
capstone/capdb/migrations/0114_auto_20210420_2105.py | rachelaus/capstone | 134 | 12773553 | <filename>capstone/capdb/migrations/0114_auto_20210420_2105.py<gh_stars>100-1000
# Generated by Django 3.2 on 2021-04-20 21:05
import capdb.storages
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.2, 2021-04-20).

    Re-declares many capdb model fields as the core ``models.JSONField`` and
    refreshes a few CharField/FileField definitions. Do not hand-edit field
    semantics here; generate a new migration instead.
    """

    dependencies = [
        ('capdb', '0113_auto_20210414_1532'),
    ]

    operations = [
        migrations.AlterField(
            model_name='caseanalysis',
            name='value',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='casebodycache',
            name='json',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='caseinitialmetadata',
            name='metadata',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='casemetadata',
            name='attorneys',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='casemetadata',
            name='docket_numbers',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='casemetadata',
            name='judges',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='casemetadata',
            name='no_index_elided',
            field=models.JSONField(blank=True, help_text='Elided text will be shown on click. Example: {"Text to elide (must be exact match)": "Extra text that\'s currently not used. Can be left as empty string."}', null=True),
        ),
        migrations.AlterField(
            model_name='casemetadata',
            name='no_index_redacted',
            field=models.JSONField(blank=True, help_text='Redacted text will be hidden from view and replaced with key\'s value specified above. Example: {"Text to redact (must be exact match)": "Text to replace redacted text."}', null=True),
        ),
        migrations.AlterField(
            model_name='casemetadata',
            name='opinions',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='casemetadata',
            name='parties',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='casestructure',
            name='opinions',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='datamigration',
            name='alto_xml_changed',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='datamigration',
            name='alto_xml_rollback',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='datamigration',
            name='case_xml_changed',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='datamigration',
            name='case_xml_rollback',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='datamigration',
            name='volume_xml_changed',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='datamigration',
            name='volume_xml_rollback',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='historicalcasemetadata',
            name='attorneys',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalcasemetadata',
            name='docket_numbers',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalcasemetadata',
            name='judges',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalcasemetadata',
            name='no_index_elided',
            field=models.JSONField(blank=True, help_text='Elided text will be shown on click. Example: {"Text to elide (must be exact match)": "Extra text that\'s currently not used. Can be left as empty string."}', null=True),
        ),
        migrations.AlterField(
            model_name='historicalcasemetadata',
            name='no_index_redacted',
            field=models.JSONField(blank=True, help_text='Redacted text will be hidden from view and replaced with key\'s value specified above. Example: {"Text to redact (must be exact match)": "Text to replace redacted text."}', null=True),
        ),
        migrations.AlterField(
            model_name='historicalcasemetadata',
            name='opinions',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalcasemetadata',
            name='parties',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalpagestructure',
            name='blocks',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='historicalpagestructure',
            name='duplicates',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalpagestructure',
            name='extra_redacted_ids',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalpagestructure',
            name='font_names',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalpagestructure',
            name='spaces',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalvolumemetadata',
            name='bibliographic_review',
            field=models.CharField(blank=True, choices=[('No', 'No'), ('Complete', 'Complete'), ('Yes', 'Yes')], max_length=8, null=True),
        ),
        migrations.AlterField(
            model_name='historicalvolumemetadata',
            name='ingest_errors',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='historicalvolumemetadata',
            name='task_statuses',
            field=models.JSONField(default=dict, help_text='Date and results of tasks run for this volume'),
        ),
        migrations.AlterField(
            model_name='historicalvolumemetadata',
            name='xml_metadata',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='pagestructure',
            name='blocks',
            field=models.JSONField(),
        ),
        migrations.AlterField(
            model_name='pagestructure',
            name='duplicates',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='pagestructure',
            name='extra_redacted_ids',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='pagestructure',
            name='font_names',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='pagestructure',
            name='spaces',
            field=models.JSONField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='volumemetadata',
            name='bibliographic_review',
            field=models.CharField(blank=True, choices=[('No', 'No'), ('Complete', 'Complete'), ('Yes', 'Yes')], max_length=8, null=True),
        ),
        migrations.AlterField(
            model_name='volumemetadata',
            name='ingest_errors',
            field=models.JSONField(blank=True, null=True),
        ),
        # NOTE(review): this auto-generated field definition embeds a
        # developer's local absolute path in the storage location --
        # presumably captured from settings at generation time; verify it is
        # harmless before running this migration on another machine.
        migrations.AlterField(
            model_name='volumemetadata',
            name='pdf_file',
            field=models.FileField(blank=True, help_text='Exported volume PDF', max_length=1000, storage=capdb.storages.DownloadOverlayStorage(base_url='http://case.test:8000/download/', location='/Users/jcushman/Documents/capstone/capstone/test_data/downloads'), upload_to=''),
        ),
        migrations.AlterField(
            model_name='volumemetadata',
            name='task_statuses',
            field=models.JSONField(default=dict, help_text='Date and results of tasks run for this volume'),
        ),
        migrations.AlterField(
            model_name='volumemetadata',
            name='xml_metadata',
            field=models.JSONField(blank=True, null=True),
        ),
    ]
| 2.015625 | 2 |
arcade-romhacking-framework/make/patch_apply.py | zengfr/arcade-romhacking-framework | 2 | 12773554 | <filename>arcade-romhacking-framework/make/patch_apply.py
#!/usr/bin/python
# -*- coding:utf-8 -*-
# (c) 2016 XingXing(<EMAIL>)
# (c) 2020 zengfr(362505707#qq.com)(https://github.com/zengfr)
import os
import re
import sys
def patch_file(crosstools,fileelf,filemap,fileori,fileout):
print 'patch_file processing',fileelf,filemap,fileori,fileout
map_list = []
for c in open(filemap,'r').readlines():
if c.startswith('.rom'):
c=c.strip(' ').strip('\n')
c=re.split('\s*',c)
rom_base = int(c[1],16)
if '__patch_end_' in c or '__patch_start_' in c:
c=c.strip(' ').strip('\n')
c=re.split('\s*',c)
map_list.append( (int(c[0],16)-rom_base,c[1]) )
print c
map_list.sort()
print map_list
if os.path.exists("rom.bin"):
os.remove("rom.bin")
if os.path.exists("ram.bin"):
os.remove("ram.bin")
print "objcopy objdump start"
os.system( "%sobjcopy.exe -Obinary -j.rom %s rom.bin" % (crosstools,fileelf))
os.system( "%sobjcopy.exe -Obinary -j.ram %s ram.bin" % (crosstools,fileelf))
os.system( "%sobjdump.exe -C -S %s > %s.patch.asm" % (crosstools,fileelf,fileelf))
dat_pat = open("rom.bin",'rb').read()+open("ram.bin",'rb').read()
dat_ori = open(fileori,'rb').read()
len_ori = len(dat_ori)
#dat_pat = open(filerom,'rb').read()+open(fileram,'rb').read()
for i in range(len(map_list)/2):
pat_start = map_list[2*i][0]
pat_end = map_list[2*i+1][0]
if(pat_end!=pat_start):
print "%x-%x %s" % (pat_start,pat_end,dat_pat[pat_start:pat_end].encode('hex'))
dat_ori = dat_ori[:pat_start]+dat_pat[pat_start:pat_end]+dat_ori[pat_end:]
dat_ori = dat_ori[:map_list[-1][0]]+dat_pat[map_list[-1][0]:]+dat_ori[len(dat_pat):]
print "%x %x %x" % (map_list[-1][0],map_list[-1][0],len(dat_pat))
len_now = len(dat_ori)
dat_ori += '\xFF'*(len_ori-len_now)
open(fileout,'wb').write(dat_ori)
print "Saved OK.",fileout
if __name__=='__main__':
    # CLI entry point: patch_apply.py <CROSS-PREFIX> <ELF> <MAP> <ORI_BIN> <OUTPUT>
    if len(sys.argv)>5:
        patch_file(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5])
    else:
        # print usage and exit with a non-zero status
        print "%s <CROSS-PREFIX> <ELF> <MAP> <ORI_BIN> <OUTPUT>" % sys.argv[0]
        sys.exit(1) | 2.234375 | 2 |
faas/fastapi-lambda-docker/main.py | AtmaMani/pyChakras | 0 | 12773555 | # main app file
from typing import Optional
from fastapi import FastAPI, UploadFile, File, Request
from fastapi.responses import HTMLResponse
import json, os
from pydantic import BaseModel
from fastapi.templating import Jinja2Templates
from mangum import Mangum
# Create app, just like in Flask
app = FastAPI()
templates = Jinja2Templates(directory='templates')
@app.get('/hello')
def hello():
    """Trivial health-check style endpoint returning a fixed greeting."""
    return dict(Hello='World')
@app.get('/items/{item_id}')
def read_item(item_id: int, q: Optional[str] = None):
    """Echo back the path parameter and the optional query string."""
    response = {'item_id': item_id}
    response['q'] = q
    return response
@app.get('/genRandomNumbers/{num_random}')
async def gen_random_ints(num_random: int, upper_limit: Optional[int] = None):
    """Return a JSON-encoded list of unique random ints in [0, upper_limit].

    If upper_limit is omitted (or falsy) it defaults to num_random + 20, and
    num_random is capped at upper_limit so the request can always be satisfied.

    NOTE(review): returning a JSON string means FastAPI double-encodes the
    response; kept for backward compatibility with existing clients.
    """
    from random import sample
    if not upper_limit:
        upper_limit = num_random + 20
    if num_random > upper_limit:
        num_random = upper_limit
    # sample() draws without replacement in one pass, replacing the original
    # rejection loop whose run time grew unboundedly for dense requests
    unique_random_list = sample(range(upper_limit + 1), num_random)
    return json.dumps(unique_random_list)
# Pydantic types for complex JSON Post operations
class Item(BaseModel):
    """Request/response model for the /items endpoints."""
    name: str
    description: Optional[str] = "What an amazing item"
    price: Optional[float] = 10.0
    # NOTE(review): the original default `price/10` was evaluated once at class
    # definition time against the default price, i.e. it was the constant 1.0
    # and never tracked a caller-supplied price (create_item recomputes tax
    # anyway). Made explicit here; behavior is unchanged.
    tax: Optional[float] = 1.0
# In-memory store of posted items; lives only for the process lifetime.
item_list = []
@app.post('/items')
async def create_item(item: Item):
    """Store a posted Item, recomputing its tax as 10% of the price."""
    item.tax = item.price / 10
    item_list.append(item)
    return item
@app.get('/items')
async def get_items():
    """Return every item stored so far."""
    return item_list
@app.get('/listFiles')
async def list_uploaded_files():
    """List the file names currently stored in the local ./imgs directory."""
    uploaded = os.listdir('./imgs')
    return uploaded
# Upload files
@app.post('/uploadImg')
async def create_upload_img(file: UploadFile = File(...)):
    """Save an uploaded image into ./imgs and return the updated file listing.

    Files are stored under sequential names ('1.jpg', '2.jpg', ...). The name
    is advanced past any existing file so earlier uploads are never overwritten
    (the original len()+1 scheme could clobber a file after deletions).
    """
    # exist_ok avoids the race between the exists() check and mkdir()
    os.makedirs('./imgs', exist_ok=True)
    next_index = len(os.listdir('./imgs')) + 1
    while os.path.exists(f'./imgs/{next_index}.jpg'):
        next_index += 1
    file_name = f'{next_index}.jpg'
    with open(f'./imgs/{file_name}', 'wb+') as fp:
        fp.write(file.file.read())
    return {'Status': 'Uploaded',
            'uploaded_files': await list_uploaded_files()}
@app.get('/', response_class=HTMLResponse)
async def index(request: Request):
    """Render the landing page from the Jinja2 templates directory."""
    context = {'request': request}
    return templates.TemplateResponse('index.html', context)
handler = Mangum(app) | 2.71875 | 3 |
SQL_best_practice_folder_scan.py | JanosLaci/scan_files_for_regex | 0 | 12773556 | import os
import pathlib
path_to_folder = pathlib.Path('.', 'folder_to_scan')
def scan_folder_select_star(folder_to_scan=path_to_folder):
    """Print the name of every entry directly inside *folder_to_scan*.

    :param folder_to_scan: directory to scan (defaults to ./folder_to_scan)
    :return: list of the printed entry names, in directory order
        (generalization: the original returned None; returning the names lets
        callers use the result without capturing stdout)
    """
    names = []
    with os.scandir(folder_to_scan) as dir_iterator:
        for entry in dir_iterator:
            print(entry.name)
            names.append(entry.name)
    return names
def main():
    """Entry point: print the contents of the default scan folder."""
    scan_folder_select_star()
if __name__ == '__main__':
    main()
# NOTE(review): the two lines below sit outside the __main__ guard, so they
# also run (and touch the filesystem) on import -- confirm this is intended.
list_dir = os.listdir(path_to_folder)
print(list_dir) | 3.40625 | 3 |
webcode/app/modules/submission_manager/views.py | joverbey/WebCode | 1 | 12773557 | # pylint: disable=no-name-in-module, f0401
from flask import request
from flask.ext.login import login_required, current_user
from app import app
from app.database import session
from app.util import serve_response, serve_error
from app.modules.event_manager.models import Event
from app.modules.project_manager.models import Project
from app.modules.submission_manager.models import Submission
from app.modules.submission_manager.runner import Runner
import time
import os
FILE_EXTENSIONS_FROM_TYPE = {
'cuda': '.cu',
'oacc': '.c'
}
def directory_for_submission(job):
    """Path of the working directory for a given submission job id."""
    base = app.config['DATA_FOLDER']
    return os.path.join(base, 'submits', str(job))
@app.route('/api/submissions', methods=['POST'])
@login_required
def create_submission():
    """Create and queue a submission for one of the current user's projects.

    Expects form fields: project_id, body (source code), run. The source is
    written into the submission's working directory and handed to a Runner.
    Returns the new job id, or an error response if form data is missing.
    """
    try:
        # Pass both clauses to filter() so they are ANDed in SQL; the original
        # combined them with Python's `and`, which SQLAlchemy cannot translate
        # into a SQL conjunction.
        project = (session.query(Project).filter(
            Project.project_id == int(request.form['project_id']),
            Project.username == current_user.username).first())
        project.body = request.form['body']
        submission = Submission(
            username=current_user.username,
            submit_time=int(time.time()),
            type=project.type,
            project_id=int(request.form['project_id']),
            run=int(request.form['run'])
        )
    except KeyError:
        return serve_error('Form data missing.')
    submission.commit_to_session()
    project.commit_to_session()
    Event.log(current_user.username, 'execute', submission.job)

    # materialize the submitted source in the job's own directory
    directory = directory_for_submission(submission.job)
    os.mkdir(directory)
    file_name = 'submit' + FILE_EXTENSIONS_FROM_TYPE[submission.type]
    # with-statement closes the handle (it was previously leaked)
    with open(os.path.join(directory, file_name), 'w') as source_file:
        source_file.write(project.body)

    runner = Runner(submission, file_name)
    runner.run_queued()
    return serve_response({
        'job': submission.job
    })
@app.route('/api/submissions')
@login_required
def get_submissions():
    """Return all submissions belonging to the logged-in user as dicts."""
    owned = session.query(Submission).filter(Submission.username == current_user.username).all()
    return serve_response({'submissions': [submission.to_dict() for submission in owned]})
@app.route('/api/submissions/<int:job>')
@login_required
def get_submission(job):
    """Return the stored source code of one of the current user's submissions."""
    # Pass both clauses to filter() so they are ANDed in SQL; the original
    # combined them with Python's `and`, which SQLAlchemy cannot translate
    # into a SQL conjunction.
    submission = session.query(Submission).filter(
        Submission.username == current_user.username,
        Submission.job == job).first()
    directory = directory_for_submission(job)
    file_name = 'submit' + FILE_EXTENSIONS_FROM_TYPE[submission.type]
    # with-statement closes the handle (it was previously leaked)
    with open(os.path.join(directory, file_name)) as source_file:
        body = source_file.read()
    return serve_response({
        'body': body
    })
| 2.046875 | 2 |
projects/forms.py | 18F/acquisitions.18f.gov | 3 | 12773558 | from django import forms
from django.contrib.auth.models import User
from django.contrib.postgres.forms import SimpleArrayField
from projects.models import Buy, Project, IAA, AgencyOffice
from projects.widgets import DurationMultiWidget
from form_utils.forms import BetterModelForm
from form_utils.widgets import AutoResizeTextarea
class ClientForm(forms.ModelForm):
    """ModelForm exposing every field of AgencyOffice."""
    class Meta:
        model = AgencyOffice
        fields = '__all__'
class IAAForm(forms.ModelForm):
    """ModelForm exposing every field of the IAA model."""
    class Meta:
        model = IAA
        fields = '__all__'
class ProjectForm(forms.ModelForm):
    """ModelForm exposing every field of the Project model."""
    class Meta:
        model = Project
        fields = '__all__'
class CreateBuyForm(forms.ModelForm):
    """Minimal form used when first creating a Buy; remaining Buy fields are
    filled in later via EditBuyForm."""
    class Meta:
        model = Buy
        fields = [
            'name',
            'description',
            'project',
            'budget',
            'procurement_method'
        ]
class EditBuyForm(forms.ModelForm):
    """Full edit form for a Buy.

    Requirements and skills are entered as free text, one item per line (split
    on newlines by SimpleArrayField); the team-member choice fields are
    restricted in __init__ to users on the buy's project.
    """
    requirements = SimpleArrayField(
        forms.CharField(),
        delimiter='\n',
        help_text='Multiple requirements are allowed. Enter each one on its '
                  'own line. Additional formatting, like bullet points, will '
                  'be added later, so leave that out.',
        required=False,
        widget=forms.Textarea,
    )
    skills_needed = SimpleArrayField(
        forms.CharField(),
        delimiter='\n',
        help_text='Multiple skills are allowed. Enter each one on its '
                  'own line. Additional formatting, like bullet points, will '
                  'be added later, so leave that out.',
        required=False,
        widget=forms.Textarea,
    )

    def __init__(self, *args, **kwargs):
        """Limit all people-picker fields to members of the buy's project."""
        super(EditBuyForm, self).__init__(*args, **kwargs)
        buy = kwargs['instance']
        team_members = User.objects.filter(project=buy.project.id)
        self.fields['technical_evaluation_panel'].queryset = team_members
        self.fields['product_owner'].queryset = team_members
        self.fields['product_lead'].queryset = team_members
        self.fields['acquisition_lead'].queryset = team_members
        self.fields['technical_lead'].queryset = team_members

    class Meta:
        model = Buy
        fields = '__all__'
        widgets = {
            # 'description': AutoResizeTextarea(),
            'base_period_length': DurationMultiWidget(),
            'option_period_length': DurationMultiWidget(),
            # 'technical_evaluation_panel': forms.CheckboxSelectMultiple()
        }
| 2.140625 | 2 |
demo/demo_reverse.py | VoraciousFour/VorDiff | 0 | 12773559 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 9 14:34:04 2019
@author: weiruchen
"""
from VorDiff.reverse_operator import ReverseOperator as rop
from VorDiff.reverse_autodiff import ReverseAutoDiff as rad
def create_reverse_vector(array):
    """Build a pair of reverse-mode AD vector variables from a 2-row array."""
    var_x, var_y = rad.reverse_vector(array)
    return var_x, var_y
# NOTE(review): the vectors are rebuilt before each expression below,
# presumably to reset the reverse-mode tape between demos -- confirm.
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
# for scalar
f = 1 / (x[1]) + rop.sin(1/x[1])
print(rad.partial_scalar(f))
# for vector
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
a = x + 1
print(rad.partial_vector(a,x))
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
h = rop.sin(x)
print(rad.partial_vector(h,x))
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
g = rop.cos(y)**2
print(rad.partial_vector(g,y))
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
f = 2*x + y
print(rad.partial_vector(f,x))
x,y = create_reverse_vector([[1, 2, 3], [1,3,6]])
# for multiple functions: each returns (expression, x, y) so the partials
# can be taken with respect to the variables that built it
def F1(array):
    x, y = create_reverse_vector(array)
    return 3*x + rop.cos(y)**2 + 1, x, y
def F2(array):
    x, y = create_reverse_vector(array)
    return rop.sin(x) + 2*rop.sin(y), x, y
array = [[1, 2, 3], [1,3,6]]
vec_functions = [F1(array), F2(array)]
for func in vec_functions:
    function, x, y = func
    print("The values of the function is ", function._val)
    print("The derivatives of the function with respect to values of variable x is", rad.partial_vector(func[0], x))
    print("The derivatives of the function with respect to values of variable y is", rad.partial_vector(func[0], y))
| 3.578125 | 4 |
ga/ga.py | pfrag/vassign | 0 | 12773560 | <reponame>pfrag/vassign<filename>ga/ga.py
# -*- coding: utf-8 -*-
"""
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
#################################################################
A genetic algorithm for video quality assignment.
The relevant parameters can be tuned in the configuration file.
The scenario_file parameter must point to a valid JSON-formatted
file with all the scenario parameters.
#################################################################
"""
from random import *
from operator import itemgetter, attrgetter
import sys
import time
import string
import math
import json
import os
import copy
import logging
import random
from datetime import datetime
import msgpack
import ujson
import numpy
def randint(low, high):
    """Return a random integer N with low <= N <= high (inclusive).

    Drop-in replacement for random.randint built on one random.random() call.
    Bug fix: the original scaled by (high + 1) instead of the interval width
    (high - low + 1), so for low > 0 it could return values above `high`.
    """
    # One way to do it is as follows:
    #k = numpy.frombuffer(numpy.random.bytes(4), dtype=numpy.uint32)
    #return numpy.mod(k, high - low + 1)[0] + low

    # random.random() is uniform on [0, 1), so this is uniform over the
    # (high - low + 1) integers of [low, high].
    return low + int((high - low + 1) * random.random())
def _copy_users(users):
""" Creates a copy of the list given as argument (to avoid calling deepcopy in crossover())
"""
retval = []
for u in users:
data = {}
for key, val in u.items():
data[key] = val
retval.append(data)
return retval
class chromosome(object):
    """A candidate GA solution: a list of genes (one per user) plus the
    solution's cached fitness value."""

    def __init__(self, genes, fitness=0.0):
        self.genes = genes
        self.fitness = fitness

    def __str__(self):
        return str({"genes": self.genes, "fitness": self.fitness})

    # repr mirrors str exactly (same dict rendering)
    __repr__ = __str__

    @staticmethod
    def create_empty(ngenes):
        """Create a chromosome of ngenes unassigned genes with uids 0..ngenes-1."""
        return chromosome([gene(uid) for uid in range(ngenes)])

    @staticmethod
    def copy(c):
        """Duplicate a chromosome: genes are copied field by field, fitness kept."""
        duplicated = []
        if c.genes:
            duplicated = [gene(g.uid, g.rid, g.bitrate, g.mos,
                               g.max_throughput, g.prb_throughput)
                          for g in c.genes]
        return chromosome(duplicated, c.fitness)
class gene(object):
    """Per-user assignment: which representation (if any) the user receives,
    together with the user's link-quality figures."""

    def __init__(self, uid, rid=None, bitrate=0, mos=0.0, max_throughput=0.0, prb_throughput=0.0):
        self.uid = uid
        self.rid = rid
        self.bitrate = bitrate
        self.mos = mos
        self.max_throughput = max_throughput
        self.prb_throughput = prb_throughput

    def _summary(self):
        # Mirrors the original string form, which omitted prb_throughput.
        return {"user": self.uid, "rid": self.rid, "bitrate": self.bitrate,
                "mos": self.mos, "max_throughput": self.max_throughput}

    def __str__(self):
        return str(self._summary())

    def __repr__(self):
        return str(self._summary())
class GA(object):
"""Genetic Algorithm class.
"""
ERR_MALFORMED_INPUT = 400
ERR_NO_INPUT_PROVIDED = 410
ERR_NO_FEASIBLE_SOLUTION = 420
def __init__(self, configuration):
"""Constructor function.
"""
# seed random number generator (if this option is set in the config file)
if "seed" in configuration:
self.seed = configuration["seed"]
else:
self.seed = int(datetime.now().timestamp()*1000)
random.seed(self.seed)
# Load GA-specific params (S, G, Rc, Rm)
# Load VNFFG, host graph, constraints, etc.
self.solution_pool_size = configuration['solution_pool_size']
self.generations = configuration['generations']
self.crossover_rate = configuration['crossover_rate']
self.mutation_rate = configuration['mutation_rate']
self.convergence_check = configuration['convergence_check']
self.error = None
self.error_string = None
if self.convergence_check:
self.delta = configuration['delta']
self.stop_after = configuration['stop_after']
if "generations_file" in configuration:
self.generations_file = configuration['generations_file']
else:
self.generations_file = None
loglevel = getattr(logging, configuration["loglevel"].upper(), None)
logging.basicConfig(level=loglevel)
if 'scenario' in configuration:
self.scenario = configuration["scenario"]
if "scenario_file" not in configuration:
self.scenario_file = None
elif 'scenario_file' in configuration:
self.scenario_file = configuration["scenario_file"]
try:
with open(configuration['scenario_file']) as fp:
self.scenario = json.load(fp)
except:
logging.error("Error loading scenario file. Possibly malformed...")
self.error_string = "Error loading scenario file. Possibly malformed."
self.error = GA.ERR_MALFORMED_INPUT
else:
logging.error("Scenario not provided.")
self.error_string = "No input provided."
self.error = GA.ERR_NO_INPUT_PROVIDED
if configuration["loglevel"].upper() != "NONE":
self.print_configuration()
def print_configuration(self):
"""Print the configuration
"""
print("Configuration:\n-------------------------------" +
"\n- Solution pool size:\t\t" + str(self.solution_pool_size) +
"\n- Number of generations:\t" + str(self.generations) +
"\n- Crossover rate:\t\t" + str(self.crossover_rate) +
"\n- Mutation rate:\t\t" + str(self.mutation_rate) +
"\n- Scenario file:\t\t" + str(self.scenario_file) +
"\n- Log data per generation:\t" + str(self.generations_file) +
"\n- Enable convergence check:\t" + str(self.convergence_check))
if self.convergence_check:
print("\t+ Delta:\t\t" + str(self.delta) +
"\n\t+ Stop after:\t\t" + str(self.stop_after) +
"\n- RNG seed:\t\t\t" + str(self.seed) +
"\n-------------------------------\n")
def fitness(self, solution):
"""Calculate the fitness of chromosome C.
The fitness of a solution is the sum of the MOS values of all users.
"""
retval = 0
for g in solution.genes:
if g.mos:
retval += g.mos
return retval
def get_used_capacity(self, s):
"""Get the used capacity in terms of prbs
"""
used_capacity = 0.0
for g in s.genes:
if g.bitrate:
used_capacity += g.bitrate/g.prb_throughput
return used_capacity
def check_capacity_constraint(self, solution):
"""Chech whether the capacity constraint of a solution is respected (i.e., the number of needed prbs are less than the maximum available).
"""
if self.get_used_capacity(solution) > self.scenario["nprb"]:
return False
return True
def init_solution_pool(self):
"""Initialize solution pool.
Generate S feasible assignments as follows:
- Pick a random user
- Assign a random feasible representation
- Loop until the global capacity is exceeded or all users have a representation assigned
"""
self.solution_pool = []
solutions_to_generate = self.solution_pool_size
sorted_reps = sorted(self.scenario["representations"], key=itemgetter('bitrate'))
while solutions_to_generate > 0:
solution = chromosome.create_empty(len(self.scenario["users"]))
fitness = 0
# Create a vector of user indexes to keep track which users are remaining to be assigned a representation
user_indexes = range(0, len(self.scenario["users"]))
available_capacity = self.scenario["nprb"]
while len(user_indexes) > 0:
# while there are still users without a video,
# draw a random user
ptr = randint(0, len(user_indexes) - 1)
uindex = user_indexes[ptr]
if self.scenario["users"][uindex]["cqi"] == 0:
# user disconnected -> ignore
user_indexes.remove(uindex)
continue
# Check if there's still capacity
minbitrate = sorted_reps[0]["bitrate"]
# prbs the user needs for the minimum bitrate representation
minprb = minbitrate/self.scenario["users"][uindex]["prb_throughput"]
if available_capacity < minprb:
# ok, no more available capacity even for the lowest bitrate representation
# so we're done
break
# find which representations can be supported by the user (and the network)
admissible_reps = copy.copy(sorted_reps)
admissible_reps[:] = [r for r in admissible_reps if r["bitrate"] <= self.scenario["users"][uindex]["max_throughput"] and r["bitrate"]/self.scenario["users"][uindex]["prb_throughput"] <= available_capacity]
# if user has the capacity to host at least the minimal bitrate representation
if len(admissible_reps) > 0:
#if rexists:
# pick a random (admissible) representation
r = choice(admissible_reps)
#R = randint(0, rindex - 1)
#r = sorted_reps[R]
solution.genes[uindex].rid = r["id"]
solution.genes[uindex].bitrate = r["bitrate"]
solution.genes[uindex].mos = r["mos"]
solution.genes[uindex].prb_throughput = self.scenario["users"][uindex]["prb_throughput"]
solution.genes[uindex].max_throughput = self.scenario["users"][uindex]["max_throughput"]
available_capacity -= r["bitrate"]/self.scenario["users"][uindex]["prb_throughput"]
solution.fitness += r["mos"]
user_indexes.remove(uindex) # done with this user (either gets the above representation or nothing)
self.solution_pool.append(solution)
solutions_to_generate -= 1
    def init_solution_pool_v2(self):
        """Initialize solution pool.
        Generate S feasible assignments as follows:
        - First create a number of solutions where all users take the same representation
        - Do a number of crossover operations to create the rest of the solution pool
        """
        self.solution_pool = []
        solutions_to_generate = self.solution_pool_size
        #sort representations by bitrate
        sorted_reps = sorted(self.scenario["representations"], key=itemgetter('bitrate'))
        #sort users by channel quality
        sorted_users = sorted(self.scenario["users"], key=itemgetter('cqi'), reverse = True)
        # One "uniform" solution per representation: users with the best
        # channel quality are served first, until capacity runs out.
        for r in sorted_reps:
            solution = chromosome.create_empty(len(self.scenario["users"]))
            available_capacity = self.scenario["nprb"]
            for u in sorted_users:
                uindex = int(u["id"])
                if u["cqi"] > 0 and available_capacity >= r["bitrate"]/u["prb_throughput"] and u["max_throughput"] >= r["bitrate"]:
                    # assign r to user and move to next one
                    solution.genes[uindex].rid = r["id"]
                    solution.genes[uindex].bitrate = r["bitrate"]
                    solution.genes[uindex].mos = r["mos"]
                    solution.genes[uindex].prb_throughput = u["prb_throughput"]
                    solution.genes[uindex].max_throughput = u["max_throughput"]
                    available_capacity -= r["bitrate"]/u["prb_throughput"]
                    solution.fitness += r["mos"]
                else:
                    # User gets no video in this uniform solution.
                    solution.genes[uindex].rid = None
                    solution.genes[uindex].bitrate = 0
                    solution.genes[uindex].mos = 0.0
                    solution.genes[uindex].prb_throughput = 0
                    solution.genes[uindex].max_throughput = 0
            self.solution_pool.append(solution)
            solutions_to_generate -= 1
        # now that we have some initial solutions, create the rest based on them via crossover
        # NOTE(review): if crossover_multi() repeatedly yields no feasible
        # offspring this loop keeps retrying, and it may overshoot the target
        # pool size by one when two offspring are returned -- confirm both
        # behaviors are acceptable.
        while solutions_to_generate > 0:
            offspring = self.crossover_multi()
            for o in offspring:
                solutions_to_generate -= 1
                self.solution_pool.append(o)
    def crossover(self):
        """Crossover operation.
        - Select two random chromosomes (solutions) from the pool
        - Draw a random split point
        - Create two new chromosomes joining one part from the one and one from the other parent
        - Return the one with the better quality or None if none is ok with the
        network capacity constraint
        """
        # Pick two random chromosomes (C1 and C2 could coincide)
        C1 = choice(self.solution_pool)
        C2 = choice(self.solution_pool)
        # select split point
        split_point = randint(0, len(self.scenario["users"]) - 1)
        # each offspring is the crossed-over array of the users of the parents
        # NOTE(review): list slicing is shallow -- the offspring share gene
        # objects with their parents, so a later in-place mutation of an
        # offspring gene also mutates the parent; confirm this is intended.
        offspring1 = chromosome(C1.genes[:split_point] + C2.genes[split_point:])
        offspring2 = chromosome(C2.genes[:split_point] + C1.genes[split_point:])
        used_capacity1 = 0
        fitness1 = 0
        ok1 = False
        used_capacity2 = 0
        fitness2 = 0
        ok2 = False
        # check capacity constraints and calculate fitness in one pass
        for i in range(0, len(offspring1.genes)):
            if offspring1.genes[i].rid and offspring1.genes[i].prb_throughput > 0:
                used_capacity1 += offspring1.genes[i].bitrate/offspring1.genes[i].prb_throughput
                fitness1 += offspring1.genes[i].mos
            if offspring2.genes[i].rid and offspring2.genes[i].prb_throughput > 0:
                used_capacity2 += offspring2.genes[i].bitrate/offspring2.genes[i].prb_throughput
                fitness2 += offspring2.genes[i].mos
        if used_capacity1 <= self.scenario["nprb"]:
            ok1 = True
        if used_capacity2 <= self.scenario["nprb"]:
            ok2 = True
        offspring1.fitness = fitness1
        offspring2.fitness = fitness2
        # Return the feasible offspring; if both are feasible, the fitter one.
        if not ok1 and not ok2:
            return None
        elif ok1 and not ok2:
            return offspring1
        elif ok2 and not ok1:
            return offspring2
        else:
            if fitness1 > fitness2:
                return offspring1
            else:
                return offspring2
    def crossover_multi(self):
        """Crossover operation.
        - Select two random chromosomes (solutions) from the pool
        - Draw a random split point
        - Create two new chromosomes joining one part from the one and one from the other parent
        - Return up to two chromosomes (depending on whether they're ok with the capacity constraint)
        """
        offsprings = []
        # Pick two random chromosomes (C1 and C2 could coincide)
        C1 = choice(self.solution_pool)
        C2 = choice(self.solution_pool)
        # select split point
        split_point = randint(0, len(self.scenario["users"]) - 1)
        # each offspring is the crossed-over array of the users of the parents
        # NOTE(review): shallow slices -- offspring share gene objects with
        # their parents (same caveat as crossover()); confirm intended.
        offspring1 = chromosome(C1.genes[:split_point] + C2.genes[split_point:])
        offspring2 = chromosome(C2.genes[:split_point] + C1.genes[split_point:])
        used_capacity1 = 0
        fitness1 = 0
        ok1 = False
        used_capacity2 = 0
        fitness2 = 0
        ok2 = False
        # check capacity constraints and calculate fitness in one pass
        for i in range(0, len(offspring1.genes)):
            if offspring1.genes[i].rid and offspring1.genes[i].prb_throughput > 0:
                used_capacity1 += offspring1.genes[i].bitrate/offspring1.genes[i].prb_throughput
                fitness1 += offspring1.genes[i].mos
            if offspring2.genes[i].rid and offspring2.genes[i].prb_throughput > 0:
                used_capacity2 += offspring2.genes[i].bitrate/offspring2.genes[i].prb_throughput
                fitness2 += offspring2.genes[i].mos
        if used_capacity1 <= self.scenario["nprb"]:
            ok1 = True
        if used_capacity2 <= self.scenario["nprb"]:
            ok2 = True
        offspring1.fitness = fitness1
        offspring2.fitness = fitness2
        # Keep only the offspring that satisfy the capacity constraint.
        if ok1:
            offsprings.append(offspring1)
        if ok2:
            offsprings.append(offspring2)
        return offsprings
    def mutation(self):
        """Mutation operator.
        For each chromosome in the solution pool, decide according to the mutation
        rate if we'll modify it or not. If it's selected for mutation, we select a random
        user and change its video representation assignment.
        """
        for s in self.solution_pool:
            if uniform(0, 1) <= self.mutation_rate:
                logging.debug("Mutating solution: " + str(s))
                #u = choice(s["users"])
                u = choice(s.genes)
                # find which representations can be supported by the user
                admissible_reps = copy.copy(sorted(self.scenario["representations"], key=itemgetter('bitrate')))
                admissible_reps[:] = [r for r in admissible_reps if r["bitrate"] <= u.max_throughput]
                # if the user cannot admit any video, do nothing
                if not admissible_reps:
                    continue
                # Find a feasible *different* representation
                # pick a random representation from the ones admissible by the user
                # TODO: looks ugly, refactor
                # randint()'s upper bound is intentionally len(admissible_reps):
                # index len() denotes the extra "virtual" no-video choice.
                r = randint(0, len(admissible_reps))
                # assumption/hack: the "virtual" representation #len corresponds to no video
                if r == len(admissible_reps):
                    if u.rid is None:
                        # User already has no video -- nothing to change.
                        pass
                    else:
                        # otherwise, remove the assigned representation (aka switch user video off)
                        s.fitness -= u.mos # update fitness value in place, to save time
                        u.rid = None
                        u.bitrate = None
                        u.mos = 0.0
                else:
                    # drew an existing representation. if it's different than the one the user already has,
                    # assign it. Otherwise, keep loopin.
                    if u.rid is not None and u.rid == admissible_reps[r]["id"]:
                        pass
                    else:
                        # Done with this guy. Move to next chromosome/solution
                        # check if the new representation violates the constraint and if so undo
                        if u.rid is not None:
                            # Remember the previous assignment so it can be restored.
                            s.fitness -= u.mos
                            old_rid = u.rid
                            old_b = u.bitrate
                            old_m = u.mos
                        else:
                            # Sentinel: the user previously had no video at all.
                            old_rid = -1
                        u.rid = admissible_reps[r]["id"]
                        u.bitrate = admissible_reps[r]["bitrate"]
                        u.mos = admissible_reps[r]["mos"]
                        s.fitness += u.mos
                        if not self.check_capacity_constraint(s):
                            # undo if the mutant is not a feasible solution
                            if old_rid == -1:
                                s.fitness -= u.mos
                                u.rid = None
                                u.bitrate = None
                                u.mos = 0.0
                            else:
                                s.fitness = s.fitness - u.mos + old_m
                                u.rid = old_rid
                                u.bitrate = old_b
                                u.mos = old_m
            # NOTE(review): recomputed for every solution (mutated or not),
            # which makes the incremental fitness updates above redundant --
            # presumably a safety net; confirm before removing either side.
            s.fitness = self.fitness(s)
    def generation(self):
        """A GA generation to produce a new solution pool.
        This executes crossover and mutation operations to produce
        offspring and keeps the top-ranking solutions according to the
        fitness function.
        It also returns the fitness function of the best solution
        """
        # select number of offspring
        nOffspring = int(self.crossover_rate * self.solution_pool_size)
        # for each offspring
        while nOffspring > 0:
            # generate a new chromosome from crossover
            offspring = self.crossover_multi()
            for o in offspring:
                self.solution_pool.append(o)
            # NOTE(review): decremented by 2 per crossover *attempt*, even
            # when fewer than two feasible offspring were produced -- this
            # bounds the number of attempts rather than the offspring count;
            # confirm that is the intended semantics.
            nOffspring -= 2
        # mutation
        self.mutation()
        # select top-S chromosomes according to their fitness value
        # and create new solution pool
        #for s in self.solution_pool:
        ## update fitness values of all candidate solutions
        #s["fitness"] = self.fitness(s)
        # keep the top-S solutions
        self.solution_pool = sorted(self.solution_pool, key=attrgetter('fitness'), reverse = True)[0:self.solution_pool_size]
        return self.solution_pool[0].fitness
def execute(self):
"""Run the genetic algorithm.
Returns the scenario JSON object with placement decisions plus some
algorithm-specific information.
"""
# check if we will store data per generation in a file
log_generation_data = False
if self.generations_file is not None:
try:
gen_fp = open(self.generations_file, "w+")
log_generation_data = True
gen_fp.write("# Scenario: " + str(self.scenario_file) + "\n")
gen_fp.write("# Seed: " + str(self.seed) + "\n")
gen_fp.write("#-----------------------------------------------\n")
gen_fp.write("# Generation\tFitness\t\tTimestamp\n")
except:
logging.warn("Error opening/writing at " + self.generations_file)
pass
prev_obj_value = -1
if self.convergence_check:
remaining_generations = self.stop_after
# Keep track of the best solution we've seen, in case we arrive at a worse solution when the algorithm terminates
current_best = None
start_time = datetime.now()
# Create initial solution pool
self.init_solution_pool_v2()
for i in range(0, self.generations):
obj_value = self.generation()
# get a timestamp for this generation
dt = datetime.now() - start_time
# convert to seconds. dt.days should really not matter...
time_taken = dt.days*24*3600 + dt.seconds + dt.microseconds/1000000.0
logging.info("Generation/fitness/timestamp: " + str(i) + "\t" + str(obj_value) + "\t" + str(time_taken))
if log_generation_data:
gen_fp.write(str(i) + "\t\t" + str(obj_value) + "\t" + str(time_taken) + "\n")
# maintain best solution we've seen thus far
if current_best is None or current_best.fitness <= self.solution_pool[0].fitness:
current_best = chromosome.copy(self.solution_pool[0]) #copy.deepcopy(self.solution_pool[0])
# if we're checking for convergence to finish execution faster
# we have to do some checks
if self.convergence_check:
if abs(obj_value - prev_obj_value) < self.delta:
# the solution fitness did not significantly change
remaining_generations -= 1
else:
remaining_generations = self.stop_after
# the algorithm converged
if remaining_generations < 0:
break
prev_obj_value = obj_value
#final_solution = self.solution_pool[0]
final_solution = current_best
# convert from chromosome to json
retval = self.from_chromosome(final_solution)
# add extra information about solution performance (cost, availability, latency, time taken, # generations)
# and indications about constraint violations
info = {}
info["generations"] = i + 1
info["execution_time"] = time_taken
info["used_capacity"] = self.get_used_capacity(final_solution)
# some final checks
retval["solution_performance"] = info
return retval
def from_chromosome(self, s):
output = copy.deepcopy(self.scenario)
for i in range(len(output["users"])):
if s.genes[i] is not None and s.genes[i].rid is not None:
output["users"][i]["rid"] = s.genes[i].rid
output["users"][i]["mos"] = s.genes[i].mos
output["users"][i]["bitrate"] = s.genes[i].bitrate
output["fitness"] = s.fitness
return output
| 2.125 | 2 |
tests/integration-tests/test_data.py | Krocodial/classy | 0 | 12773561 | <filename>tests/integration-tests/test_data.py<gh_stars>0
from selenium import webdriver
from django.test import TestCase
from selenium.webdriver.firefox.options import Options
from django.urls import reverse
import os, time
from selenium.webdriver.common.keys import Keys
import requests
from requests.auth import HTTPBasicAuth
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.select import Select
class DataTestCase(TestCase):
    """Selenium-driven integration tests for the data search and edit pages."""
    def setUp(self):
        """Start a headless Firefox session and log in the test account."""
        self.classifications = ['UNCLASSIFIED', 'PUBLIC', 'CONFIDENTIAL', 'PROTECTED A', 'PROTECTED B', 'PROTECTED C']
        options = Options()
        options.headless = True
        selenium = webdriver.Firefox(options=options)
        domain = os.getenv('REDIRECT_URI')
        #Log the dev-tester account in and establish the session
        selenium.get(domain + reverse('classy:index'))
        username = selenium.find_element_by_name('username')
        password = selenium.find_element_by_name('password')
        username.send_keys(os.getenv('TEST_ACCOUNT_USERNAME'))
        password.send_keys(os.getenv('TEST_ACCOUNT_PASSWORD'))
        selenium.find_element_by_name('login').click()
        self.selenium = selenium
        self.domain = domain
    def tearDown(self):
        """Close the browser session."""
        self.selenium.quit()
    def _advanced_search(self, data_source='', schema='', table='', column=''):
        """Open the data page, expand 'Advanced Search', fill the given
        fields and submit the form. Empty values leave a field blank
        (send_keys('') is a no-op), so callers pass only what they need."""
        browser = self.selenium
        browser.get(self.domain + reverse('classy:data'))
        browser.find_element_by_link_text('Advanced Search').click()
        browser.find_element_by_name('data_source').send_keys(data_source)
        browser.find_element_by_name('schema').send_keys(schema)
        browser.find_element_by_name('table').send_keys(table)
        column_field = browser.find_element_by_name('column')
        column_field.send_keys(column)
        # Submitting any form element submits the enclosing form.
        column_field.submit()
    def test_login(self):
        """The logged-in landing page greets the test account."""
        selenium = self.selenium
        domain = self.domain
        selenium.get(domain + reverse('classy:index'))
        time.sleep(5)
        assert 'Welcome dev-tester' in selenium.page_source
    def test_no_data_permissions(self):
        """A schema the account may not see must not appear in results."""
        self._advanced_search('dbq01', 'DBSNMP')
        time.sleep(5)
        assert '<td>DBSNMP</td>' not in self.selenium.page_source
    def test_data_permissions(self):
        """A column the account may see must appear in results."""
        self._advanced_search('dbq01', 'backup', '<KEY>', 'data')
        time.sleep(5)
        assert '<td>DATA</td>' in self.selenium.page_source
    def test_data_modification(self):
        """Set every classification value on one column and verify that
        each change is saved and shown again on a fresh search."""
        browser = self.selenium
        for clas in self.classifications:
            self._advanced_search('maltest4.database', 'DBSNMP', 'CNLDATA', 'TAGS')
            time.sleep(3)
            browser.find_element_by_xpath("//table[@id='data-table']/tbody/tr[1]/td[1]/button").click()
            time.sleep(1)
            select = Select(browser.find_element_by_xpath("//select[@id='1']"))
            select.select_by_visible_text(clas)
            browser.find_element_by_id('subby').click()
            browser.find_element_by_id('finSubby').click()
            time.sleep(1)
            assert 'Success!</strong> Changes submitted' in browser.page_source
            # Search again and confirm the new classification stuck.
            self._advanced_search('maltest4.database', 'DBSNMP', 'CNLDATA', 'TAGS')
            time.sleep(3)
            browser.find_element_by_xpath("//table[@id='data-table']/tbody/tr[1]/td[1]/button").click()
            time.sleep(1)
            select = Select(browser.find_element_by_xpath("//select[@id='1']"))
            assert select.first_selected_option.text == clas
    '''
    def test_mass_data_modification(self):
        browser = self.selenium
        domain = self.domain
        for clas in self.classifications:
            browser.get(domain + reverse('classy:data'))
            q = browser.find_element_by_id('id_query')
            q.send_keys('')
            q.submit()
            time.sleep(2)
            row1 = browser.find_element_by_xpath("//table[@id='data-table']/tbody/tr[1]")
            row2 = browser.find_element_by_id('2')
            row3 = browser.find_element_by_id('3')
            ActionChains(browser).key_down(Keys.CONTROL).click(row1).click(row2).click(row3).key_up(Keys.CONTROL).perform()
            browser.find_element_by_id('edito').click()
            select = Select(browser.find_element_by_xpath("//select[@id='newC']"))
            select.select_by_visible_text(clas)
            browser.find_element_by_id('changeC').click()
            time.sleep(1)
            browser.find_element_by_id('subby').click()
            browser.find_element_by_id('finSubby').click()
            assert 'Success!</strong> Changes submitted' in browser.page_source
            browser.get(domain + reverse('classy:data'))
            q = browser.find_element_by_id('id_query')
            q.send_keys('')
            q.submit()
            row1 = browser.find_element_by_xpath("//tr[@id='1']/td[6]").text
            row2 = browser.find_element_by_xpath("//tr[@id='2']/td[6]").text
            row3 = browser.find_element_by_xpath("//tr[@id='3']/td[6]").text
            assert row1 == row2 == row3 == clas
            #ActionChains(browser).key_down(Keys.CONTROL).click(row).key_up(Keys.CONTROL).perform()
    '''
| 2.28125 | 2 |
commercia/offers/collections.py | commoncode/economica | 2 | 12773562 | <reponame>commoncode/economica<filename>commercia/offers/collections.py
from cqrs.mongo import mongodb
from cqrs.collections import DRFDocumentCollection
from .models import Collection, Offer, OfferResourceContract
from .serializers import (
CollectionSerializer, OfferResourceContractSerializer, OfferSerializer
)
class CollectionDocumentCollection(DRFDocumentCollection):
    """CQRS document collection mirroring ``Collection`` rows into the
    ``economica__collections`` MongoDB collection via its DRF serializer."""
    model = Collection
    serializer_class = CollectionSerializer
    name = 'economica__collections'
class OfferResourceContractDocumentCollection(DRFDocumentCollection):
    """CQRS document collection mirroring ``OfferResourceContract`` rows
    into the ``economica__resource_contracts`` MongoDB collection."""
    model = OfferResourceContract
    serializer_class = OfferResourceContractSerializer
    name = 'economica__resource_contracts'
class OfferDocumentCollection(DRFDocumentCollection):
    """CQRS document collection mirroring ``Offer`` rows into the
    ``economica__offers`` MongoDB collection via its DRF serializer."""
    model = Offer
    serializer_class = OfferSerializer
    name = 'economica__offers'
# Register the document collections so the CQRS layer keeps the MongoDB
# mirrors in sync with the relational models.
mongodb.register(CollectionDocumentCollection())
mongodb.register(OfferResourceContractDocumentCollection())
mongodb.register(OfferDocumentCollection())
| 1.890625 | 2 |
claircli/http_handler.py | jfabdo/claircli | 16 | 12773563 | <filename>claircli/http_handler.py
# -*- coding: utf-8 -*-
import logging
import os
from os.path import join, relpath
from six.moves.SimpleHTTPServer import SimpleHTTPRequestHandler
from functools import partial
from threading import Thread
from six.moves.BaseHTTPServer import HTTPServer
logger = logging.getLogger(__name__)
class PathHTTPHandler(SimpleHTTPRequestHandler):
    """Request handler that serves files from an arbitrary directory.

    ``SimpleHTTPRequestHandler`` normally serves the current working
    directory; this subclass re-roots every request path under
    ``serve_path`` and routes access logs through the module logger.
    """
    def __init__(self, http_request, client_address,
                 server, serve_path=os.getcwd()):
        # NOTE(review): the default serve_path is evaluated once at import
        # time; callers are expected to pass it explicitly (see
        # start_http_server below).
        self.serve_path = serve_path
        SimpleHTTPRequestHandler.__init__(
            self, http_request, client_address, server)
    def translate_path(self, path):
        # Let the base class map the URL relative to the cwd, then re-root
        # the resulting relative path under self.serve_path.
        path = SimpleHTTPRequestHandler.translate_path(self, path)
        relpath_ = relpath(path, os.getcwd())
        abspath_ = join(self.serve_path, relpath_)
        return abspath_
    def log_message(self, format, *args):
        # Send request logs to the module logger (debug level) instead of
        # writing them to stderr.
        logger.debug('%s - - [%s] %s\n',
                     self.client_address[0],
                     self.log_date_time_string(),
                     format % args)
def start_http_server(port, path):
    """Serve the directory *path* over HTTP on *port*.

    The server runs forever on a daemon thread, so it terminates together
    with the main process.
    """
    logger.info('Starting local http server')
    handler_factory = partial(PathHTTPHandler, serve_path=path)
    server = HTTPServer(('', port), handler_factory)
    serve_thread = Thread(target=server.serve_forever)
    serve_thread.daemon = True
    serve_thread.start()
    logger.info('Local http server serving at port: %s', port)
| 2.328125 | 2 |
Contrib-Inspur/openbmc/poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py | opencomputeproject/Rack-Manager | 5 | 12773564 | <filename>Contrib-Inspur/openbmc/poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
import subprocess
import oe.lsb
class VirglTest(OERuntimeTestCase):
    """Runtime checks that virgl 3D acceleration works on the target image."""
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_kernel_driver(self):
        """The virgl kernel driver must appear in dmesg with 3D acceleration enabled."""
        status, output = self.target.run('dmesg|grep virgl')
        self.assertEqual(status, 0, "Checking for virgl driver in dmesg returned non-zero: %d\n%s" % (status, output))
        self.assertIn("virgl 3d acceleration enabled", output, "virgl acceleration seems to be disabled:\n%s" %(output))
    @OETestDepends(['virgl.VirglTest.test_kernel_driver'])
    def test_kmscube(self):
        """kmscube must run successfully and report the virgl renderer."""
        distro = oe.lsb.distro_identifier()
        if distro and distro == 'centos-7':
            # Known-broken host/guest combination; skip rather than fail.
            self.skipTest('kmscube is not working when centos 7 is the host OS')
        status, output = self.target.run('kmscube', timeout=30)
        self.assertEqual(status, 0, "kmscube exited with non-zero status %d and output:\n%s" %(status, output))
        self.assertIn('renderer: "virgl"', output, "kmscube does not seem to use virgl:\n%s" %(output))
| 2.140625 | 2 |
hanibal/crm_gestion_faces/wizard/reporte_log_crm_listado_clientes.py | Christian-Castro/castro_odoo8 | 0 | 12773565 | <filename>hanibal/crm_gestion_faces/wizard/reporte_log_crm_listado_clientes.py<gh_stars>0
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class log_reporte(osv.osv):
    """Read-only SQL-view model listing company customers with their
    salesperson and contact details (legacy OpenERP ``osv`` API)."""
    _name = "crm.listado.clientes"
    _description = "Listado de Clientes"
    # Backed by a database view created in init(); not managed by the ORM.
    _auto = False
    _rec_name = 'cliente'
    _columns = {
        'id': fields.float('Id', readonly=True),
        'cliente': fields.char('Cliente', readonly=True),
        'comercial': fields.char('Comercial', readonly=True),
        # NOTE(review): 'ciudad' carries the label 'Direccion' -- this looks
        # like a copy/paste slip and should probably read 'Ciudad'; confirm
        # with the report consumers before changing the label.
        'ciudad': fields.char('Direccion', readonly=True),
        'direccion': fields.char('Direccion', readonly=True),
        'ruc': fields.char('Ruc', readonly=True),
        'telefono': fields.char('Telefono', readonly=True),
        'celular': fields.char('Mobil', readonly=True),
        'correo': fields.char('Correo', readonly=True),
    }
    _order = 'cliente'
    def _tabla(self):
        # SQL body of the backing view: one row per company partner
        # (is_company = 't'), joined to its salesperson through res_users.
        group_by_strxx1 = """
            select
                cli.id id,
                cli.name cliente,
                ven.name comercial,
                cli.city ciudad,
                cli.street direccion,
                cli.vat ruc,
                cli.phone telefono,
                cli.mobile celular,
                cli.email correo
            from res_partner cli
            left join res_users as usr on (cli.user_id = usr.id )
            left join res_partner as ven on ( usr.partner_id = ven.id)
            where usr.partner_id = ven.id and
                  cli.is_company = 't'
            order by 1,2,3
        """
        return group_by_strxx1
    def init(self, cr):
        # Rebuild the backing view on every module init so that changes to
        # _tabla() take effect.
        # self._table = sale_report
        tools.drop_view_if_exists(cr, self._table)
        cr.execute("""CREATE or REPLACE VIEW %s as (
            %s
            )""" % (self._table, self._tabla()))
| 1.710938 | 2 |
tests/detail/serialization/test_nodes_impl.py | intdata-bsc/idact | 5 | 12773566 | import pytest
from idact.core.auth import AuthMethod
from idact.detail.allocation.allocation_parameters import AllocationParameters
from idact.detail.config.client.client_cluster_config import ClusterConfigImpl
from idact.detail.nodes.node_impl import NodeImpl
from idact.detail.nodes.nodes_impl import NodesImpl
from idact.detail.slurm.slurm_allocation import SlurmAllocation
def get_data_for_test():
    """Build a throwaway cluster config and access node shared by the tests."""
    config = ClusterConfigImpl(host='localhost1',
                               port=1,
                               user='user-1',
                               auth=AuthMethod.ASK)
    access_node = NodeImpl(config=config)
    return config, access_node
def test_serialize_deserialize():
    """Round trip: serialize() output must deserialize back to an equal object."""
    config, access_node = get_data_for_test()
    nodes = [NodeImpl(config=config),
             NodeImpl(config=config)]
    uuid = '1111'
    value = NodesImpl(nodes=nodes,
                      allocation=SlurmAllocation(
                          job_id=1,
                          access_node=access_node,
                          nodes=nodes,
                          entry_point_script_path='a',
                          parameters=AllocationParameters()),
                      uuid=uuid)
    serialized = value.serialize()
    # Freshly created nodes carry no host/port/resources, hence the Nones.
    assert serialized == {
        'type': 'SerializableTypes.NODES_IMPL',
        'nodes': [{'type': 'SerializableTypes.NODE_IMPL',
                   'host': None,
                   'port': None,
                   'cores': None,
                   'memory': None,
                   'allocated_until': None},
                  {'type': 'SerializableTypes.NODE_IMPL',
                   'host': None,
                   'port': None,
                   'cores': None,
                   'memory': None,
                   'allocated_until': None}],
        'allocation': {
            'type': 'SerializableTypes.SLURM_ALLOCATION',
            'job_id': 1,
            'entry_point_script_path': 'a',
            'parameters': {'type': 'SerializableTypes.ALLOCATION_PARAMETERS',
                           'nodes': None,
                           'cores': None,
                           'memory_per_node': None,
                           'walltime': None,
                           'native_args': {}},
            'done_waiting': False}}
    deserialized = NodesImpl.deserialize(config=config,
                                         access_node=access_node,
                                         serialized=serialized,
                                         uuid=uuid)
    assert deserialized == value
def test_invalid_serialized_type():
    """Deserializing data with an unknown type marker trips an assertion."""
    config, access_node = get_data_for_test()
    serialized = {'type': 'SerializableTypes.NODES_IMPL2'}
    with pytest.raises(AssertionError):
        NodesImpl.deserialize(config=config,
                              access_node=access_node,
                              serialized=serialized,
                              uuid='1111')
def test_missing_serialized_keys():
    """Deserialization raises RuntimeError when required keys are absent."""
    config, access_node = get_data_for_test()
    serialized = {'type': 'SerializableTypes.NODES_IMPL'}
    with pytest.raises(RuntimeError):
        NodesImpl.deserialize(config=config,
                              access_node=access_node,
                              uuid='1111',
                              serialized=serialized)
| 1.90625 | 2 |
venv/lib/python2.7/site-packages/lettuce/django/tests/functional/djangoapp.py | GrupoMazoGuay/final | 0 | 12773567 | """
A minimal Django app, just one file.
See: http://olifante.blogs.com/covil/2010/04/minimal-django.html
"""
import os
from django.conf.urls.defaults import patterns
from django.core.mail import send_mail
from django.http import HttpResponse
# Derive the URLconf module name from this file's own path (single-file app).
filepath, extension = os.path.splitext(__file__)
ROOT_URLCONF = os.path.basename(filepath)
# BUGFIX: the trailing comma makes this a one-element tuple. Without it,
# INSTALLED_APPS was the plain string "lettuce.django", and Django iterating
# the setting would have treated each character as an app name.
INSTALLED_APPS = (
    "lettuce.django",
)
def mail(request):
    """View that sends a fixed test e-mail and returns a confirmation page.

    fail_silently=False makes SMTP errors propagate, so a misconfigured
    mail backend surfaces as a 500 instead of a silent no-op.
    """
    send_mail('Subject here', 'Here is the message.', '<EMAIL>',
              ['<EMAIL>'], fail_silently=False)
    return HttpResponse('Mail has been sent')
# Single route: GET /mail/ -> the mail view (legacy Django patterns() API).
urlpatterns = patterns('', (r'^mail/$', mail))
| 2.40625 | 2 |
tools/demo_text.py | VDIGPKU/SReN_MM | 1 | 12773568 | <filename>tools/demo_text.py
# --------------------------------------------------------
# Pytorch Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, based on code from <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import time
import cv2
import torch
from torch.autograd import Variable
from scipy.misc import imread
from lib.model.utils.config import cfg, cfg_from_file, cfg_from_list
from lib.model.rpn.bbox_transform import clip_boxes
from lib.model.nms.nms_wrapper import nms
from lib.model.rpn.bbox_transform import bbox_transform_inv_reg
from lib.model.utils.net_utils import vis_detections
from lib.model.utils.blob import im_list_to_blob
from lib.model.faster_rcnn.vgg16 import vgg16
from lib.model.faster_rcnn.resnet import resnet
from lib.datasets.factory import adjust_args
import pdb
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def parse_args():
    """Parse the command-line options of the text-detection demo.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    parser.add_argument('--net', dest='net', default='res101', type=str,
                        help='vgg16, res50, res101, res152')
    parser.add_argument('--dataset', dest='dataset', default='icdar_mlt',
                        type=str, help='training dataset')
    parser.add_argument('--image_dir', dest='image_dir', default="images",
                        help='directory to load images for demo')
    parser.add_argument('--cuda', dest='cuda', action='store_true',
                        help='whether use CUDA')
    parser.add_argument('--cag', dest='class_agnostic', action='store_true',
                        help='whether perform class_agnostic bbox regression')
    parser.add_argument('--model', dest='model', type=str,
                        help='model to test')
    parser.add_argument('--vis', dest='vis', action='store_true',
                        help='visualization mode')
    parser.add_argument('--ls', dest='large_scale', action='store_true',
                        help='whether use large imag scale')
    return parser.parse_args()
def _get_image_blob(im):
    """Converts an image into a network input.
    Arguments:
        im (ndarray): a color image in BGR order
    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (list): list of image scales (relative to im) used
        in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    # Mean-subtract in BGR order (cfg.PIXEL_MEANS is per-channel).
    im_orig -= cfg.PIXEL_MEANS
    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    processed_ims = []
    im_scale_factors = []
    # One pyramid level per configured test scale.
    for target_size in cfg.TEST.SCALES:
        # Scale so the shorter side matches target_size ...
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
                        interpolation=cv2.INTER_LINEAR)
        im_scale_factors.append(im_scale)
        processed_ims.append(im)
    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)
    return blob, np.array(im_scale_factors)
if __name__ == '__main__':
    args = parse_args()
    # adjust_args() patches dataset-specific attributes onto args --
    # presumably including args.set_cfgs used below; confirm in
    # lib.datasets.factory.
    adjust_args(args)
    # NOTE(review): hard-coded override -- the --cuda CLI flag is ignored.
    args.cuda = True
    print('Called with args:')
    print(args)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    print('Using config:')
    pprint.pprint(cfg)
    np.random.seed(cfg.RNG_SEED)
    # train set
    # -- Note: Use validation set and disable the flipped to enable faster loading.
    load_name = args.model
    if not os.path.exists(load_name):
        raise Exception('There is no input file for loading network from ' + load_name)
    # Two classes only: background and a generic text "frame" class.
    text_classes = np.asarray(['__background__',
                               'frame'])
    # initilize the network here.
    if args.net == 'vgg16':
        fasterRCNN = vgg16(text_classes, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res101':
        fasterRCNN = resnet(text_classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res50':
        fasterRCNN = resnet(text_classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res152':
        fasterRCNN = resnet(text_classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
    else:
        print("network is not defined")
        pdb.set_trace()
    fasterRCNN.create_architecture()
    print("load checkpoint %s" % (load_name))
    checkpoint = torch.load(load_name)
    fasterRCNN.load_state_dict(checkpoint['model'])
    if 'pooling_mode' in checkpoint.keys():
        cfg.POOLING_MODE = checkpoint['pooling_mode']
    print('load model successfully!')
    # pdb.set_trace()
    print("load checkpoint %s" % (load_name))
    # initilize the tensor holder here.
    im_data = torch.FloatTensor(1)
    im_info = torch.FloatTensor(1)
    num_boxes = torch.LongTensor(1)
    gt_boxes = torch.FloatTensor(1)
    # ship to cuda
    if args.cuda:
        im_data = im_data.cuda()
        im_info = im_info.cuda()
        num_boxes = num_boxes.cuda()
        gt_boxes = gt_boxes.cuda()
    # make variable
    im_data = Variable(im_data, volatile=True)
    im_info = Variable(im_info, volatile=True)
    num_boxes = Variable(num_boxes, volatile=True)
    gt_boxes = Variable(gt_boxes, volatile=True)
    if args.cuda > 0:
        cfg.CUDA = True
    if args.cuda > 0:
        fasterRCNN.cuda()
    fasterRCNN.eval()
    start = time.time()
    max_per_image = 100
    # Minimum class score for a detection to be considered at all.
    thresh = 0.05
    # NOTE(review): hard-coded override -- the --vis CLI flag is ignored.
    vis = True
    imglist = os.listdir(args.image_dir)
    num_images = len(imglist)
    print('Loaded Image: {} images.'.format(num_images))
    for i in range(num_images):
        # Load the demo image
        im_file = os.path.join(args.image_dir, imglist[i])
        # im = cv2.imread(im_file)
        im_in = np.array(imread(im_file))
        if len(im_in.shape) == 2:
            # Grayscale image -> replicate to 3 channels.
            im_in = im_in[:, :, np.newaxis]
            im_in = np.concatenate((im_in, im_in, im_in), axis=2)
        # rgb -> bgr
        im = im_in[:, :, ::-1]
        blobs, im_scales = _get_image_blob(im)
        assert len(im_scales) == 1, "Only single-image batch implemented"
        im_blob = blobs
        im_info_np = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
        im_data_pt = torch.from_numpy(im_blob)
        # NHWC -> NCHW for the network.
        im_data_pt = im_data_pt.permute(0, 3, 1, 2)
        im_info_pt = torch.from_numpy(im_info_np)
        im_data.data.resize_(im_data_pt.size()).copy_(im_data_pt)
        im_info.data.resize_(im_info_pt.size()).copy_(im_info_pt)
        gt_boxes.data.resize_(1, 1, 13).zero_()
        num_boxes.data.resize_(1).zero_()
        # pdb.set_trace()
        det_tic = time.time()
        rois, cls_prob, bbox_pred, \
        rpn_loss_cls, rpn_loss_box, \
        RCNN_loss_cls, RCNN_loss_bbox, RCNN_loss_edge, \
        rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
        scores = cls_prob.data
        boxes = rois.data[:, :, 1:5]
        if cfg.TEST.BBOX_REG:
            # Apply bounding-box regression deltas
            box_deltas = bbox_pred.data
            if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
                # Optionally normalize targets by a precomputed mean and stdev
                if args.class_agnostic:
                    box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS_REG).cuda() \
                                 + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS_REG).cuda()
                    box_deltas = box_deltas.view(1, -1, 4)
                else:
                    box_deltas = box_deltas.view(-1, 8) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS_REG).cuda() \
                                 + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS_REG).cuda()
                    box_deltas = box_deltas.view(1, -1, 8 * len(text_classes))
            pred_boxes = bbox_transform_inv_reg(boxes, box_deltas, 1)
            pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
        else:
            # Simply repeat the boxes, once for each class
            pred_boxes = np.tile(boxes, (1, scores.shape[1]))
        # Undo test-time scaling so boxes are in original image pixels.
        pred_boxes /= im_scales[0]
        scores = scores.squeeze()
        pred_boxes = pred_boxes.squeeze()
        det_toc = time.time()
        detect_time = det_toc - det_tic
        misc_tic = time.time()
        if vis:
            im2show = np.copy(im)
        for j in xrange(1, len(text_classes)):
            inds = torch.nonzero(scores[:, j] > thresh).view(-1)
            # if there is det
            if inds.numel() > 0:
                cls_scores = scores[:, j][inds]
                _, order = torch.sort(cls_scores, 0, True)
                if args.class_agnostic:
                    cls_quads = pred_boxes[inds, :]
                else:
                    cls_quads = pred_boxes[inds][:, j * 8:(j + 1) * 8]
                # Axis-aligned bounding box of each predicted quadrilateral;
                # used only for the NMS step below.
                x1, _ = torch.min(cls_quads[:, 0::2], dim=1)
                x2, _ = torch.max(cls_quads[:, 0::2], dim=1)
                y1, _ = torch.min(cls_quads[:, 1::2], dim=1)
                y2, _ = torch.max(cls_quads[:, 1::2], dim=1)
                cls_boxes = torch.cat((x1.view(-1, 1), y1.view(-1, 1), x2.view(-1, 1), y2.view(-1, 1)), dim=1)
                cls_scores = cls_scores.view(-1, 1)
                cls_dets = torch.cat((cls_quads, cls_scores), 1)
                cls_dets = cls_dets[order]
                cls_dets_box = torch.cat((cls_boxes, cls_scores), 1)
                cls_dets_box = cls_dets_box[order]
                keep = nms(cls_dets_box, cfg.TEST.NMS)
                cls_dets = cls_dets[keep.view(-1).long()]
                cls_dets_box = cls_dets_box[keep.view(-1).long()]
                if vis:
                    im2show = vis_detections(im2show, cls_dets.cpu().numpy(), thresh=0.75)
        misc_toc = time.time()
        nms_time = misc_toc - misc_tic
        sys.stdout.write('im_detect {}: {:d}/{:d} {:.3f}s {:.3f}s \n' \
                         .format(imglist[i], i + 1, num_images, detect_time, nms_time))
        sys.stdout.flush()
        if vis:
            # cv2.imshow('test', im2show)
            # cv2.waitKey(0)
            result_path = os.path.join(args.image_dir, imglist[i][:-4] + "_det.jpg")
            cv2.imwrite(result_path, im2show)
| 2.171875 | 2 |
__init__.py | yulkang/augment3D | 11 | 12773569 | """
Augment 3D images of a focal lesion with 3D rotation, flip, and translation.
import_mhd:
Prepare patches from lung CT scan images and annotations.
(See readme.md - Installation for details.)
datasets:
Retrieve patches after augmentation for training.
classify_patch:
Demonstrate the use of datasets for training 3D deep convolution net.
""" | 1.875 | 2 |
src/Utils/__init__.py | bakkiaraj/WallPaperChanger | 1 | 12773570 | <filename>src/Utils/__init__.py
# -*- coding: utf-8 -*-
# @Author : <NAME> (nxp33721)
# @EMail : <EMAIL>
# @Software : Radar SoC GUI
| 0.964844 | 1 |
examples/combining_draggable_with_magnet.py | gottadiveintopython/draggable | 13 | 12773571 | from kivy.app import App
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.clock import Clock
from kivy.properties import (
NumericProperty, StringProperty, BooleanProperty,
)
import asynckivy as ak
import kivy_garden.draggable
class Magnet(Factory.Widget):
    '''
    Inspired by
    https://github.com/kivy-garden/garden.magnet

    Container that holds exactly one child and animates that child
    towards its own pos/size whenever they change.
    '''
    # If False, the child snaps to the new pos/size instead of animating.
    do_anim = BooleanProperty(True)
    anim_duration = NumericProperty(1)
    anim_transition = StringProperty('out_quad')
    # default value of the instance attributes
    # (class-level placeholder coroutine; replaced per-instance once an
    # animation actually starts in _start_anim)
    _coro = ak.sleep_forever()
    def __init__(self, **kwargs):
        # Trigger with timeout -1 fires on the next frame, coalescing
        # multiple pos/size changes into a single animation restart.
        self._anim_trigger = trigger = \
            Clock.create_trigger(self._start_anim, -1)
        super().__init__(**kwargs)
        self.fbind('pos', trigger)
        self.fbind('size', trigger)
    def add_widget(self, widget, *args, **kwargs):
        # A Magnet mirrors its own geometry onto a single child, so more
        # than one child is rejected.
        if self.children:
            raise ValueError('Magnet can have only one child')
        widget.pos = self.pos
        widget.size = self.size
        return super().add_widget(widget, *args, **kwargs)
    def _start_anim(self, *args):
        if self.children:
            child = self.children[0]
            # Cancel any in-flight animation before starting a new one.
            self._coro.close()
            if not self.do_anim:
                # Animation disabled: snap the child immediately.
                child.pos = self.pos
                child.size = self.size
                return
            self._coro = ak.start(ak.animate(
                child,
                d=self.anim_duration,
                t=self.anim_transition,
                x=self.x, y=self.y, width=self.width, height=self.height,
            ))
KV_CODE = '''
#:import create_spacer kivy_garden.draggable._utils._create_spacer
<ReorderableGridLayout@KXReorderableBehavior+GridLayout>:
<DraggableItem@KXDraggableBehavior+Magnet>:
do_anim: not self.is_being_dragged
anim_duration: .2
drag_cls: 'test'
drag_timeout: 50
font_size: 30
text: ''
opacity: .5 if self.is_being_dragged else 1.
size_hint_min: 50, 50
pos_hint: {'center_x': .5, 'center_y': .5, }
canvas.after:
Color:
rgba: .5, 1, 0, 1 if root.is_being_dragged else .5
Line:
width: 2 if root.is_being_dragged else 1
rectangle: [*self.pos, *self.size, ]
Label:
font_size: 30
text: root.text
<MyButton@Button>:
font_size: sp(20)
size_hint_min_x: self.texture_size[0] + dp(10)
size_hint_min_y: self.texture_size[1] + dp(10)
ReorderableGridLayout:
spacing: 10
padding: 10
drag_classes: ['test', ]
cols: 6
spacer_widgets:
[create_spacer(color=color)
for color in "#000044 #002200 #440000".split()]
'''
class SampleApp(App):
    """Demo app: a reorderable grid populated with draggable items."""

    def build(self):
        """Create the root widget tree from the KV markup."""
        return Builder.load_string(KV_CODE)

    def on_start(self):
        """Fill the grid with 23 numbered draggable items once the app starts."""
        gl = self.root
        DraggableItem = Factory.DraggableItem
        # Removed a stray `DraggableItem()` call whose result was discarded:
        # the Factory attribute access above already resolves the class, so
        # instantiating a throwaway widget had no effect.
        for i in range(23):
            gl.add_widget(DraggableItem(text=str(i)))
# Script entry point: launch the drag-and-drop demo application.
if __name__ == '__main__':
    SampleApp().run()
| 2.578125 | 3 |
test/components/stream_test.py | sytelus/longview | 3,453 | 12773572 | from tensorwatch.stream import Stream
# Three console-debug streams chained into a pipeline: s1 -> s2 -> s3.
s1 = Stream(stream_name='s1', console_debug=True)
s2 = Stream(stream_name='s2', console_debug=True)
s3 = Stream(stream_name='s3', console_debug=True)
# NOTE(review): presumably subscribe() makes the argument stream receive the
# caller's writes (s2 observes s1, s3 observes s2) — confirm direction against
# the tensorwatch Stream API.
s1.subscribe(s2)
s2.subscribe(s3)
# Write to each stage; console_debug echoes whatever each stream observes, so
# the write to s1 should cascade through the chain.
s3.write('S3 wrote this')
s2.write('S2 wrote this')
s1.write('S1 wrote this')
| 2.140625 | 2 |
test_strategy.py | Nino-SEGALA/HuaweiChallenge | 0 | 12773573 | <gh_stars>0
# -*- coding: utf-8 -*-
'''
Copyright (c) 2020 Huawei Technologies Sweden AB, All rights reserved.
Authors:
<NAME>
'''
from robot_explorers import Strategy
class Test(Strategy):
    '''Demo of strategy, observation, and action APIs'''
    def __init__(self):
        super().__init__()
    def init(self):
        '''Called once before the first observation has been received.'''
        # Dump the static match parameters provided by the framework.
        self.print('--- strategy init ---')
        self.print('num_robots: {}'.format(self.num_robots))
        self.print('shape: {}'.format(self.shape))
        self.print('max_energy: {}'.format(self.max_energy))
        self.print('coin_box (x,y): {}'.format(self.coin_box))
        self.print('')
    def step(self, observation):
        ''' Called every time an observation has been received
        Args:
            observations (strategy.Observation):
                Container with all robots' observations
        Returns:
            actions (strategy.Action):
                Container with all robots actions
        '''
        # Observation
        self.print('score: {}'.format(observation.score))
        self.print('opponent_score: {}'.format(observation.opponent_score))
        self.print('added_coins: {}'.format(observation.added_coins))
        # Per-robot sensor readout: energy/penalty/item state plus the four
        # directional distance sensors.
        for robot_id in range(self.num_robots):
            robot = observation.robot(robot_id)
            self.print('Observation (robot {})'.format( robot_id ))
            self.print('energy: {}'.format( robot.energy ))
            self.print('penalty: {}'.format( robot.penalty ))
            self.print('has_item: {}'.format( robot.has_item ))
            self.print('home_base: {}'.format( robot.home_base ))
            self.print('position(x,y): ({},{})'.format( *robot.position ))
            self.print('left: {} {}'.format( robot.left.distance, robot.left.object ))
            self.print('right: {} {}'.format( robot.right.distance, robot.right.object ))
            self.print('top: {} {}'.format( robot.up.distance, robot.up.object ))
            self.print('bottom: {} {}'.format( robot.down.distance, robot.down.object ))
            self.print('')
        # Action
        # Exercise every action setter; the later calls with invalid
        # directions ('aaaa', 'bbbb', 'cccc') and the invalid robot id (-1)
        # intentionally demonstrate the API's rejection behavior.
        action = self.action()
        for robot_id in range(self.num_robots):
            self.print('Action (robot {})'.format( robot_id ))
            self.print('detect: {}'.format( action.detect(robot_id) ))
            self.print('none: {}'.format( action.none(robot_id) ))
            self.print('move: {}'.format( action.move(robot_id, 'left') ))
            self.print('fake coin: {}'.format( action.fake_coin(robot_id, 'up') ))
            self.print('share_energy: {}'.format( action.share_energy(robot_id, 'down') ))
            self.print('recharge {}'.format( action.recharge(robot_id) ))
            self.print('move: {}'.format( action.move(robot_id, 'right') ))
            self.print('move: {}'.format( action.move(robot_id, 'aaaa') ))
            self.print('fake coin: {}'.format( action.fake_coin(robot_id, 'bbbb') ))
            self.print('share_energy: {}'.format( action.share_energy(robot_id, 'cccc') ))
            self.print('move: {}'.format( action.detect(-1) ))
            self.print('')
        self.print(action.get())
        return action
# Run strategy: entry point when executed as a script.
if __name__ == "__main__":
    test = Test()
    test.run()
| 3.015625 | 3 |
detect.py | ecr23xx/kp6d | 3 | 12773574 | import os
import argparse
from detect.eval.src.config import prepare_cfg, prepare_weight
from detect.eval.src.dataset import prepare_dataset
from detect.eval.src.detector import Detector
def parse_arg():
    """Define and evaluate the command-line interface for YOLO v3 evaluation.

    Returns:
        argparse.Namespace with ``bs``, ``reso``, ``gpu`` (default
        ``'0,1,2,3'``), ``name``, ``seq`` and ``ckpt`` attributes.
    """
    cli = argparse.ArgumentParser(description='YOLO v3 evaluation')
    cli.add_argument('--bs', type=int, help="Batch size")
    cli.add_argument('--reso', type=int, help="Image resolution")
    cli.add_argument('--gpu', default='0,1,2,3', help="GPU ids")
    cli.add_argument('--name', type=str, choices=['linemod-single', 'linemod-occ'])
    cli.add_argument('--seq', type=str, help="Sequence number")
    cli.add_argument('--ckpt', type=str, help="Checkpoint path")
    parsed = cli.parse_args()
    return parsed
# Parse CLI args at import time so CUDA_VISIBLE_DEVICES is set before any
# torch/CUDA initialization happens in the imported modules.
args = parse_arg()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if __name__ == '__main__':
    print(args)
    # Build the detector from the dataset-specific config and checkpoint.
    detector = Detector(
        cfgfile=prepare_cfg(args.name),
        seq=args.seq,
        weightfile=prepare_weight(args.ckpt),
        reso=args.reso
    )
    # Only the validation loader is used for evaluation.
    _, val_dataloder = prepare_dataset(
        name=args.name,
        reso=args.reso,
        bs=args.bs,
        seq=args.seq
    )
    detector.detect_all(val_dataloder, savedir='./results/detect')
| 2.234375 | 2 |
algorithms/RNN.py | DavidFellner/Malfunctions_in_LV_grid_datase | 0 | 12773575 | '''
An Elman Network is implemented, taking the output of the last time step of the time series as prediction, and also to
compute the training loss. This is done because this output is thought of as the most informed one.
'''
import torch
from torch import nn
from sklearn.preprocessing import MaxAbsScaler
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import accuracy_score
import random
import numpy as np
import copy
import importlib
import os
import einops
from experiment_config import experiment_path, chosen_experiment
# Load the chosen experiment module directly from its file path and expose
# its learning configuration under the module-level name `configuration`.
spec = importlib.util.spec_from_file_location(chosen_experiment, experiment_path)
config = importlib.util.module_from_spec(spec)
spec.loader.exec_module(config)
configuration = config.learning_config
def choose_best(models_and_losses):
    """Pick the entry with the lowest validation loss.

    Args:
        models_and_losses: sequence of ``(model, loss)`` tuples, one per epoch
            (index 0 corresponds to epoch 1).

    Returns:
        Tuple ``((model, loss), epoch)`` where ``epoch`` is the 1-based epoch
        of the best (first-minimal) loss.
    """
    # Single pass instead of the original two throwaway list builds plus a
    # second .index() scan; min() keeps the first minimum, matching the
    # original tie-breaking behavior.
    index_best = min(range(len(models_and_losses)),
                     key=lambda i: models_and_losses[i][1])
    return models_and_losses[index_best], index_best + 1
def save_model(model, epoch, loss):
    """Persist a training checkpoint to <models_folder>/<classifier>/model.pth."""
    path = os.path.join(config.models_folder, configuration['classifier'])
    if not os.path.exists(path):
        os.makedirs(path)
    try:
        # Normal case: state_dict is the usual nn.Module method.
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': model.optimizer.state_dict(),
            'loss': loss,
        }, os.path.join(path, 'model.pth'))
    except TypeError:
        # Fallback: elsewhere in this module a *copied* state_dict object is
        # stored on the model, in which case it is not callable.
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict,
            'optimizer_state_dict': model.optimizer.state_dict(),
            'loss': loss,
        }, os.path.join(path, 'model.pth'))
class RNN(nn.Module):
    """Elman RNN classifier.

    The prediction for a time series is taken either from the output of the
    last time step (thought of as the most informed one) or from a majority
    vote over the last outputs, depending on
    ``configuration['decision criteria']``.

    Bug fixes over the original version:
    * ``loss.backward()`` was called twice per mini-batch in the X/y training
      path — the second call raises a RuntimeError (graph already freed).
    * the majority-vote branch of the X/y training path released tensors via
      the undefined name ``last_outputs`` (NameError); it now uses
      ``relevant_outputs``, which is identical in the last-output branch.
    * ``fit`` validated via ``self.predict(X_test)``, which bound the
      validation set to the ``test_loader`` parameter positionally; it now
      passes ``X=X_test`` as intended.
    * ``detach`` no longer uses a mutable default argument.
    """

    def __init__(self, input_size, output_size, hidden_dim, n_layers):
        super(RNN, self).__init__()
        # Architecture hyper-parameters.
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.output_size = output_size
        self.input_size = input_size
        self._device = self.choose_device()
        self._rnn = nn.RNN(input_size, hidden_dim, n_layers, nonlinearity=configuration["activation function"]).to(self._device)
        self._fc = nn.Linear(hidden_dim, output_size).to(self._device)
        self.optimizer = self.choose_optimizer(alpha=configuration["learning rate"] * configuration["mini batch size"])  # linear scaling of LR
        self._estimator_type = 'classifier'

    def forward(self, x):
        """Run the RNN over a batch; returns (per-step outputs, final hidden state)."""
        seq_length = len(x[0])
        # Initializing hidden state for first input using method defined below
        hidden = self.init_hidden(seq_length).to(self._device)
        if x.dim() == 2:
            # Add the trailing feature dimension expected by nn.RNN.
            x = x.view(-1, seq_length, 1)
        # Keep the layers on the same device as the incoming batch.
        if x.device == torch.device("cpu"):
            self._rnn = self._rnn.to(torch.device("cpu"))
            self._fc = self._fc.to(torch.device("cpu"))
            out, hidden = self._rnn(x, hidden)
            out = self._fc(out)
        else:
            self._rnn = self._rnn.to(self.choose_device())
            self._fc = self._fc.to(self.choose_device())
            out, hidden = self._rnn(x, hidden)
            # feed output into the fully connected layer
            out = self._fc(out)
        return out, hidden

    def init_hidden(self, seq_length):
        """Generate the initial (all-zero) hidden state for a forward pass."""
        device = self._device
        hidden = torch.zeros(self.n_layers, seq_length, self.hidden_dim).to(device)
        return hidden

    def fit(self, train_loader=None, test_loader=None, X_train=None, y_train=None, X_test=None, y_test=None, early_stopping=True, control_lr=None, prev_epoch=1, prev_loss=1, grid_search_parameter=None):
        """Train the network either from raw X/y arrays or from dataloaders.

        Returns:
            (models_and_val_losses, training_losses, lrs) — per-epoch
            (state_dict, val_loss) pairs, training losses and learning rates.
        """
        torch.cuda.empty_cache()
        self.early_stopping = early_stopping
        self.control_lr = control_lr
        if X_train and y_train:
            X = X_train
            y = y_train
        mini_batch_size = configuration["mini batch size"]
        criterion = nn.CrossEntropyLoss()
        nominal_lr = configuration["learning rate"] * mini_batch_size  # linear scaling of LR
        lr = nominal_lr
        loss = 10000000000  # set initial dummy loss
        lrs = []
        training_losses = []
        models_and_val_losses = []
        pause = 0  # for early stopping
        if prev_epoch is None or grid_search_parameter:
            prev_epoch = 1
        if grid_search_parameter is not None:
            configuration[configuration["grid search"][0]] = grid_search_parameter
        for epoch in range(prev_epoch, configuration["number of epochs"] + 1):
            # Only SGD gets a per-epoch LR schedule: Adam keeps internal state
            # and should not be re-initialized every epoch.
            if configuration["optimizer"] == 'SGD' and not epoch == prev_epoch:
                self.optimizer, lr = self.control_learning_rate(lr=lr, loss=loss, losses=training_losses, nominal_lr=nominal_lr, epoch=epoch)
            lrs.append(lr)
            if X_train and y_train:
                # Shuffle samples so mini batches differ between epochs.
                zipped_X_y = list(zip(X, y))
                random.shuffle(zipped_X_y)
                X, y = zip(*zipped_X_y)
                X = np.array(X)
                y = list(y)
                if len(X) % mini_batch_size > 0:  # drop samples to fit the batch size
                    samples_to_drop = len(X) % mini_batch_size
                    X = X[:-samples_to_drop]
                    y = y[:-samples_to_drop]
                mini_batches = X.reshape((int(len(X) / mini_batch_size), mini_batch_size, len(X[0])))
                mini_batch_targets = np.array(y).reshape(int(len(y) / mini_batch_size), mini_batch_size)
                input_seq = [torch.Tensor(i).view(len(i), -1, 1) for i in mini_batches]
                target_seq = [torch.Tensor([i]).view(-1).long() for i in mini_batch_targets]
                inout_seq = list(zip(input_seq, target_seq))
                for sequences, labels in inout_seq:
                    labels = labels.to(self._device)
                    sequences = sequences.to(self._device)
                    # Clear gradients per batch so we don't backprop through
                    # the entire dataset.
                    self.optimizer.zero_grad()
                    output, hidden = self(sequences)
                    if configuration['decision criteria'] == 'majority vote':
                        # NOTE(review): unlike the dataloader branch below this
                        # branch has no special case for calibration rate == 1,
                        # and the label reshaping assumes 2-D labels — confirm
                        # this path is exercised/correct before relying on it.
                        start_voting_outputs = int((configuration['calibration rate']) * output.size()[1])
                        voting_outputs = torch.stack([i[start_voting_outputs:] for i in
                                                      output])  # choose last n outputs of timeseries to do majority vote
                        relevant_outputs = voting_outputs.to(self._device)
                        labels = torch.stack([i[-1] for i in labels]).long()
                        labels = einops.repeat(labels, 'b -> (b copy)', copy=relevant_outputs.size()[1])
                        labels = torch.stack(torch.split(labels, relevant_outputs.size()[1]), dim=0)
                        loss = sum([criterion(relevant_outputs[i], labels[i]) for i in list(range(labels.size()[0]))]) / \
                            labels.size()[0]
                    else:
                        last_outputs = torch.stack(
                            [i[-1] for i in output])  # choose last output of timeseries (most informed output)
                        relevant_outputs = last_outputs.to(self._device)
                        labels = torch.stack([i[-1] for i in labels]).long()
                        loss = criterion(relevant_outputs, labels)
                    # Bug fix: backward() was called twice here; the second
                    # call raises because the graph is freed by the first.
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.parameters(), configuration["gradient clipping"])  # helps prevent exploding gradients in RNNs/LSTMs
                    self.optimizer.step()
                    # Bug fix: previously referenced `last_outputs`, which is
                    # undefined in the majority-vote branch.
                    self.detach([relevant_outputs, sequences, labels, hidden])
            elif train_loader and test_loader:
                import sys
                toolbar_width = len(train_loader)
                # setup progress toolbar
                print('Epoch {}/{} completed:'.format(epoch, configuration["number of epochs"]))
                sys.stdout.write("[%s]" % (" " * toolbar_width))
                sys.stdout.flush()
                sys.stdout.write("\b" * (toolbar_width+1))  # return to start of line, after '['
                sys.stdout.flush()
                for i, (sequences, labels, raw_seq) in enumerate(train_loader):
                    labels = labels.to(self._device)
                    sequences = sequences.to(self._device)
                    self.optimizer.zero_grad()
                    output, hidden = self(sequences)
                    if configuration['decision criteria'] == 'majority vote':
                        if configuration['calibration rate'] == 1:
                            start_voting_outputs = int((configuration['calibration rate']) * output.size()[
                                1]) - 1  # equal to only using the last output
                        else:
                            start_voting_outputs = int((configuration['calibration rate']) * output.size()[1])
                        voting_outputs = torch.stack([i[start_voting_outputs:] for i in output])  # choose last n outputs of timeseries to do majority vote
                        relevant_outputs = voting_outputs.to(self._device)
                        labels = torch.stack([i[-1] for i in labels]).long()
                        labels = einops.repeat(labels, 'b -> (b copy)', copy=relevant_outputs.size()[1])
                        labels = torch.stack(torch.split(labels, relevant_outputs.size()[1]), dim=0)
                        loss = sum([criterion(relevant_outputs[i], labels[i]) for i in list(range(labels.size()[0]))])/labels.size()[0]
                    else:
                        last_outputs = torch.stack([i[-1] for i in output])  # choose last output of timeseries (most informed output)
                        relevant_outputs = last_outputs.to(self._device)
                        labels = torch.stack([i[-1] for i in labels]).long()
                        loss = criterion(relevant_outputs, labels)
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.parameters(), configuration["gradient clipping"])  # helps prevent exploding gradients in RNNs/LSTMs
                    self.optimizer.step()
                    self.detach([relevant_outputs, sequences, labels, hidden])
                    progress = (i+1) / len(train_loader)
                    sys.stdout.write("- %.1f%% " %(progress*100))
                    sys.stdout.flush()
                    if config.dev_mode:
                        break
                sys.stdout.write("]\n")  # this ends the progress bar
                sys.stdout.flush()
            else:
                print('Either provide X and y or dataloaders!')
            # Validation at the end of every epoch.
            if X_train and y_train:
                training_losses.append(loss)
                # Bug fix: X_test was previously passed positionally as the
                # `test_loader` argument of predict().
                val_outputs = torch.stack([i[-1].view(-1) for i in self.predict(X=X_test)[1]]).to(self._device)
                val_loss = criterion(val_outputs, torch.Tensor([np.array(y_test)]).view(-1).long().to(self._device))
            else:
                training_losses.append(loss)
                pred, val_outputs, y_test = self.predict(test_loader=test_loader)
                val_outputs = torch.stack([i[-1] for i in val_outputs]).to(self._device)
                y_test = y_test.view(-1).long().to(self._device)
                val_loss = criterion(val_outputs, y_test).to(self._device)
                self.detach([val_outputs])
            # NOTE(review): `self.state_dict` is stored uncalled here; the
            # TypeError fallback in save_model() depends on this, so it is
            # kept as-is.
            models_and_val_losses.append((copy.deepcopy(self.state_dict), val_loss.item()))
            if configuration["save_model"]:
                clf, ep = choose_best(models_and_val_losses)
                if ep == epoch:
                    save_model(self, epoch, val_loss.item())
            if self.early_stopping:
                try:
                    # Stop when the validation loss has been flat (< 1e-6
                    # change) for 5 consecutive epochs.
                    if abs(models_and_val_losses[-1][1] - models_and_val_losses[-2][1]) < 1*10**-6:
                        pause += 1
                        if pause == 5:
                            print('Validation loss has not changed for {0} epochs! Early stopping of training after {1} epochs!'.format(pause, epoch))
                            return models_and_val_losses, training_losses, lrs
                except IndexError:
                    pass
            if not configuration["cross_validation"] and epoch % 10 == 0:
                print('Epoch: {}/{}.............'.format(epoch, configuration["number of epochs"]), end=' ')
                print("Loss: {:.4f}".format(loss.item()))
        return models_and_val_losses, training_losses, lrs

    def predict(self, test_loader=None, X=None):
        """Classify sequences given either raw arrays (X) or a dataloader.

        Returns (predictions, outputs) for X input, or
        (predictions, outputs, y_test) for dataloader input.
        """
        if X is not None:
            input_sequences = torch.stack([torch.Tensor(i).view(len(i), -1) for i in X])
            input_sequences = input_sequences.to(self._device)
            outputs, hidden = self(input_sequences)
            last_outputs = torch.stack([i[-1] for i in outputs]).to(self._device)
            probs = nn.Softmax(dim=-1)(last_outputs)
            pred = torch.argmax(probs, dim=-1)  # class with highest probability
            self.detach([input_sequences, hidden, outputs])
            return [i.item() for i in pred], outputs
        elif test_loader:
            pred = torch.Tensor()
            y_test = torch.Tensor()
            outputs_cumm = torch.Tensor()
            for i, (input_sequences, labels, raw_seq) in enumerate(test_loader):
                input_sequences = input_sequences.to(self._device)
                outputs, hidden = self(input_sequences)
                if configuration['decision criteria'] == 'majority vote':
                    if configuration['calibration rate'] == 1:
                        start_voting_outputs = int((configuration['calibration rate']) * outputs.size()[
                            1]) - 1  # equal to only using the last output
                    else:
                        start_voting_outputs = int((configuration['calibration rate']) * outputs.size()[1])
                    voting_outputs = torch.stack([i[start_voting_outputs:] for i in outputs])  # last n outputs vote
                    relevant_outputs = voting_outputs.to(self._device)
                    most_likely_outputs = torch.argmax(nn.Softmax(dim=-1)(relevant_outputs), dim=-1)
                    majority_vote_result = torch.mode(most_likely_outputs, dim=-1)[0]
                    pred_new = majority_vote_result.float()
                else:
                    last_outputs = torch.stack([i[-1] for i in outputs]).to(self._device)
                    probs = nn.Softmax(dim=-1)(last_outputs)
                    pred_new = torch.argmax(probs, dim=-1).float()
                outputs_cumm = torch.cat((outputs_cumm.to(self._device), outputs.float()), 0)
                pred = torch.cat((pred.to(self._device), pred_new), 0)  # class with highest probability
                y_test = torch.cat((y_test, labels.float()), 0)
                self.detach([input_sequences, hidden, outputs])
                # Cap the evaluated share of the test set (memory issues).
                if configuration["train test split"] <= 1:
                    share_of_test_set = len(test_loader)*configuration["train test split"]*labels.size()[0]
                else:
                    share_of_test_set = configuration["train test split"]
                if y_test.size()[0] >= share_of_test_set:
                    break
            return [i.item() for i in pred], outputs_cumm, y_test
        else:
            print('Either provide X or a dataloader!')

    def choose_optimizer(self, alpha=configuration["learning rate"]):
        """Create the configured optimizer (Adam or SGD) with learning rate alpha."""
        if configuration["optimizer"] == 'Adam':
            optimizer = torch.optim.Adam(self.parameters(), lr=alpha)
        else:
            optimizer = torch.optim.SGD(self.parameters(), lr=alpha)
        return optimizer

    def control_learning_rate(self, lr=None, loss=None, losses=None, epoch=None, nominal_lr=None):
        """Return (optimizer, lr) for the current epoch per the LR schedule.

        'warm up' ramps the LR linearly up during the warm-up share of epochs
        and linearly down afterwards; 'LR controlled' grows/shrinks the LR by
        10% depending on whether the loss improved; otherwise the LR is kept.
        """
        warm_up_share = configuration["percentage of epochs for warm up"] / 100
        if self.control_lr == 'warm up' and epoch < int(warm_up_share * configuration["number of epochs"]):
            lr = nominal_lr * epoch / int((warm_up_share * configuration["number of epochs"]))
            optimizer = self.choose_optimizer(alpha=lr)
        elif self.control_lr == 'warm up' and epoch >= int(warm_up_share * configuration["number of epochs"]):
            lr = nominal_lr * (configuration["number of epochs"] - epoch) / int((1-warm_up_share) * configuration["number of epochs"])
            optimizer = self.choose_optimizer(alpha=lr)
        elif self.control_lr == 'LR controlled':
            if losses[-1] > loss:
                lr = lr * 1.1
                optimizer = self.choose_optimizer(alpha=lr)
            elif losses[-1] <= loss:
                lr = lr * 0.90
                optimizer = self.choose_optimizer(alpha=lr)
        else:
            lr = lr
            optimizer = self.choose_optimizer(alpha=lr)
        return optimizer, lr

    def preprocess(self, X_train, X_test):
        """Fit a scaler on the training data and scale both splits with it."""
        scaler = self.fit_scaler(X_train)
        X_train = self.preprocessing(X_train, scaler)
        X_test = self.preprocessing(X_test, scaler)
        return X_train, X_test

    def fit_scaler(self, X):
        """Fit a MaxAbsScaler on per-sample zero-meaned data (range [-1, 1])."""
        X_zeromean = np.array([x - x.mean() for x in X])  # deduct its own mean from every sample
        maxabs_scaler = MaxAbsScaler().fit(X_zeromean)
        return maxabs_scaler

    def preprocessing(self, X, scaler):
        """Zero-mean each sample and apply the previously fitted scaler."""
        X_zeromean = np.array([x - x.mean() for x in X])
        X = scaler.transform(X_zeromean)
        return X

    def score(self, y_test, y_pred):
        """Return [accuracy, (precision, recall, f1, support)] with macro averaging."""
        metrics = precision_recall_fscore_support(y_test, y_pred, average='macro')
        accuracy = accuracy_score(y_test, y_pred)
        return [accuracy, metrics]

    def get_params(self, deep=True):
        """sklearn-style parameter accessor."""
        return {"hidden_dim": self.hidden_dim, "n_layers": self.n_layers, "output_size": self.output_size, "input_size": self.input_size}

    def choose_device(self):
        """Return the CUDA device if available, else the CPU."""
        is_cuda = torch.cuda.is_available()
        if is_cuda:
            device = torch.device("cuda")
        else:
            device = torch.device("cpu")
        return device

    def detach(self, inputs=None):
        """Best-effort GPU memory release after a batch.

        NOTE(review): torch.detach() returns a new tensor and the result is
        discarded, so the loop itself does not free memory — the effective
        part is empty_cache().  Kept for behavioral compatibility.
        Fixed the mutable default argument ([] -> None).
        """
        if inputs is None:
            inputs = []
        for i in inputs:
            torch.detach(i)
        torch.cuda.empty_cache()
        return
| 2.890625 | 3 |
player.py | gorel/poker-server | 0 | 12773576 | from gamehelper import card2str
import sqlite3
# Database column information
PLAYER_ROUND_ID_COLUMN = 0
PLAYER_USER_ID_COLUMN = 1
PLAYER_TABLE_ID_COLUMN = 2
PLAYER_INITIAL_STACK_COLUMN = 3
PLAYER_STACK_COLUMN = 4
PLAYER_MY_TURN_COLUMN = 5
PLAYER_HAND1_COLUMN = 6
PLAYER_HAND2_COLUMN = 7
PLAYER_IS_FOLDED_COLUMN = 8
PLAYER_DISPLAY_COLUMN = 9
# Constants
SELECT_QUERY = "SELECT Player.*, User.display FROM players Player INNER JOIN users User on Player.user_id = User.id WHERE User.apikey=? ORDER BY Player.id DESC LIMIT 1"
SELECT_OPPONENTS_QUERY = "SELECT * FROM players WHERE table_id=? AND round_id=? AND user_id!=?"
INSERT_ACTION_QUERY = "INSERT INTO player_actions ('round_id', 'player_id', 'action', 'amount') VALUES (?, ?, ?, ?)"
GET_DISPLAY_QUERY = "SELECT User.* FROM users User INNER JOIN players Player on User.id = Player.user_id WHERE Player.user_id=?"
class PlayerNotFoundError(Exception):
    """Raised internally when no player row matches the supplied API key."""
    pass
class Player:
    """Wrapper around a row of the ``players`` table (optionally joined with
    ``users`` for the display name — see SELECT_QUERY)."""

    def __init__(self, player):
        """Build a Player from a DB row tuple (see PLAYER_*_COLUMN indices)."""
        self.round_id = player[PLAYER_ROUND_ID_COLUMN]
        self.player_id = player[PLAYER_USER_ID_COLUMN]
        self.table_id = player[PLAYER_TABLE_ID_COLUMN]
        self.initial_stack = player[PLAYER_INITIAL_STACK_COLUMN]
        self.stack = player[PLAYER_STACK_COLUMN]
        self.my_turn = bool(player[PLAYER_MY_TURN_COLUMN])
        card1 = card2str(player[PLAYER_HAND1_COLUMN])
        card2 = card2str(player[PLAYER_HAND2_COLUMN])
        self.hand = [card1, card2]
        self.is_folded = bool(player[PLAYER_IS_FOLDED_COLUMN])
        # Bug fix: `player` is a plain row tuple, so the original
        # `player.get_display()` raised AttributeError.  Rows from
        # SELECT_QUERY carry the display name at PLAYER_DISPLAY_COLUMN;
        # rows from SELECT_OPPONENTS_QUERY do not, in which case the name
        # is lazily loaded by get_display().
        if len(player) > PLAYER_DISPLAY_COLUMN:
            self.display = player[PLAYER_DISPLAY_COLUMN]
        else:
            self.display = None

    def get_round_id(self):
        return self.round_id

    def get_player_id(self):
        return self.player_id

    def get_table_id(self):
        return self.table_id

    def get_display(self):
        """Return the display name, querying the DB on first use if needed."""
        if self.display is None:
            # NOTE(review): `app` is not imported in this module and
            # USER_DISPLAY_COLUMN is undefined here — this lazy path cannot
            # run as written; confirm the intended imports/constants.
            conn = sqlite3.connect(app.config['DATABASE'])
            c = conn.cursor()
            data = (self.get_player_id(), )
            c.execute(GET_DISPLAY_QUERY, data)
            user = c.fetchone()
            conn.close()
            self.display = user[USER_DISPLAY_COLUMN]
        return self.display

    def get_initial_stack(self):
        return self.initial_stack

    def get_stack(self):
        return self.stack

    def get_my_turn(self):
        return self.my_turn

    def get_hand(self):
        # Filter out missing cards (e.g. before the hand is dealt).
        return [card for card in self.hand if card is not None]

    def get_is_folded(self):
        return self.is_folded

    def post_action(self, action, amount=0):
        """Record an action for this player in player_actions."""
        # NOTE(review): action2db is not defined/imported in this module —
        # confirm where it comes from.
        action = action2db(action)
        conn = sqlite3.connect(app.config['DATABASE'])
        c = conn.cursor()
        data = (self.get_round_id(), self.get_player_id(), action, amount, )
        c.execute(INSERT_ACTION_QUERY, data)
        # Bug fix: without commit() the INSERT was rolled back on close().
        conn.commit()
        conn.close()

    def get_public_info(self):
        """Return the opponent-visible subset of this player's state."""
        info = {}
        info['name'] = self.get_display()
        info['initial_stack'] = self.get_initial_stack()
        info['stack'] = self.get_stack()
        info['folded'] = self.get_is_folded()
        return info

    @classmethod
    def get(self_class, api_key):
        """Look up the most recent Player for an API key; None if not found."""
        conn = sqlite3.connect(app.config['DATABASE'])
        c = conn.cursor()
        data = (api_key, )
        c.execute(SELECT_QUERY, data)
        player = c.fetchone()
        conn.close()
        try:
            if player is None:
                raise PlayerNotFoundError
            return Player(player)
        except PlayerNotFoundError:
            return None

    @classmethod
    def get_opponents(self_class, player):
        """Return public info for every other player at the same table/round."""
        conn = sqlite3.connect(app.config['DATABASE'])
        c = conn.cursor()
        # Bug fix: Player has no get_user_id(); the id accessor is
        # get_player_id().
        data = (player.get_table_id(), player.get_round_id(), player.get_player_id(), )
        c.execute(SELECT_OPPONENTS_QUERY, data)
        opponents = c.fetchall()
        conn.close()
        return [Player(opponent).get_public_info() for opponent in opponents]
| 3.125 | 3 |
reactive_robot/parsers/string.py | yusufcanb/reactive-robot | 10 | 12773577 | from reactive_robot.parsers.base import BaseParser
class RawPayloadParser(BaseParser):
    """Parser that treats the raw payload as whitespace-separated variables."""

    def get_variables(self, message: bytes):
        """Decode *message* as UTF-8 and split on whitespace; [] when empty."""
        if not message:
            return []
        return message.decode("utf-8").split()
| 2.40625 | 2 |
bucketsort.py | MichaelORegan/46887-COMPUTATIONAL-THINKING-WITH-ALGORITHMS-PROJECT | 0 | 12773578 | # <NAME> 05/May/2019
# https://www.sanfoundry.com/python-program-implement-bucket-sort/
def bucketSort(alist):
    """Sort a list of non-negative numbers with bucket sort.

    Each bucket is sorted with insertionSort and the buckets are then
    concatenated.  Fixes over the original: an empty input no longer raises
    ValueError in max(), an all-zero input no longer raises
    ZeroDivisionError, and the result is built with extend() instead of
    quadratic list concatenation.

    NOTE: as in the original, the bucket-index formula assumes the values
    are non-negative.
    """
    if not alist:  # max([]) would raise ValueError
        return []
    largest = max(alist)
    length = len(alist)
    if largest == 0:  # all zeros: already sorted, and avoids size == 0 below
        return list(alist)
    size = largest / length
    buckets = [[] for _ in range(length)]
    for value in alist:
        j = int(value / size)
        if j != length:
            buckets[j].append(value)
        else:
            # The maximum lands exactly on the upper edge; clamp it into
            # the last bucket.
            buckets[length - 1].append(value)
    for bucket in buckets:
        insertionSort(bucket)
    result = []
    for bucket in buckets:
        result.extend(bucket)
    return result


def insertionSort(alist):
    """Sort *alist* in place with insertion sort (used per bucket)."""
    for i in range(1, len(alist)):
        temp = alist[i]
        j = i - 1
        # Shift larger elements one slot right, then drop temp into place.
        while (j >= 0 and temp < alist[j]):
            alist[j + 1] = alist[j]
            j = j - 1
        alist[j + 1] = temp
# Demo: sort a sample list and print the result.
alist = [54, 26, 93, 17, 77, 31, 44, 55, 20]
# Fixed: bucketSort was previously called twice, with the first result
# discarded — one call suffices.
print(bucketSort(alist))
source/src/molecule-unfolding/utility/QMUQUBO.py | qingfengtommy/quantum-ready-solution-for-drug-discovery | 0 | 12773579 | <reponame>qingfengtommy/quantum-ready-solution-for-drug-discovery<filename>source/src/molecule-unfolding/utility/QMUQUBO.py
########################################################################################################################
# The following class is the construction of QUBO model
########################################################################################################################
import dimod
from .MolGeoCalc import atom_distance_func
from collections import defaultdict
import time
import logging
import pickle
import os
class QMUQUBO():
    def __init__(self, mol_data, method, **param):
        """Set up bookkeeping for QUBO models built with the given method(s).

        Args:
            mol_data: molecule data object (provides .name and .bond_graph).
            method: iterable of method names; only 'pre-calc' is implemented.
            **param: per-method parameters; param[mt]["param"] lists the
                hyper-parameter names tracked for duplicate detection.
        """
        # prepare parameters
        self.param = param
        self.mol_data = mol_data
        self.name = f"qmu_{self.mol_data.name}_model"
        # init model_info to store the information for model of different methods
        # init model_qubo to store the qubo for model of different methods
        self.model_info = {}
        self.model_qubo = {}
        # define vars/var_rb_map/rb_var_map for different models
        self.var = None
        self.var_rb_map = None
        self.rb_var_map = None
        for mt in method:
            self.model_info[f"{mt}"] = {}
            self.model_qubo[f"{mt}"] = {}
            if mt == "pre-calc":
                logging.info(
                    "initial pre-calculate for constructing molecule QUBO")
                # One set per hyper-parameter; _check_duplicate() adds to
                # these to detect already-built models.
                for param in self.param[mt]["param"]:
                    self.model_info[mt][param] = set()
            elif mt == "after-calc":
                logging.info(
                    "initial after calculate for constructing molecule QUBO not implemented !!")
            else:
                logging.info(f"only pre-calculate(method='pre-calc') and after-calculate(method='after-calc') are supported, \
                    method {mt} not support !!")
    def build_model(self, **param):
        """Build models for every supported method in **param; returns 0.

        Each keyword is a method name mapped to its model parameters; only
        'pre-calc' is currently dispatched.
        """
        for method, config in param.items():
            model_param = config
            if method == "pre-calc":
                self._build_pre_calc_model(**model_param)
        return 0
    def _build_pre_calc_model(self, **model_param):
        """Build and cache a QUBO for every (M, D, A, hubo_qubo_val) combination.

        M: number of torsions; D: number of discrete angles per torsion;
        A: constraint penalty weight; hubo_qubo_val: strength used by dimod
        when reducing the HUBO to a QUBO.  Results are stored under
        self.model_qubo["pre-calc"]["{M}_{D}_{A}_{hubo_qubo_val}"].
        """
        for M in model_param["M"]:
            for D in model_param["D"]:
                for A in model_param["A"]:
                    for hubo_qubo_val in model_param["hubo_qubo_val"]:
                        # update var_map
                        # prepare variables
                        self.var, self.var_rb_map, self.rb_var_map = self._prepare_var(
                            self.mol_data, D)
                        # check availability (skip combinations already built)
                        if self._check_duplicate([M, D, A, hubo_qubo_val], ["M", "D", "A", "hubo_qubo_val"], "pre-calc"):
                            logging.info(
                                f"duplicate model !! pass !! M:{M},D:{D},A:{A},hubo_qubo_val {hubo_qubo_val}")
                            continue
                        start = time.time()
                        hubo = {}
                        # Discrete torsion angles in degrees.
                        theta_option = [x * 360/D for x in range(D)]
                        hubo_constraints, hubo_distances = self._build_qubo_pre_calc(self.mol_data, M, D, A, self.var,
                                                                                     self.rb_var_map, self.var_rb_map,
                                                                                     theta_option)
                        hubo.update(hubo_constraints)
                        hubo.update(hubo_distances)
                        # transfer hubo to qubo
                        # TODO why make_quadratic not work?
                        # qubo_raw = dimod.make_quadratic(
                        #     hubo, hubo_qubo_val, dimod.BINARY).to_qubo()
                        qubo_raw = dimod.make_quadratic(
                            hubo, hubo_qubo_val, dimod.BINARY)
                        qubo = self._manual_qubo(qubo_raw.to_qubo())
                        end = time.time()
                        model_name = f"{M}_{D}_{A}_{hubo_qubo_val}"
                        # Cache the QUBO plus the variable maps needed to
                        # interpret its solution later.
                        self.model_qubo["pre-calc"][model_name] = {}
                        self.model_qubo["pre-calc"][model_name]["qubo"] = qubo
                        self.model_qubo["pre-calc"][model_name]["var"] = self.var
                        self.model_qubo["pre-calc"][model_name]["var_rb_map"] = self.var_rb_map
                        self.model_qubo["pre-calc"][model_name]["rb_var_map"] = self.rb_var_map
                        self.model_qubo["pre-calc"][model_name]["time"] = end-start
                        self.model_qubo["pre-calc"][model_name]["model_name"] = model_name
                        # Keep only single-torsion names (no comma-joined
                        # groups) as valid rotatable-bond names.
                        ris_name = list(self.mol_data.bond_graph.sort_ris_data[str(M)].keys()).copy()
                        valid_rb_name = []
                        for name in ris_name:
                            if len(name.split(',')) == 1:
                                valid_rb_name.append(name)
                        self.model_qubo["pre-calc"][model_name]["rb_name"] = valid_rb_name
                        # # optimize results
                        # self.model_qubo["pre-calc"][model_name]["optimizer"] = {}
                        # self.model_qubo["pre-calc"][model_name]["optimizer"]["post"] = {}
                        logging.info(
                            f"Construct model for M:{M},D:{D},A:{A},hubo_qubo_val:{hubo_qubo_val} {(end-start)/60} min")
def _manual_qubo(self, qubo_raw):
qubo = defaultdict(float)
for key, value in qubo_raw[0].items():
qubo[key] = value
return qubo
def _check_duplicate(self, values, names, method):
initial_size = 0
update_size = 0
for value, name in zip(values, names):
initial_size = initial_size + len(self.model_info[method][name])
self.model_info[method][name].add(value)
update_size = update_size + len(self.model_info[method][name])
if initial_size == update_size:
return True
else:
return False
def clear_model(self, method):
for mt in method:
self.model_info[f"{mt}"] = {}
self.model_qubo[f"{mt}"] = {}
return 0
    def describe_model(self):
        """Log the tracked hyper-parameters per method and return model_info."""
        # information for model
        for method, info in self.model_info.items():
            logging.info(f"method: {method}")
            if method == "pre-calc":
                logging.info(
                    "The model_name should be {M}_{D}_{A}_{hubo_qubo_val}")
            for param, value in info.items():
                logging.info("param: {}, value {}".format(param, value))
        return self.model_info
def get_model(self, method, model_name):
return self.model_qubo[method][model_name]
    def save(self, version, path=None):
        """Pickle this object to '<name>_<version>.pickle' and return the path.

        When *path* is None the file is written to the current directory.
        """
        save_path = None
        save_name = f"{self.name}_{version}.pickle"
        if path != None:
            save_path = os.path.join(path, save_name)
        else:
            save_path = os.path.join(".", save_name)
        with open(save_path, "wb") as f:
            pickle.dump(self, f)
        logging.info(f"finish save {save_name}")
        return save_path
    @classmethod
    def load(cls, filename):
        """Load a previously saved QMUQUBO pickle.

        NOTE: pickle.load executes arbitrary code — only load trusted files.
        """
        with open(filename, "rb") as f:
            return pickle.load(f)
def _prepare_var(self, mol_data, D):
var = {}
var_rb_map = {}
rb_var_map = {}
for m, name in enumerate(mol_data.bond_graph.rb_name):
x_d = {}
var_rb_map[str(m+1)] = name
rb_var_map[str(name)] = str(m+1)
for d in range(D):
x_d[str(d+1)] = f"x_{m+1}_{d+1}"
var[str(m+1)] = x_d
return var, var_rb_map, rb_var_map
def _build_qubo_pre_calc(self, mol_data, M, D, A, var, rb_var_map, var_rb_map, theta_option):
# initial constraint
hubo_constraints = {}
# for m in range(M):
# for d1 in range(D):
# var_1 = var[str(m+1)][str(d1+1)]
# for d2 in range(D):
# var_2 = var[str(m+1)][str(d2+1)]
# if (var_2, var_1) in hubo_constraints.keys():
# hubo_constraints[(var_2, var_1)] = hubo_constraints[(
# var_2, var_1)] + A
# elif var_1 == var_2:
# hubo_constraints[(var_1, var_1)] = -A
# else:
# hubo_constraints[(var_1, var_2)] = A
def update_constraint(ris, hubo_constraints):
for d1 in range(D):
var_1 = var[rb_var_map[ris]][str(d1+1)]
for d2 in range(D):
var_2 = var[rb_var_map[ris]][str(d2+1)]
if (var_2, var_1) in hubo_constraints.keys():
hubo_constraints[(var_2, var_1)] = hubo_constraints[(
var_2, var_1)] + A
elif var_1 == var_2:
hubo_constraints[(var_1, var_1)] = -A
else:
hubo_constraints[(var_1, var_2)] = A
# update distance term
hubo_distances = {}
def update_hubo(torsion_group, up_list):
if len(torsion_group) == 1:
# print(tor_group)
for d in range(D):
final_list = up_list + \
[var[rb_var_map[torsion_group[0]]][str(d+1)]]
# distance
final_list_name = []
if len(final_list) == 1:
final_list_name = final_list + final_list
else:
final_list_name = final_list
# hubo_distances[tuple(final_list_name)] = -1
distance = -atom_distance_func(tuple(final_list), mol_data, var_rb_map, theta_option, M)
hubo_distances[tuple(final_list_name)] = distance
logging.debug(f"final list {final_list} with distance {distance}")
else:
for d in range(D):
final_list = up_list + \
[var[rb_var_map[torsion_group[0]]][str(d+1)]]
update_hubo(torsion_group[1:], final_list)
for ris in mol_data.bond_graph.sort_ris_data[str(M)].keys():
start = time.time()
logging.debug(f"ris group {ris} ")
end = time.time()
torsion_group = ris.split(",")
if len(torsion_group) == 1:
# update constraint
update_constraint(ris, hubo_constraints)
logging.info(torsion_group)
# update hubo terms
update_hubo(torsion_group, [])
logging.debug(
f"elapsed time for torsion group {ris} : {(end-start)/60} min")
return hubo_constraints, hubo_distances
| 2.546875 | 3 |
walking.py | Cocodidou/projetFusee | 0 | 12773580 | <reponame>Cocodidou/projetFusee<filename>walking.py<gh_stars>0
# animate a stick person
# stick figure by <NAME>
import turtle
import engine
import random
import math
WIDTH = 640            # screen width in pixels
HEIGHT = 480           # screen height in pixels
FGCOLOR = 'grey70'     # foreground (drawing) color
BGCOLOR = 'grey23'     # background color
POPCOLOR = 'white'     # brightest color used for the film "pop" effect
POPPROB = 0.3          # per-update probability of spawning a pop effect
POPDURATION = 1        # lifetime of a pop effect (engine age units)
GROUNDPROB = 0.2       # per-update probability a ground line re-rolls itself
GROUNDX = -WIDTH / 6 + 75     # nominal ground-line x position
GROUNDY = -HEIGHT / 3 + 27    # nominal ground-line y position
GROUNDLEN = 200        # nominal ground-line length
NGROUNDLINES = 3       # number of jittering ground lines drawn
FIGUREX = 100          # stick-figure x position
FIGUREY = 50           # stick-figure y position
LONGDISTY = 150        # y position of the most distant background layer
MEDDISTY = 100         # y position of the medium-distance background layer
NSTEPS = 10 # tweened steps between key frames
UPDATESPERSTEP = 1 # how long to hold each step's position
### trees - code from eg-retracepoly.py
class Tree(engine.GameObject):
    """A background tree scrolling horizontally for the parallax effect."""

    def __init__(self, shape, x, y, deltax, color):
        # trees only drift horizontally, so the vertical delta is fixed at 0
        super().__init__(x, y, deltax, 0, shape, color)

    def heading(self):
        # trees are always drawn upright
        return 90
def maketree_r(S, L, scale):
    """Recursively trace a tree description with the turtle.

    S is a stack of points already visited (the path back out of this
    branch); L mixes (x, y) tuples (branch points, traced in order) with
    nested lists (sub-branches drawn starting from the previous point).
    """
    assert type(L) == type( [] )
    for elem in L:
        if type(elem) == type( [] ):
            # sub-branch: recurse, seeding the stack with the last point
            # so the sub-branch retraces back to where it started
            maketree_r( [ S[-1] ], elem, scale)
        else:
            assert type(elem) == type( () )
            assert len(elem) == 2
            elem = (scale * elem[0], scale * elem[1])
            turtle.goto(elem)
            S.append(elem)
    # now unwind backwards
    while len(S) > 0:
        elem = S.pop()
        turtle.goto(elem)
def maketree(name, scale, L):
    """Trace the tree described by L and register it as shape *name*."""
    turtle.home()
    turtle.begin_poly()
    maketree_r([(0, 0)], L, scale)
    turtle.end_poly()
    turtle.register_shape(name, turtle.get_poly())
def maketrees():
    """Register the 'smalltree' and 'bigtree' shapes from one description."""
    # point list: tuples are branch points; nested lists are sub-branches
    L = [
        (-3,-3), (0.5,0), (4,-2.7), (0.4,-0.5), (0.4,-3),
        (-3.2,-5), (0.4,-3.5), (3.8,-5), (0.45,-4), (0.45,-5.8),
        (-2,-8.2), (0.51,-6.3), (3.9,-8), (0.51,-7),
        # trunk
        (0.55,-8), (0.6,-9), (0.7,-10), (1,-11), (1.1,-12),
        # ground
        (-2,-12.2), (-0.5,-12.5), [ (3,-12.1) ], [ (2,-12) ],
        [ (-1,-12.75), [ (-2.8,-12.6) ], (0.8,-12.7), (2.5,-12.5) ]
        ]
    # same tree traced at two scales for the two parallax layers
    maketree('smalltree', 5, L)
    maketree('bigtree', 10, L)
### screen "pop" effect to hint at film
class Pop(engine.GameObject):
    """A short-lived screen flash that hints at old film grain."""

    def __init__(self, x, y, size, shape, color):
        # pops never move: both deltas are zero
        super().__init__(x, y, 0, 0, shape, color)

    def isstatic(self):
        return False

    def isoob(self):
        # "out of bounds" in the temporal sense: expire after POPDURATION
        return self.age > POPDURATION
POPS = []

def makepop(fn, *args):
    """Trace fn(*args) as a polygon, register it, and record its name in POPS."""
    turtle.home()
    turtle.begin_poly()
    fn(*args)
    turtle.end_poly()
    shape_name = 'pop%d' % len(POPS)
    turtle.register_shape(shape_name, turtle.get_poly())
    POPS.append(shape_name)
def makepops():
    """Pre-register the pop shapes: a few lines and a few circles."""
    # lines of various heights
    for height in (2, 10, 25, 50):
        makepop(turtle.fd, height)
    # circles - small radii repeated so large ones show up more rarely
    for radius in (2, 2, 2, 5):
        makepop(turtle.circle, radius)
def pop_cb():
    """Random-event callback: spawn one pop effect at a random position."""
    # BUG FIX: use integer division - WIDTH / 2 yields a float (320.0) and
    # random.randint rejects non-integer bounds on modern Python.
    x = random.randint(-WIDTH // 2, WIDTH // 2)
    y = random.randint(-HEIGHT // 2, HEIGHT // 2)
    kind = random.choice(POPS)
    size = random.randint(1, 3)
    color = random.choice([POPCOLOR, FGCOLOR])
    engine.add_obj(Pop(x, y, size, kind, color))
### ground
GROUND = []
class Ground(engine.GameObject):
    """A ground line that randomly re-rolls its position, heading and shape,
    giving the scrolling ground a hand-drawn jitter."""
    def __init__(self, origx, origy, color):
        # origx/origy are the nominal anchor; regenerate() jitters around them
        self.origx = origx
        self.origy = origy
        self.regenerate()
        super().__init__(self.x, self.y, 0, 0, self.shape, color)
    def heading(self):
        return self.h
    def isstatic(self):
        return False
    def update(self):
        # occasionally re-roll the line's appearance before the engine update
        if random.random() < GROUNDPROB:
            self.regenerate()
        super().update()
    def regenerate(self):
        # cue TARDIS whooping noise
        self.x = self.origx + random.randint(-3, +9)
        self.y = self.origy + random.randint(-3, +3)
        self.h = 90 + random.randint(-1, +1)
        self.shape = random.choice(GROUND)
def makeground():
    """Register ground-line shapes of slightly varying lengths ('gr<N>')."""
    for length in range(GROUNDLEN - 3, GROUNDLEN + 9):
        turtle.home()
        turtle.begin_poly()
        turtle.fd(length)
        turtle.end_poly()
        shape_name = 'gr%d' % length
        turtle.register_shape(shape_name, turtle.get_poly())
        GROUND.append(shape_name)
### tweening and figure classes
def makenothing():
    """Register an empty polygon as the shape named 'none'."""
    # there's much ado about it
    turtle.begin_poly()
    turtle.end_poly()
    empty_poly = turtle.get_poly()
    turtle.register_shape('none', empty_poly)
class Bird(engine.GameObject):
    """Invisible parent object for the flapping bird; spawns one Segment
    per wing part and tracks them with weak references."""
    # note that this class can't be static, because the segments
    # look to it for current x and y values
    def __init__(self, L, x, y, color):
        # keep weak references to component segments; it doesn't
        # prevent GC from removing them, but allows us to keep
        # this parent class around until all the segments are gone -
        # otherwise, the Bird parent class gets deleted once it's
        # OOB, and the last part of the flapping wing (no longer
        # seeing the parent's motion updates) just stays at the
        # edge of the screen flapping away
        import weakref
        self.components = weakref.WeakSet()
        # init parent object first
        super().__init__(x, y, -0.1, -0.025, 'none', BGCOLOR)
        # create all segments; see comments in Figure class
        for i in range(len(L[0])):
            segL = [ X[i] for X in L ]
            seg = Segment(segL, self, color)
            engine.add_obj(seg)
            self.components.add(seg)
    def isoob(self):
        # out of bounds only once every segment has been garbage-collected
        if len(self.components) > 0:
            return False
        return True
class Figure(engine.GameObject):
    """Invisible parent object for the walking stick figure.

    Cannot be static: its segments read the parent's current x/y each frame.
    Spawns one Segment per limb plus a Head.
    """

    def __init__(self, L, x, y, color):
        # the parent must exist before any segment references it
        super().__init__(x, y, 0, 0, 'none', BGCOLOR)
        # hand each segment only its own column of the tweening data;
        # ideally a game object could catch an "added to engine"
        # notification and add_obj itself, but the engine offers none
        for seg_idx in range(len(L[0])):
            per_segment = [frame[seg_idx] for frame in L]
            engine.add_obj(Segment(per_segment, self, color))
        # attach the head
        engine.add_obj(Head(self, color))
class Head(engine.GameObject):
    """The figure's head: a circle riding at a fixed offset from its parent."""

    def __init__(self, parent, color):
        self.parent = parent
        head_x = parent.x - 7
        head_y = parent.y + 9
        super().__init__(head_x, head_y,
                         parent.deltax, parent.deltay, 'circle', color)
class Segment(engine.GameObject):
    """One tweened body segment of a Figure or Bird.

    Walks through its list of Tween containers: holds each pose for
    UPDATESPERSTEP updates, applies the per-step deltas for NSTEPS steps,
    then snaps to the next key frame's pose.
    """

    def __init__(self, L, parent, color):
        self.i = 0                      # index of the current Tween
        self.L = L
        self.count = UPDATESPERSTEP     # updates left holding this step
        self.stepcount = NSTEPS         # steps left before the next key frame
        self.parent = parent
        tween = L[self.i]
        self.h = tween.heading
        super().__init__(tween.x + parent.x, tween.y + parent.y,
                         parent.deltax, parent.deltay, tween.name, color)

    def heading(self):
        return self.h

    def update(self):
        # XXX should apply parent's delta x/y
        if self.count > 0:
            self.count -= 1
        else:
            self.count = UPDATESPERSTEP
            tween = self.L[self.i]
            # advance one tween step
            self.h += tween.dh
            self.x += tween.dx
            self.y += tween.dy
            if self.stepcount > 0:
                self.stepcount -= 1
            else:
                # key frame reached: switch to the next shape and re-anchor
                self.stepcount = NSTEPS
                self.i = (self.i + 1) % len(self.L)
                tween = self.L[self.i]
                self.x = tween.x + self.parent.x
                self.y = tween.y + self.parent.y
                self.h = tween.heading
                self.shape = tween.name
        super().update()
# bird key frame data
# Each BKFn tuple lists one [start, end] point pair per wing segment, in the
# same order for every frame so maketweendata can match segments up.
BKF1 = (
    [ (0,0), (3,0) ], # right inner wing
    [ (3,0), (6,1) ], # right outer wing
    [ (0,0), (-3,0) ], # left inner wing
    [ (-3,0), (-6,1) ], # left outer wing
    )
BKF2 = (
    [ (0,0), (3,-1) ], # right inner wing
    [ (3,-1), (6,0) ], # right outer wing
    [ (0,0), (-3,-1) ], # left inner wing
    [ (-3,-1), (-6,0) ], # left outer wing
    )
BKF3 = (
    [ (0,0), (3,-2) ], # right inner wing
    [ (3,-2), (6,-1) ], # right outer wing
    [ (0,0), (-3,-2) ], # left inner wing
    [ (-3,-2), (-6,-1) ], # left outer wing
    )
BKF4 = (
    [ (0,0), (3,-2) ], # right inner wing
    [ (3,-2), (6,-2.5) ], # right outer wing
    [ (0,0), (-3,-2) ], # left inner wing
    [ (-3,-2), (-6,-2.5) ], # left outer wing
    )
BKF5 = (
    [ (0,0), (3,-2) ], # right inner wing
    [ (3,-2), (6,-4) ], # right outer wing
    [ (0,0), (-3,-2) ], # left inner wing
    [ (-3,-2), (-6,-4) ], # left outer wing
    )
BKF6 = (
    [ (0,0), (3,-1) ], # right inner wing
    [ (3,-1), (6,-2.5) ], # right outer wing
    [ (0,0), (-3,-1) ], # left inner wing
    [ (-3,-1), (-6,-2.5) ], # left outer wing
    )
BKF7 = (
    [ (0,0), (3,0) ], # right inner wing
    [ (3,0), (6,-1) ], # right outer wing
    [ (0,0), (-3,0) ], # left inner wing
    [ (-3,0), (-6,-1) ], # left outer wing
    )
BKF8 = (
    [ (0,0), (3,0) ], # right inner wing
    [ (3,0), (6,0) ], # right outer wing
    [ (0,0), (-3,0) ], # left inner wing
    [ (-3,0), (-6,0) ], # left outer wing
    )
# figure key frame data
# Coordinates were digitized from reference images, so (0,0) is at the top
# left; normalize()/getheading() flip the y axis accordingly.
KF1 = (
    [ (700,225), (717,454) ], # body
    [ (717,454), (717,598) ], # top of moving leg
    [ (717,598), (819,698) ], # bottom of moving leg
    [ (717,454), (731,595) ], # top of standing leg
    [ (731,595), (744,734) ] # bottom of standing leg
    )
KF2 = (
    [ (698,223), (718,456) ], # body
    [ (718,456), (637,584) ], # top of moving leg
    [ (637,584), (601,737) ], # bottom of moving leg
    [ (718,456), (728,593) ], # top of standing leg
    [ (728,593), (742,731) ] # bottom of standing leg
    )
KF3 = (
    [ (699,225), (718,457) ], # body
    [ (718,457), (657,593) ], # top of moving leg
    [ (657,593), (604,738) ], # bottom of moving leg
    [ (718,457), (769,587) ], # top of standing leg
    [ (769,587), (825,732) ] # bottom of standing leg
    )
KF4 = (
    [ (697,224), (717,462) ], # body
    [ (717,462), (730,592) ], # top of moving leg
    [ (730,592), (744,732) ], # bottom of moving leg
    [ (717,462), (787,583) ], # top of standing leg
    [ (787,583), (895,712) ] # bottom of standing leg
    )
# at KF5, the legs swap positions, i.e., KF5 is a modified KF1, KF6
# is a modified KF2, &c.
KF5 = (
    [ (700,225), (717,454) ], # body
    [ (717,454), (731,595) ], # top of standing leg
    [ (731,595), (744,734) ], # bottom of standing leg
    [ (717,454), (717,598) ], # top of moving leg
    [ (717,598), (819,698) ] # bottom of moving leg
    )
KF6 = (
    [ (698,223), (718,456) ], # body
    [ (718,456), (728,593) ], # top of standing leg
    [ (728,593), (742,731) ], # bottom of standing leg
    [ (718,456), (637,584) ], # top of moving leg
    [ (637,584), (601,737) ] # bottom of moving leg
    )
KF7 = (
    [ (699,225), (718,457) ], # body
    [ (718,457), (769,587) ], # top of standing leg
    [ (769,587), (825,732) ], # bottom of standing leg
    [ (718,457), (657,593) ], # top of moving leg
    [ (657,593), (604,738) ] # bottom of moving leg
    )
KF8 = (
    [ (697,224), (717,462) ], # body
    [ (717,462), (787,583) ], # top of standing leg
    [ (787,583), (895,712) ], # bottom of standing leg
    [ (717,462), (730,592) ], # top of moving leg
    [ (730,592), (744,732) ] # bottom of moving leg
    )
_kfsegments = 0  # running counter used to give each key-frame shape a unique name

def getheading(x1, y1, x2, y2):
    """Turtle heading (degrees) of the segment (x1, y1) -> (x2, y2).

    Key-frame coordinates assume (0, 0) at the *top left* of the screen,
    hence the sign flip relative to the usual ``90 - atan2`` formula.
    """
    return 90 + math.degrees(math.atan2(y2 - y1, x2 - x1))
def getkfstart(kf):
    """Return the (x, y) starting point of a key frame (first segment's head)."""
    return kf[0][0]

def normalize(kf, i, scale):
    """Segment *i* of key frame *kf*, scaled and made relative to the frame start.

    The y axis is flipped because key-frame coordinates assume (0, 0) at
    the top left of the screen.
    """
    startx, starty = getkfstart(kf)
    [(ax, ay), (bx, by)] = kf[i]
    return [((ax - startx) * scale, -(ay - starty) * scale),
            ((bx - startx) * scale, -(by - starty) * scale)]
class Tween:
    """Plain container for one segment's tweening state.

    Holds the registered shape name, the start position/heading, and the
    per-step deltas (dx, dy, dh) needed to morph toward the next key frame.
    """

    def __init__(self):
        self.name = ''
        self.x = 0
        self.y = 0
        self.heading = 0
        self.dx = 0
        self.dy = 0
        self.dh = 0
def maketweendata(kf1, kf2, steps, scale):
    """Build per-segment tweening data morphing key frame kf1 into kf2.

    For each segment this registers its line as a turtle shape, records the
    starting position/heading, and computes the per-step x/y/heading deltas
    that reach kf2's pose after *steps* steps.  Returns a list of Tween
    containers, one per segment.
    """
    global _kfsegments
    assert len(kf1) == len(kf2) # must be able to match segments up
    L = []
    for i in range(len(kf1)):
        [ (x1, y1), (x2, y2) ] = normalize(kf1, i, scale)
        # Euclidean distance gives segment length
        seglen = ( (x2 - x1) ** 2 + (y2 - y1) ** 2 ) ** 0.5
        # make line segment into shape
        turtle.home()
        turtle.begin_poly()
        turtle.fd(seglen)
        turtle.end_poly()
        name = 'kf%d' % _kfsegments
        _kfsegments += 1
        turtle.register_shape(name, turtle.get_poly())
        # and compute initial heading
        heading = getheading(x1, y1, x2, y2)
        # extract out corresponding segment from key frame 2
        [ (x1b, y1b), (x2b, y2b) ] = normalize(kf2, i, scale)
        # use it to compute deltas for x, y, and heading; this is
        # where we need to be after N steps
        dx = x1b - x1
        dy = y1b - y1
        dh = getheading(x1b, y1b, x2b, y2b) - heading
        # weird special case that cropped up between BKF3 and BKF4 of
        # bird flap, where the computed delta in the heading takes the
        # long way around, as it were - adjust it to compensate
        if dh > 180:
            dh = dh - 360
        elif dh < -180:
            dh = dh + 360
        dx /= steps
        dy /= steps
        dh /= steps
        # place everything in a container
        c = Tween()
        c.name = name
        c.x, c.y = x1, y1
        c.heading = heading
        c.dx, c.dy = dx, dy
        c.dh = dh
        L.append(c)
    return L
if __name__ == '__main__':
    # set up the engine, screen and the random film-pop effect
    engine.init_screen(WIDTH, HEIGHT)
    engine.init_engine()
    turtle.bgcolor(BGCOLOR)
    engine.add_random_event(POPPROB, pop_cb)
    makepops()
    # parallax scrolling
    maketrees()
    # distant background
    engine.add_obj(Tree('smalltree', -50, LONGDISTY, +0.1, FGCOLOR))
    # medium-distance background
    engine.add_obj(Tree('bigtree', -WIDTH/2, MEDDISTY, +0.5, FGCOLOR))
    # ground
    makeground()
    for i in range(NGROUNDLINES):
        engine.add_obj(Ground(GROUNDX, GROUNDY, FGCOLOR))
    # precompute tweening data from key frames and add figure object
    makenothing()
    T = []
    KFL = [KF1, KF2, KF3, KF4, KF5, KF6, KF7, KF8]
    # pair each key frame with its successor (wrapping around to KF1)
    for (kf1, kf2) in zip(KFL, KFL[1:] + [KFL[0]]):
        T.append(maketweendata(kf1, kf2, NSTEPS, 0.35))
    engine.add_obj(Figure(T, FIGUREX, FIGUREY, FGCOLOR))
    # precompute tweening data from key frames and add bird object
    # slowly increase its size each animation cycle so it appears
    # to fly closer
    BT = []
    KFL = [BKF1, BKF2, BKF3, BKF4, BKF5, BKF6, BKF7, BKF8]
    for i in range(10, 50):
        for (kf1, kf2) in zip(KFL, KFL[1:] + [KFL[0]]):
            BT.append(maketweendata(kf1, kf2, NSTEPS, i / 10.0))
    engine.add_obj(Bird(BT, -100, 200, FGCOLOR))
    # hand control to the engine's main loop
    engine.engine()
| 3.21875 | 3 |
MSCL_Examples/Inertial/Python/setCurrentConfig.py | contagon/MSCL | 53 | 12773581 | <filename>MSCL_Examples/Inertial/Python/setCurrentConfig.py
#import the mscl library
import sys
sys.path.append("../../dependencies/Python")
import mscl
#TODO: change these constants to match your setup
COM_PORT = "COM4"
try:
    # create a Serial Connection with the specified COM Port, default baud rate of 921600
    connection = mscl.Connection.Serial(COM_PORT)
    # create an InertialNode with the connection
    node = mscl.InertialNode(connection)
    # many other settings are available than shown below
    # reference the documentation for the full list of commands
    # if the node supports AHRS/IMU
    if node.features().supportsCategory(mscl.MipTypes.CLASS_AHRS_IMU):
        ahrsImuChs = mscl.MipChannels()
        ahrsImuChs.append(mscl.MipChannel(mscl.MipTypes.CH_FIELD_SENSOR_SCALED_ACCEL_VEC, mscl.SampleRate.Hertz(500)))
        ahrsImuChs.append(mscl.MipChannel(mscl.MipTypes.CH_FIELD_SENSOR_SCALED_GYRO_VEC, mscl.SampleRate.Hertz(100)))
        # apply to the node
        node.setActiveChannelFields(mscl.MipTypes.CLASS_AHRS_IMU, ahrsImuChs)
    # if the node supports Estimation Filter
    if node.features().supportsCategory(mscl.MipTypes.CLASS_ESTFILTER):
        estFilterChs = mscl.MipChannels()
        estFilterChs.append(mscl.MipChannel(mscl.MipTypes.CH_FIELD_ESTFILTER_ESTIMATED_GYRO_BIAS, mscl.SampleRate.Hertz(100)))
        # apply to the node
        node.setActiveChannelFields(mscl.MipTypes.CLASS_ESTFILTER, estFilterChs)
    # if the node supports GNSS
    if node.features().supportsCategory(mscl.MipTypes.CLASS_GNSS):
        gnssChs = mscl.MipChannels()
        gnssChs.append(mscl.MipChannel(mscl.MipTypes.CH_FIELD_GNSS_LLH_POSITION, mscl.SampleRate.Hertz(1)))
        # apply to the node
        node.setActiveChannelFields(mscl.MipTypes.CLASS_GNSS, gnssChs)
    node.setPitchRollAid(True)
    node.setAltitudeAid(False)
    offset = mscl.PositionOffset(0.0, 0.0, 0.0)
    node.setAntennaOffset(offset)
# modernized from Python 2 syntax ('except mscl.Error, e' and the print
# statement) so the script also runs under Python 3
except mscl.Error as e:
    print("Error:", e)
| 2.109375 | 2 |
Scripts/allele_specific_expression/summarize_ae.py | LijiangLong/2020-peel-paper | 0 | 12773582 | <filename>Scripts/allele_specific_expression/summarize_ae.py
import pdb
import pandas as pd
import os
def main():
    """Summarize allele-specific expression counts.

    Reads per-sample total read counts from an Excel sheet, accumulates
    N2/CB4856 allele reads per gene from the per-replicate CSVs in
    *ae_folder*, and writes a reads-per-million summary CSV.
    """
    read_count_file = '/Users/lijiang/Downloads/Reads_count.xlsx'
    ae_folder = '/Users/lijiang/Desktop/results'
    output_file = '/Users/lijiang/Desktop/summary_results.csv'
    df = pd.read_excel(read_count_file)
    # total reads per sample (summed over replicates), used for RPM scaling
    sample_count = {}
    for i in range(len(df)):
        sample, replicate, count = df.iloc[i, :]
        sample_count[sample] = sample_count.get(sample, 0) + count
    # record[sample][gene] -> accumulated allele reads
    # BUG FIX: removed a leftover pdb.set_trace() debugger breakpoint here,
    # which halted the script on every run.
    record = {}
    for file in os.listdir(ae_folder):
        if not file.endswith('.csv'):
            continue
        # sample id is encoded in characters 6-8 of the file name
        key = file[6:9]
        if key not in record:
            record[key] = {}
        df = pd.read_csv(os.path.join(ae_folder, file))
        for i in range(len(df)):
            gene_name, public_name, n2_read, cb_read = df.iloc[i, :]
            gene_key = ','.join([gene_name, public_name])
            if gene_key not in record[key]:
                record[key][gene_key] = {'n2': 0, 'cb': 0}
            record[key][gene_key]['n2'] += n2_read
            record[key][gene_key]['cb'] += cb_read
    with open(output_file, 'w') as output_f:
        output_f.write('name,public_name,sample,n2_read,cb4856_read\n')
        for sample in record:
            for gene in record[sample]:
                # scale raw counts to reads per million for the sample
                n2_read = record[sample][gene]['n2'] / sample_count[sample] * 1000000
                cb4856_read = record[sample][gene]['cb'] / sample_count[sample] * 1000000
                output_f.write(','.join([gene, sample, '{:2f}'.format(n2_read), '{:2f}'.format(cb4856_read)]))
                output_f.write('\n')


if __name__ == "__main__":
    main()
ACME/loss/ReconstructionLoss.py | mauriziokovacic/ACME | 3 | 12773583 | from ..utility.strcmpi import *
from .Loss import *
class ReconstructionLoss(Loss):
    """
    A class representing the standard autoencoder reconstruction loss

    Attributes
    ----------
    fcn : str
        the reconstruction function to apply, either 'bce' or 'mse'
    """

    def __init__(self, *args, fcn='bce', name='Reconstruction', **kwargs):
        """
        Parameters
        ----------
        fcn : str (optional)
            the reconstruction function to apply, either 'bce' for binary cross entropy or
            'mse' for mean squared error (default is 'bce')
        """
        super(ReconstructionLoss, self).__init__(*args, name=name, **kwargs)
        self.__evalFcn = None
        # BUG FIX: the original assigned self.__fcn directly, which bypasses
        # the 'fcn' property setter below, so __evalFcn was never set to a
        # torch loss module and __eval__ would fail. Assigning through the
        # property validates the value and builds the evaluator.
        self.fcn = fcn

    def __eval__(self, x, x_hat):
        # evaluate the configured torch loss between the reconstruction and input
        return self.__evalFcn(x_hat, x)

    @property
    def fcn(self):
        # current reconstruction function name ('bce' or 'mse')
        return self.__fcn

    @fcn.setter
    def fcn(self, value):
        # setting the name also (re)builds the matching torch loss module
        if strcmpi(value, 'bce'):
            self.__fcn = value
            self.__evalFcn = torch.nn.BCELoss()
            return
        if strcmpi(value, 'mse'):
            self.__fcn = value
            self.__evalFcn = torch.nn.MSELoss()
            return
        raise RuntimeError('Unknown value for type. Expected ''bce'' or ''mse'', but got {}.'.format(value))
| 2.578125 | 3 |
scripts/keras_benchmarks/run_benchmark.py | MikulasZelinka/benchmarks | 2 | 12773584 | <gh_stars>1-10
""" Main entry point for running benchmarks with different Keras backends."""
from models import mnist_mlp_benchmark
from models import cifar10_cnn_benchmark
from models import lstm_benchmark
import upload_benchmarks_bq as bq
import argparse
import keras
import json
if keras.backend.backend() == "tensorflow":
import tensorflow as tf
if keras.backend.backend() == "theano":
import theano
if keras.backend.backend() == "cntk":
import cntk
# parse --mode (e.g. 'cpu', 'gpu', 'multi_gpu'); it selects a config section
parser = argparse.ArgumentParser()
parser.add_argument('--mode',
                    help='The benchmark can be run on cpu, gpu and multiple gpus.')
args = parser.parse_args()
# Load the json config file for the requested mode.
# Use a context manager so the config file handle is always closed
# (the original opened the file and never closed it).
with open("benchmarks/scripts/keras_benchmarks/config.json", 'r') as config_file:
    config_contents = config_file.read()
config = json.loads(config_contents)[args.mode]
def get_backend_version():
    """Return the active Keras backend's version string, or 'undefined'."""
    backend = keras.backend.backend()
    if backend == "tensorflow":
        return tf.__version__
    if backend == "theano":
        return theano.__version__
    if backend == "cntk":
        return cntk.__version__
    return "undefined"
def _upload_metrics(current_model):
    """Upload one finished benchmark's timings plus the platform/backend
    configuration (from the loaded config section) to BigQuery."""
    bq.upload_metrics_to_bq(test_name=current_model.test_name,
                            total_time=current_model.total_time,
                            epochs=current_model.epochs,
                            batch_size=current_model.batch_size,
                            backend_type=keras.backend.backend(),
                            backend_version=get_backend_version(),
                            cpu_num_cores=config['cpu_num_cores'],
                            cpu_memory=config['cpu_memory'],
                            cpu_memory_info=config['cpu_memory_info'],
                            gpu_count=config['gpus'],
                            gpu_platform=config['gpu_platform'],
                            platform_type=config['platform_type'],
                            platform_machine_type=config['platform_machine_type'],
                            keras_version=keras.__version__,
                            sample_type=current_model.sample_type)
# Run each benchmark (MNIST MLP, CIFAR10 CNN, LSTM) in turn and upload
# its metrics. Each model is constructed just before it runs, matching
# the original construct/run/upload interleaving.
for benchmark_cls in (mnist_mlp_benchmark.MnistMlpBenchmark,
                      cifar10_cnn_benchmark.Cifar10CnnBenchmark,
                      lstm_benchmark.LstmBenchmark):
    model = benchmark_cls()
    model.run_benchmark(gpus=config['gpus'])
    _upload_metrics(model)
| 2.328125 | 2 |
files/myMain.py | JessevanKempen/nutils | 0 | 12773585 | import numpy as np
import pymc3 as pm
import theano
import theano.tensor as tt
# for reproducibility here's some version info for modules used in this notebook
import platform
import IPython
import matplotlib
import matplotlib.pyplot as plt
import emcee
import corner
import os
from autograd import grad
from files.myIOlib import show_seaborn_plot
# Log versions of every numerically relevant package for reproducibility
print("Python version: {}".format(platform.python_version()))
print("IPython version: {}".format(IPython.__version__))
print("Numpy version: {}".format(np.__version__))
print("Theano version: {}".format(theano.__version__))
print("PyMC3 version: {}".format(pm.__version__))
print("Matplotlib version: {}".format(matplotlib.__version__))
print("emcee version: {}".format(emcee.__version__))
print("corner version: {}".format(corner.__version__))
import numpy as np
import pymc3 as pm
import arviz as az
#Ordering imports
from myIOlib import *
from myModel import *
from myFUQ import *
#Ordering tools
import numpy as np
import arviz as az
from scipy import stats
import matplotlib as mpl
from theano import as_op
import theano.tensor as tt
import scipy.special as sc
import math
import time
# Start timing code execution
t0 = time.time()
################# User settings ###################
# Define the amount of samples
N = 1
# Define time of simulation
timestep = 30  # size of one timestep [min]
endtime = 10320  # end of the drawdown phase [min]
t1steps = round(endtime / timestep)  # number of timesteps until t1
Nt = 2*t1steps+1  # total number of evaluation times (drawdown + buildup)
x = timestep * np.linspace(0, 2 * t1steps, Nt)  # evaluation times [min]
# Units
MPA = 1e6  # pascals per megapascal
# Forward/Bayesian Inference calculation
performInference = True  # True: Bayesian inference; False: forward UQ
useFEA = False  # True: finite-element forward model; False: analytical
# Location to store output
outfile = 'output/output_%d.png' % N
#################### Core #########################
# Generate text file for parameters
generate_txt( "parameters.txt" )
# Import parameters.txt to variables
print("Reading model parameters...")
params_aquifer, params_well = read_from_txt( "parameters.txt" )
# Construct the objects for the doublet model
print("Constructing the doublet model...")
aquifer = Aquifer(params_aquifer)
# doublet = DoubletGenerator(aquifer)
from myUQ import *
from files.myUQlib import *
if not performInference:
# Run Bayesian FUQ (input parameters not np. but pm. -> random values, as pdf not work in FEA -> output array of values -> mean, stdv -> pm. )
import pymc3 as pm
from pymc3.distributions import Interpolated
print('Running on PyMC3 v{}'.format(pm.__version__))
# Run Forward Uncertainty Quantification
print("\r\nSolving Forward Uncertainty Quantification...")
# Set input from stoichastic parameters
print("\r\nSetting input from stoichastic parameters...")
parametersRVS = generateRVSfromPDF(N)
print("Stoichastic parameters", parametersRVS)
if useFEA:
# Run Finite Element Analysis (Forward)
print("\r\nRunning FEA...")
sol = performFEA(parametersRVS, aquifer, N, timestep, endtime)
else:
# # Run Analytical Analysis (Forward)
print("\r\nRunning Analytical Analysis...")
sol = performAA(parametersRVS, x)
###########################
# Post processing #
###########################
# Output pressure/temperature matrix and plot for single point in time
fig, ax = plt.subplots(1, 1, figsize=(10, 7), tight_layout=True)
ax.set(xlabel='Wellbore pressure [Pa]', ylabel='Probability')
ax.hist(sol[0][:, t1steps], density=True, histtype='stepfilled', alpha=0.2, bins=20)
# plt.show()
# Evaluate the doublet model
print("\r\nEvaluating numerical solution for the doublet model...")
doublet = DoubletGenerator(aquifer, sol)
pnodelist, Tnodelist = evaluateDoublet(doublet)
######## Inverse Uncertainty Quantification #########
else:
# Run Bayesian Inference
import pymc3 as pm
from pymc3.distributions import Interpolated
from pymc3.distributions.timeseries import EulerMaruyama
print('Running on PyMC3 v{}'.format(pm.__version__))
# Set distribution settings
chains = 4
ndraws = 15 # number of draws from the distribution
nburn = 5 # number of "burn-in points" (which we'll discard)
# Library functions
def get_𝜇_K(porosity, size):
constant = np.random.uniform(low=10, high=100, size=size) # np.random.uniform(low=3.5, high=5.8, size=size)
tau = np.random.uniform(low=0.3, high=0.5, size=size)
tothepower = np.random.uniform(low=3, high=5, size=size)
rc = np.random.uniform(low=10e-6, high=30e-6, size=size)
SSA = 3 / rc
permeability = constant * tau ** 2 * (porosity.random(size=N) ** tothepower / SSA ** 2)
𝜇_K = np.mean(permeability)
# constant = np.random.uniform(low=3.5, high=5.8, size=N)
# tothepower = np.random.uniform(low=3, high=5, size=N)
# Tau = (2) ** (1 / 2)
# S0_sand = np.random.uniform(low=1.5e2, high=2.2e2, size=N) # specific surface area [1/cm]
# K_samples = constant * (φpdf.random(size=N) ** tothepower / S0_sand ** 2)
# Kpdf = pm.Lognormal('K', mu=math.log(np.mean(K_samples)), sd=1) #joined distribution
return 𝜇_K
###########################
# Synthetic data #
###########################
# Set up our data
Nt = Nt # number of data points
CV = 0.001 # coefficient of variation noise
# True data
K_true = 1e-12 # 2.2730989084434785e-08
φ_true = 0.163
H_true = 70
ct_true = 1e-10
Q_true = 0.07
cs_true = 2650
# Lognormal priors for true parameters
Hpdf = stats.lognorm(scale=H_true, s=0.01)
φpdf = stats.lognorm(scale=φ_true, s=0.01)
Kpdf = stats.lognorm(scale=K_true, s=0.01)
ctpdf = stats.lognorm(scale=ct_true, s=0.01)
Qpdf = stats.lognorm(scale=Q_true, s=0.01)
cspdf = stats.lognorm(scale=cs_true, s=0.01)
theta = parametersRVS = [Hpdf.rvs(size=1), φpdf.rvs(size=1), Kpdf.rvs(size=1), ctpdf.rvs(size=1),
Qpdf.rvs(size=1), cspdf.rvs(size=1)]
# parametersRVS = [H_true, φ_true, K_true, ct_true, Q_true, cs_true]
# theta = parametersRVS = [H_true, φ_true, K_true, ct_true, Q_true, cs_true]
truemodel = my_model(theta, x)
print("truemodel", truemodel)
# Make data
np.random.seed(716742) # set random seed, so the data is reproducible each time
sigma = CV * np.mean(truemodel)
# data = sigma * np.random.randn(Nt) + truemodel
# Use real data
data = get_welldata('PBH')
# plot transient test
parameters = {'axes.labelsize': 14,
'axes.titlesize': 18}
plt.rcParams.update(parameters)
plt.figure(figsize=(10, 3))
# plt.subplot(121)
plt.plot(truemodel/MPA, 'k', label='$p_{true}$', alpha=0.5), plt.plot(data/MPA, 'r', label='$σ_{noise} = 1.0e-2$', alpha=0.5),\
plt.ylabel("p(t) [MPa]"), plt.xlabel("t [min]"), #plt.legend()
plt.tight_layout()
plt.show()
# Create our Op
logl = LogLikeWithGrad(my_loglike, data, x, sigma)
print(logl)
###########################
# Synthetic data #
###########################
# with pm.Model() as SyntheticModel:
#
# # True data (what actually drives the true pressure)
# K_true = 1e-12 # 2.2730989084434785e-08
# φ_true = 0.163
# H_true = 70
# ct_true = 1e-10
# Q_true = 0.07
# cs_true = 2650
#
# # Lognormal priors for true parameters
# Hpdf = pm.Lognormal('H', mu=np.log(H_true), sd=0.01)
# φpdf = pm.Lognormal('φ', mu=np.log(φ_true), sd=0.01)
# Kpdf = pm.Lognormal('K', mu=np.log(K_true), sd=0.01)
# ctpdf = pm.Lognormal('ct', mu=np.log(ct_true), sd=0.01)
# Qpdf = pm.Lognormal('Q', mu=np.log(Q_true), sd=0.01)
# cspdf = pm.Lognormal('cs', mu=np.log(cs_true), sd=0.01)
# parametersRVS = [Hpdf.random(size=Nt), φpdf.random(size=Nt), Kpdf.random(size=Nt), ctpdf.random(size=Nt),
# Qpdf.random(size=Nt), cspdf.random(size=Nt)]
#
# # parametersRVS = [H_true, φ_true, K_true, ct_true, Q_true, cs_true]
# solAA = performAA(parametersRVS, aquifer, N, timestep, endtime)
# p_true = np.mean(solAA[0].T, axis=1)
# print(p_true)
#
# # Z_t observed data
# np.random.seed(716742) # set random seed, so the data is reproducible each time
# σnoise = 0.1
# sd_p = σnoise * np.var(p_true) ** 0.5
# z_t = p_true + np.random.randn(Nt) * sd_p
# use PyMC3 to sampler from log-likelihood
with pm.Model() as opmodel:
###########################
# Prior information #
###########################
# Mean of expert variables (the specific informative prior)
𝜇_H = aquifer.H # lower_H = 35, upper_H = 105 (COV = 50%)
𝜇_φ = aquifer.φ # lower_φ = 0.1, upper_φ = 0.3 (COV = 50%)
𝜇_ct = aquifer.ct # lower_ct = 0.5e-10, upper_ct = 1.5e-10 (COV = 50%)
𝜇_Q = aquifer.Q # lower_Q = 0.35, upper_Q = 0.105 (COV = 50%)
𝜇_cs = aquifer.cps # lower_cs = 1325 upper_cs = 3975 (COV = 50%)
# Standard deviation of variables (CV=50%)
sd_H = 0.3
sd_φ = 0.3
sd_K = 0.3
sd_ct = 0.3
sd_Q = 0.3
sd_cs = 0.001
# Lognormal priors for unknown model parameters
Hpdf = pm.Uniform('H', lower=35, upper=105)
φpdf = pm.Uniform('φ', lower=0.1, upper=0.3)
Kpdf = pm.Uniform('K', lower=0.5e-13, upper=1.5e-13)
ctpdf = pm.Uniform('ct', lower=0.5e-10, upper=1.5e-10)
Qpdf = pm.Uniform('Q', lower=0.035, upper=0.105)
cspdf = pm.Uniform('cs', lower=1325, upper=3975)
# Hpdf = pm.Lognormal('H', mu=np.log(𝜇_H), sd=sd_H)
# φpdf = pm.Lognormal('φ', mu=np.log(𝜇_φ), sd=sd_φ)
# Kpdf = pm.Lognormal('K', mu=np.log(get_𝜇_K(φpdf, N)), sd=sd_K)
# ctpdf = pm.Lognormal('ct', mu=np.log(𝜇_ct), sd=sd_ct)
# Qpdf = pm.Lognormal('Q', mu=np.log(𝜇_Q), sd=sd_Q)
# cspdf = pm.Lognormal('cs', mu=np.log(𝜇_cs), sd=sd_cs)
thetaprior = [Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf]
# convert thetaprior to a tensor vector
theta = tt.as_tensor_variable([Hpdf, φpdf, Kpdf, ctpdf, Qpdf, cspdf])
# use a DensityDist
pm.DensityDist(
'likelihood',
lambda v: logl(v),
observed={'v': theta}
# random=my_model_random
)
with opmodel:
# Inference
trace = pm.sample(ndraws, cores=1, chains=chains, tune=nburn, discard_tuned_samples=True)
# plot the traces
print(az.summary(trace, round_to=2))
_ = pm.traceplot(trace, lines=(('K', {}, [K_true ]), ('φ', {}, [φ_true]), ('H', {}, [H_true]), ('ct', {}, [ct_true])
, ('Q', {}, [Q_true]), ('cs', {}, [cs_true])))
# put the chains in an array (for later!)
# samples_pymc3_2 = np.vstack((trace['K'], trace['φ'], trace['H'], trace['ct'], trace['Q'], trace['cs'])).T
# just because we can, let's draw posterior predictive samples of the model
# ppc = pm.sample_posterior_predictive(trace, samples=250, model=opmodel)
# _, ax = plt.subplots()
#
# for vals in ppc['likelihood']:
# plt.plot(x, vals, color='b', alpha=0.05, lw=3)
# ax.plot(x, my_model([H_true, φ_true, K_true, ct_true, Q_true, cs_true], x), 'k--', lw=2)
#
# ax.set_xlabel("Predictor (stdz)")
# ax.set_ylabel("Outcome (stdz)")
# ax.set_title("Posterior predictive checks");
###########################
# Post processing #
###########################
# print('Posterior distributions.')
# cmap = mpl.cm.autumn
# for param in ['K', 'φ', 'H', 'ct', 'Q', 'cs']:
# plt.figure(figsize=(8, 2))
# samples = trace[param]
# smin, smax = np.min(samples), np.max(samples)
# x = np.linspace(smin, smax, 100)
# y = stats.gaussian_kde(samples)(x)
# plt.axvline({'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q': Q_true, 'cs': cs_true}[param], c='k')
# plt.ylabel('Probability density')
# plt.title(param)
#
# plt.tight_layout();
data_spp = az.from_pymc3(trace=trace)
trace_K = az.plot_posterior(data_spp, var_names=['K'], kind='hist')
trace_φ = az.plot_posterior(data_spp, var_names=['φ'], kind='hist')
trace_H = az.plot_posterior(data_spp, var_names=['H'], kind='hist')
trace_Q = az.plot_posterior(data_spp, var_names=['Q'], kind='hist')
trace_ct = az.plot_posterior(data_spp, var_names=['ct'], kind='hist')
trace_cs = az.plot_posterior(data_spp, var_names=['cs'], kind='hist')
joint_plt = az.plot_joint(data_spp, var_names=['K', 'φ'], kind='kde', fill_last=False);
# trace_fig = az.plot_trace(trace, var_names=[ 'H', 'φ', 'K', 'ct', 'Q', 'cs'], compact=True);
plt.show()
# a = np.random.uniform(0.1, 0.3)
# b = np.random.uniform(0.5e-12, 1.5e-12)
# _, ax = plt.subplots(1, 2, figsize=(10, 4))
# az.plot_dist(a, color="C1", label="Prior", ax=ax[0])
# az.plot_posterior(data_spp, color="C2", var_names=['φ'], ax=ax[1], kind='hist')
# az.plot_dist(b, color="C1", label="Prior", ax=ax[1])
# az.plot_posterior(data_spp, color="C2", var_names=['K'], label="Posterior", ax=ax[0], kind='hist')
plt.show()
with pm.Model() as PriorModel:
###########################
# Prior information #
###########################
# Mean of expert variables (the specific informative prior)
𝜇_H = aquifer.H # lower_H = 35, upper_H = 105 (COV = 50%)
𝜇_φ = aquifer.φ # lower_φ = 0.1, upper_φ = 0.3 (COV = 50%)
𝜇_ct = aquifer.ct # lower_ct = 0.5e-10, upper_ct = 1.5e-10 (COV = 50%)
𝜇_Q = aquifer.Q # lower_Q = 0.35, upper_Q = 0.105 (COV = 50%)
𝜇_cs = aquifer.cps # lower_cs = 1325 upper_cs = 3975 (COV = 50%)
# Standard deviation of variables (CV=50%)
sd_H = 0.3
sd_φ = 0.3
sd_K = 0.3
sd_ct = 0.3
sd_Q = 0.3
sd_cs = 0.001
# Lognormal priors for unknown model parameters
Hpdf = pm.Lognormal('H', mu=np.log(𝜇_H), sd=sd_H)
φpdf = pm.Lognormal('φ', mu=np.log(𝜇_φ), sd=sd_φ)
Kpdf = pm.Lognormal('K', mu=np.log(get_𝜇_K(φpdf, N)), sd=sd_K)
ctpdf = pm.Lognormal('ct', mu=np.log(𝜇_ct), sd=sd_ct)
Qpdf = pm.Lognormal('Q', mu=np.log(𝜇_Q), sd=sd_Q)
cspdf = pm.Lognormal('cs', mu=np.log(𝜇_cs), sd=sd_cs)
# Uniform priors for unknown model parameters
# Hpdf = pm.Uniform('H', lower=35, upper=105)
# φpdf = pm.Lognormal('φ', mu=np.log(𝜇_φ), sd=sd_φ)
#φpdf = pm.Uniform('φ', lower=0.1, upper=0.3)
# Kpdf = pm.Lognormal('K', mu=np.log(get_𝜇_K(φpdf, N)), sd=sd_K)
# ctpdf = pm.Uniform('ct', lower=0.5e-10, upper=1.5e-10)
# Qpdf = pm.Uniform('Q', lower=0.035, upper=0.105)
# cspdf = pm.Uniform('cs', lower=1325, upper=3975)
theta = [Hpdf.random(size=1), φpdf.random(size=1), Kpdf.random(size=1), ctpdf.random(size=1), Qpdf.random(size=1), cspdf.random(size=1)]
# Run Analytical Analysis (Backward)
print("\r\nRunning Analytical Analysis... (Prior, pymc3)")
# p_t = my_model(theta, x) # draw single sample multiple points in time
# p_t = np.mean(solAA[0].T, axis=1) # draw single sample multiple points in time
# Likelihood (sampling distribution) of observations
# z_h = pm.Lognormal('z_h', mu=np.log(p_t), sd=sigma, observed=np.log(data))
# plot 95% CI with seaborn
# with open('pprior.npy', 'wb') as pprior:
# np.save(pprior, p)
# show_seaborn_plot('pprior.npy', "pwell")
# plt.show()
# mu_p = np.mean(p_t)
# sd_p = np.var(p_t) ** 0.5
# p = pm.Lognormal('p', mu=np.log(mu_p), sd=sd_p)
# # Likelihood (predicted distribution) of observations
# y = pm.Normal('y', mu=p, sd=1e4, observed=z_t)
# with PriorModel:
# # Inference
# start = pm.find_MAP() # Find starting value by optimization
# step = pm.NUTS(scaling=start) # Instantiate MCMC sampling algoritm #HamiltonianMC
#
# trace = pm.sample(10000, start=start, step=step, cores=1, chains=chains)
#
# print(az.summary(trace, round_to=2))
# chain_count = trace.get_values('K').shape[0]
# T_pred = pm.sample_posterior_predictive(trace, samples=chain_count, model=PriorModel)
# data_spp = az.from_pymc3(trace=trace)
# joint_plt = az.plot_joint(data_spp, var_names=['K', 'φ'], kind='kde', fill_last=False);
# trace_fig = az.plot_trace(trace, var_names=[ 'H', 'φ', 'K', 'ct', 'Q', 'cs'], figsize=(12, 8));
# az.plot_trace(trace, var_names=['H', 'φ', 'K', 'ct', 'Q'], compact=True);
# fig, axes = az.plot_forest(trace, var_names=['H', 'φ', 'K', 'ct', 'Q'], combined=True) #94% confidence interval with only lines (must normalize the means!)
# axes[0].grid();
# trace_H = az.plot_posterior(data_spp, var_names=['φ'], kind='hist')
# trace_p = az.plot_posterior(data_spp, var_names=['p'], kind='hist')
# pm.traceplot(trace)
# plt.show()
traces = [trace]
for _ in range(2):
with pm.Model() as InferenceModel:
# Priors are posteriors from previous iteration
H = from_posterior('H', trace['H'])
φ = from_posterior('φ', trace['φ'])
K = from_posterior('K', trace['K'])
ct = from_posterior('ct', trace['ct'])
Q = from_posterior('Q', trace['Q'])
cs = from_posterior('cs', trace['cs'])
# Random sample method
# parametersRVS = [H.random(size=Nt), φ.random(size=Nt), K.random(size=Nt), ct.random(size=Nt), Q.random(size=Nt), cs.random(size=Nt)]
print("\r\nRunning Analytical Analysis... (Backward, pymc3)")
# solAA = performAA(parametersRVS, aquifer, N, timestep, endtime)
# p_t = np.mean(solAA[0].T, axis=1) # draw single sample multiple points in time
# Likelihood (sampling distribution) of observations
# z_h = pm.Lognormal('z_h', mu=np.log(p_t), sd=sd_p, observed=np.log(z_t))
# Inference
# start = pm.find_MAP()
# step = pm.NUTS(scaling=start)
# trace = pm.sample(ndraws, start=start, step=step, cores=1, chains=chains)
thetaprior = [H, φ, K, ct, Q, cs]
# convert thetaprior to a tensor vector
theta = tt.as_tensor_variable([H, φ, K, ct, Q, cs])
# use a DensityDist
pm.DensityDist(
'likelihood',
lambda v: logl(v),
observed={'v': theta}
# random=my_model_random
)
trace = pm.sample(ndraws, cores=1, chains=chains)
traces.append(trace)
# plt.figure(figsize=(10, 3))
# plt.subplot(121)
# plt.plot(np.percentile(trace[ph], [2.5, 97.5], axis=0).T, 'k', label='$\hat{x}_{95\%}(t)$')
# plt.plot(p_t, 'r', label='$p(t)$')
# plt.legend()
#
# plt.subplot(122)
# plt.hist(trace[lam], 30, label='$\hat{\lambda}$', alpha=0.5)
# plt.axvline(porosity_true, color='r', label='$\lambda$', alpha=0.5)
# plt.legend();
#
# plt.figure(figsize=(10, 6))
# plt.subplot(211)
# plt.plot(np.percentile(trace[ph][..., 0], [2.5, 97.5], axis=0).T, 'k', label='$\hat{p}_{95\%}(t)$')
# plt.plot(ps, 'r', label='$p(t)$')
# plt.legend(loc=0)
# plt.subplot(234), plt.hist(trace['Kh']), plt.axvline(K), plt.xlim([1e-13, 1e-11]), plt.title('K')
# plt.subplot(235), plt.hist(trace['φh']), plt.axvline(φ), plt.xlim([0, 1.0]), plt.title('φ')
# plt.subplot(236), plt.hist(trace['Hh']), plt.axvline(m), plt.xlim([50, 100]), plt.title('H')
# plt.tight_layout()
#
# plt.show()
###########################
# Post processing #
###########################
print('Posterior distributions after ' + str(len(traces)) + ' iterations.')
cmap = mpl.cm.autumn
for param in ['K', 'φ', 'H', 'ct', 'Q']:
plt.figure(figsize=(8, 2))
for update_i, trace in enumerate(traces):
samples = trace[param]
smin, smax = np.min(samples), np.max(samples)
x = np.linspace(smin, smax, 100)
y = stats.gaussian_kde(samples)(x)
plt.plot(x, y, color=cmap(1 - update_i / len(traces)))
plt.axvline({'K': K_true, 'φ': φ_true, 'H': H_true, 'ct': ct_true, 'Q': Q_true}[param], c='k')
plt.ylabel('Frequency')
plt.title(param)
plt.tight_layout();
plt.show()
# Stop timing code execution
t2 = time.time()
print("CPU time [s] : ", t2 - t0)
# Stop timing code execution
print("\r\nDone. Post-processing...")
#################### Postprocessing #########################
print('Post processing. Plot 95% CI with seaborn')
cmap = mpl.cm.autumn
plt.figure(figsize=(8, 2))
for node in range(len(pnodelist)):
with open('pnode' + str(node+2) + '.npy', 'wb') as f:
np.save(f, pnodelist[node])
show_seaborn_plot('pnode' + str(node+2) + '.npy', str(node+2))
# plt.legend(str(node+2))
plt.xlabel("t [min]", size=14)
plt.ylabel("p(t) [MPa]", size=14)
plt.tight_layout();
plt.figure(figsize=(8, 2))
for node in range(len(Tnodelist)):
with open('Tnode' + str(node+2) + '.npy', 'wb') as f:
np.save(f, Tnodelist[node])
show_seaborn_plot('Tnode' + str(node+2) + '.npy', str(node+2))
plt.legend(str(node+2))
plt.xlabel("t [min]", size=14)
plt.ylabel("T(t) [K]", size=14)
plt.tight_layout();
# plt.figure(figsize=(8, 2))
# with open('power.npy', 'wb') as f:
# np.save(f, doublet.Phe/1e6)
# show_seaborn_plot('power.npy', 'power output')
# plt.xlabel("t [min]", size=14)
# plt.ylabel("P(t) [MW]", size=14)
plt.show()
# plot 95% CI with seaborn
# with open('pprior.npy', 'wb') as pprior:
# np.save(pprior, sol[0])
#
# show_seaborn_plot('pprior.npy', "p9")
# plt.show()
# with open('pmatrix.npy', 'rb') as f:
# a = np.load(f)
# print("saved solution matrix", a)
# plot 95% CI with seaborn
# with open('pnode9.npy', 'wb') as f9:
# np.save(f9, doublet.pnode9)
#
# with open('pnode8.npy', 'wb') as f8:
# np.save(f8, doublet.pnode8)
# plot_solution(sol, outfile)
# plt.show()
| 2 | 2 |
resources/lib/parsers/AVIParser.py | mkwilliams14/service.tvtunes | 0 | 12773586 | <reponame>mkwilliams14/service.tvtunes
# Copied (and slightly altered) from script.pseudotv.live
# with permission of Lunatixz:
# http://forum.xbmc.org/showthread.php?tid=177296
# On 21st January 2014
# https://github.com/Lunatixz/script.pseudotv.live/tree/master/resources/lib/parsers
import os, struct
from resources.lib.parsers.FileAccess import FileAccess
class AVIChunk:
    """A RIFF data chunk: a fourcc tag followed by a little-endian size and payload."""

    def __init__(self):
        self.empty()

    def empty(self):
        """Reset to a blank chunk (datatype 1 marks 'chunk' as opposed to 'list')."""
        self.size = 0
        self.fourcc = ''
        self.datatype = 1
        self.chunk = ''

    def read(self, thefile):
        """Read the chunk size and payload from *thefile* (fourcc already consumed)."""
        raw = thefile.read(4)
        try:
            (self.size,) = struct.unpack('<i', raw)
        except:
            self.size = 0

        # Sanity cap on the chunk size, in case the file is corrupt.
        if 0 < self.size < 10000:
            self.chunk = thefile.read(self.size)
        else:
            self.chunk = ''
            self.size = 0
class AVIList:
    """A RIFF LIST container: a size plus the four-character list type."""

    def __init__(self):
        self.empty()

    def empty(self):
        """Reset to a blank list (datatype 2 marks 'list')."""
        self.size = 0
        self.fourcc = ''
        self.datatype = 2

    def read(self, thefile):
        """Read the list size and its fourcc type from *thefile*."""
        raw = thefile.read(4)
        try:
            (self.size,) = struct.unpack('<i', raw)
        except:
            self.size = 0

        self.fourcc = thefile.read(4)
class AVIHeader:
    """Field holder for the AVI main header ('avih' chunk); filled by AVIParser.parseHeader."""

    def __init__(self):
        self.empty()

    def empty(self):
        """Reset every header field to zero."""
        (self.dwMicroSecPerFrame, self.dwMaxBytesPerSec,
         self.dwPaddingGranularity, self.dwFlags, self.dwTotalFrames,
         self.dwInitialFrames, self.dwStreams, self.dwSuggestedBufferSize,
         self.dwWidth, self.dwHeight) = (0,) * 10
class AVIStreamHeader:
    """Field holder for an AVI stream header ('strh' chunk); filled by AVIParser.parseStreamHeader."""

    def __init__(self):
        self.empty()

    def empty(self):
        """Reset string fields to '' and every numeric field to zero."""
        self.fccType = ''
        self.fccHandler = ''
        self.rcFrame = ''
        (self.dwFlags, self.wPriority, self.wLanguage, self.dwInitialFrame,
         self.dwScale, self.dwRate, self.dwStart, self.dwLength,
         self.dwSuggestedBuffer, self.dwQuality, self.dwSampleSize) = (0,) * 11
class AVIParser:
    """Parse just enough of an AVI (RIFF) file to determine its duration.

    Walks the RIFF structure: the outer 'AVI ' chunk, the 'hdrl' list, the
    'avih' main header and then each stream header until the video stream
    ('vids') is found.

    NOTE(review): the fourcc comparisons below use ``str`` literals.  If
    ``FileAccess.open(..., "rb")`` returns ``bytes`` (as a plain Python 3
    ``open`` would), those comparisons never match -- confirm the target
    runtime (this code originates from a Python 2 era Kodi addon).
    """

    def __init__(self):
        self.Header = AVIHeader()
        self.StreamHeader = AVIStreamHeader()

    def log(self, msg):
        """Route log output through the addon's FileAccess logger."""
        FileAccess.log("AVIParser: %s" % msg)

    def determineLength(self, filename):
        """Return the duration of *filename* in seconds, or 0 on any failure."""
        self.log("determineLength " + filename)

        try:
            self.File = FileAccess.open(filename, "rb", None)
        except:
            self.log("Unable to open the file")
            return 0

        dur = self.readHeader()
        self.File.close()
        self.log('Duration: ' + str(dur))
        return dur

    def readHeader(self):
        """Walk the RIFF header structure; return the video duration or 0."""
        # AVI Chunk
        data = self.getChunkOrList()

        if data.datatype != 2:
            self.log("Not an avi")
            return 0

        if data.fourcc[0:4] != "AVI ":
            self.log("Wrong FourCC")
            return 0

        # Header List
        data = self.getChunkOrList()

        if data.fourcc != "hdrl":
            self.log("Header not found")
            return 0

        # Header chunk
        data = self.getChunkOrList()

        if data.fourcc != 'avih':
            self.log('Header chunk not found')
            return 0

        self.parseHeader(data)

        # Stream list
        data = self.getChunkOrList()

        # Defensive cap in case the header reports a bogus stream count.
        if self.Header.dwStreams > 10:
            self.Header.dwStreams = 10

        for i in range(self.Header.dwStreams):
            if data.datatype != 2:
                self.log("Unable to find streams")
                return 0

            listsize = data.size
            # Stream chunk number 1, the stream header
            data = self.getChunkOrList()

            if data.datatype != 1:
                self.log("Broken stream header")
                return 0

            self.StreamHeader.empty()
            self.parseStreamHeader(data)

            # If this is the video header, determine the duration
            if self.StreamHeader.fccType == 'vids':
                return self.getStreamDuration()

            # If this isn't the video header, skip through the rest of these
            # stream chunks
            try:
                if listsize - data.size - 12 > 0:
                    self.File.seek(listsize - data.size - 12, 1)

                data = self.getChunkOrList()
            except:
                self.log("Unable to seek")

        self.log("Video stream not found")
        return 0

    def getStreamDuration(self):
        """Duration in seconds: dwLength / (dwRate / dwScale); 0 on bad/zero data."""
        try:
            return int(self.StreamHeader.dwLength / (float(self.StreamHeader.dwRate) / float(self.StreamHeader.dwScale)))
        except:
            # Division by zero or non-numeric fields from a corrupt header.
            return 0

    def parseHeader(self, data):
        """Unpack the 'avih' chunk payload into self.Header."""
        try:
            # 14 little-endian ints; only the first 10 are used here
            # (the remainder of the avih struct is ignored).
            header = struct.unpack('<iiiiiiiiiiiiii', data.chunk)
            self.Header.dwMicroSecPerFrame = header[0]
            self.Header.dwMaxBytesPerSec = header[1]
            self.Header.dwPaddingGranularity = header[2]
            self.Header.dwFlags = header[3]
            self.Header.dwTotalFrames = header[4]
            self.Header.dwInitialFrames = header[5]
            self.Header.dwStreams = header[6]
            self.Header.dwSuggestedBufferSize = header[7]
            self.Header.dwWidth = header[8]
            self.Header.dwHeight = header[9]
        except:
            self.Header.empty()
            self.log('Unable to parse the header')

    def parseStreamHeader(self, data):
        """Unpack a stream header chunk payload into self.StreamHeader."""
        try:
            self.StreamHeader.fccType = data.chunk[0:4]
            self.StreamHeader.fccHandler = data.chunk[4:8]
            # NOTE(review): the trailing 'd' reads the 8 rcFrame bytes as a
            # double; the decoded value is discarded (rcFrame is reset below).
            header = struct.unpack('<ihhiiiiiiiid', data.chunk[8:])
            self.StreamHeader.dwFlags = header[0]
            self.StreamHeader.wPriority = header[1]
            self.StreamHeader.wLanguage = header[2]
            self.StreamHeader.dwInitialFrame = header[3]
            self.StreamHeader.dwScale = header[4]
            self.StreamHeader.dwRate = header[5]
            self.StreamHeader.dwStart = header[6]
            self.StreamHeader.dwLength = header[7]
            self.StreamHeader.dwSuggestedBuffer = header[8]
            self.StreamHeader.dwQuality = header[9]
            self.StreamHeader.dwSampleSize = header[10]
            self.StreamHeader.rcFrame = ''
        except:
            self.StreamHeader.empty()
            self.log("Error reading stream header")

    def getChunkOrList(self):
        """Read the next 4-byte tag and return a populated AVIList or AVIChunk.

        datatype on the returned object: 1 = chunk, 2 = list, 3 = end of file.
        """
        data = self.File.read(4)

        if data == "RIFF" or data == "LIST":
            dataclass = AVIList()
        elif len(data) == 0:
            # EOF: signal with datatype 3 (read() below is then a no-op).
            dataclass = AVIChunk()
            dataclass.datatype = 3
        else:
            dataclass = AVIChunk()
            dataclass.fourcc = data

        # Fill in the chunk or list info
        dataclass.read(self.File)
        return dataclass
| 2.328125 | 2 |
# utils/csvwriter.py (from mfg92/Hantek1008Driver)
from typing import List, Any, Sequence, Callable, IO
import threading
import queue
import csv
# marking a child classes method with overrides makes sure the method overrides a parent class method
# this check is only needed during development so its no problem if this package is not installed
# to avoid errors we need to define a dummy decorator
try:
    from overrides import overrides
except ImportError:
    # Fallback: a no-op identity decorator so the @overrides annotations
    # below still work when the optional 'overrides' package is missing.
    def overrides(method: Callable) -> Callable:
        """No-op stand-in for the optional ``overrides`` decorator."""
        return method
class CsvWriter:
    """Thin wrapper around ``csv.writer`` that can also emit '#' comment lines."""

    def __init__(self, file: IO[str], delimiter: str) -> None:
        self._file = file
        self._writer = csv.writer(file, delimiter=delimiter)

    def write_comment(self, comment: str) -> None:
        """Write *comment* as a raw '# ...' line (bypasses CSV quoting)."""
        self._file.write(f"# {comment}\n")

    def write_row(self, row: Sequence[Any]) -> None:
        """Write a single CSV record."""
        self._writer.writerow(row)

    def write_rows(self, rows: Sequence[Sequence[Any]]) -> None:
        """Write several CSV records at once."""
        self._writer.writerows(rows)

    def close(self) -> None:
        """Close the underlying file object."""
        self._file.close()
class ThreadedCsvWriter(CsvWriter):
    """
    Writes content to a csv file using an extra thread
    """

    def __init__(self, file: IO[str], delimiter: str) -> None:
        super().__init__(file, delimiter)
        self.__closed: bool = False
        self.__work_queue: queue.Queue = queue.Queue()  # a thread-safe FIFO queue
        # Dedicated (non-daemon) worker that performs the actual file writes.
        self.__work_thread = threading.Thread(target=self.__do_work)
        self.__work_thread.start()

    @overrides
    def write_comment(self, comment: str) -> None:
        # Enqueue the bound parent implementation; executed asynchronously.
        self.__enqueue_work(super().write_comment, comment)

    @overrides
    def write_row(self, row: Sequence[Any]) -> None:
        self.__enqueue_work(super().write_row, row)

    @overrides
    def write_rows(self, rows: Sequence[Sequence[Any]]) -> None:
        self.__enqueue_work(super().write_rows, rows)

    def __enqueue_work(self, func: Callable, *params: Any) -> None:
        # Jobs are (callable, argument-tuple) pairs processed in FIFO order.
        self.__work_queue.put((func, params))

    def __do_work(self) -> None:
        # Worker loop: runs until close() enqueues the 'stop' job below.
        # NOTE(review): an exception raised by a queued job would terminate
        # this thread and silently stop all further writes -- confirm that
        # is acceptable for the callers of this class.
        while not self.__closed:
            func, params = self.__work_queue.get()
            func(*params)

    def close(self) -> None:
        """Flush pending writes, close the file, and join the worker thread."""
        def stop() -> None:
            self.__closed = True
            # super without arguments does not work here inside a locally defined function
            super(ThreadedCsvWriter, self).close()
        # 'stop' is queued after all previously enqueued writes, so nothing
        # queued before close() is lost; anything enqueued afterwards is dropped.
        self.__enqueue_work(stop)
        self.__work_thread.join()
| 3.34375 | 3 |
# tomography/src/Display.py (from SamuelDoud/tomography)
"""A user interface for interacting with the network."""
from tkinter import Tk, Frame, Button, Label, Entry, Menu, messagebox, Toplevel
from tomography import Tomography
from Node import Node, Server, EndUser
from Connection import Connection
from Link import Link
class Display(Frame):
    """Provide the user with a visualization of the network and a way to interact with it."""

    def __init__(self, master):
        """Build the main window, the backing Tomography model, and the menu bar."""
        Frame.__init__(self, master)
        self.master = master
        self.master.title("Tomography")
        self.master.protocol("WM_DELETE_WINDOW", self.shutdown)
        self.tomography = Tomography()
        # Shared widget options for the pop-up dialogs.  'bd' (Entry border
        # width) was previously referenced but never defined, which raised
        # AttributeError as soon as a popup was opened.
        self.width = 5
        self.bd = 2
        self.menu_creation()

    def menu_creation(self):
        """
        Helper method to define the menu bar
        """
        self.menu_bar = Menu(self.master)
        self.file_menu = Menu(self.menu_bar, tearoff=0)
        self.edit_menu = Menu(self.menu_bar, tearoff=0)
        self.help_menu = Menu(self.menu_bar, tearoff=0)
        self.menu_bar.add_cascade(label="File", menu=self.file_menu)
        self.menu_bar.add_cascade(label="Edit", menu=self.edit_menu)
        self.menu_bar.add_cascade(label="Help", menu=self.help_menu)
        self.edit_menu_create()
        self.file_menu_create()
        self.help_menu_create()
        self.master.config(menu=self.menu_bar)

    def edit_menu_create(self):
        """Populate the Edit menu."""
        self.edit_menu.add_command(label="Node", command=self.node_popup)
        self.edit_menu.add_command(label="Link", command=self.link_popup)
        self.edit_menu.add_command(label="Connection", command=self.connection_popup)
        # The original referenced add_separator without calling it (no-op).
        self.edit_menu.add_separator()
        self.edit_menu.add_command(label="Remove All", command=self.master_blaster)

    def master_blaster(self):
        """Deletes all objects on the graph."""
        # Rebind to empty lists so the model forgets every connection and node.
        self.tomography.connections = []
        self.tomography.nodes = []

    def help_menu_create(self):
        """Populate the Help menu."""
        self.help_menu.add_command(label="View Help", command=self.help_message_box)

    def help_message_box(self):
        """Show the (placeholder) help text in a message box."""
        self.help_message_str = "<NAME> needs to write this up"
        # launch the message box
        messagebox.showinfo("Help", self.help_message_str)

    def file_menu_create(self):
        """Populate the File menu."""
        self.file_menu.add_command(label="Exit", command=self.master.quit)

    def node_popup(self):
        """Open a dialog asking the user for a node type ('server' or 'enduser')."""
        self.top = Toplevel(self.master)
        self.type_label = Label(self.top, text="Select a node type")
        self.type_label.pack()
        self.type_entry = Entry(self.top, width=self.width, bd=self.bd)
        self.type_entry.pack()
        self.create_node_submit = Button(self.top, text="Create Node", command=self.node_cleanup)
        self.create_node_submit.pack()
        self.top.bind("<Return>", self.node_cleanup)
        # Block until the dialog is dismissed.  (Originally wait_window ran
        # before any widget was created, so the dialog stayed empty and the
        # widgets were built on an already-destroyed window.)
        self.master.wait_window(self.top)

    def node_cleanup(self, event=None):
        """Create the requested node (if the type is valid) and close the dialog.

        *event* is optional so this works both as a Button command and as a
        <Return> key binding.
        """
        new_node = None
        if self.type_entry.get():
            type_of_node = self.type_entry.get()
            # The original compared the bound method ``str.lower`` to a
            # literal with ``is`` -- always False; call and compare instead.
            if type_of_node.lower() == 'server':
                new_node = Server()
            elif type_of_node.lower() == 'enduser':
                new_node = EndUser()
            if new_node is not None:
                self.add_node(new_node)
        self.top.destroy()

    def link_popup(self):
        """Open a dialog asking for a connection plus lag/buffer values for a new link."""
        self.default_lag = 1
        self.default_buffer = 1
        self.top = Toplevel(self.master)
        self.connection_label = Label(self.top, text="Select a Connection to add a link to")
        self.connection_label.pack()
        self.connection_entry = Entry(self.top, width=self.width, bd=self.bd)
        self.connection_entry.pack()
        self.lag_label = Label(self.top, text="Select a lag time")
        self.lag_label.pack()
        self.lag_entry = Entry(self.top, width=self.width, bd=self.bd)
        self.lag_entry.pack()
        self.buffer_label = Label(self.top, text="Select a buffer size")
        self.buffer_label.pack()
        self.buffer_entry = Entry(self.top, width=self.width, bd=self.bd)
        self.buffer_entry.pack()
        self.create_node_submit = Button(self.top, text="Create link", command=self.link_cleanup)
        self.create_node_submit.pack()
        # The original bound <Return> to node_cleanup (copy/paste error).
        self.top.bind("<Return>", self.link_cleanup)
        self.master.wait_window(self.top)

    def link_cleanup(self, event=None):
        """Create a link on the selected connection, then close the dialog."""
        if self.connection_entry.get():
            # TODO: look up the Connection the user actually selected; this
            # placeholder Connection(1, 2) mirrors the original dummy code.
            c = Connection(1, 2)
            link = Link(c.start_node, c.end_node,
                        self.lag_entry.get()
                        if self.lag_entry.get()
                        else self.default_lag,
                        self.buffer_entry.get()
                        if self.buffer_entry.get()
                        else self.default_buffer)
            self.tomography.connections[self.tomography.connections.index(c)].add_link(link=link)
        self.top.destroy()

    def connection_popup(self):
        """Open a dialog asking for the start and end nodes of a new connection."""
        self.top = Toplevel(self.master)
        if len(self.tomography.nodes) < 2:
            # Not enough nodes to connect; abort instead of building widgets
            # on a destroyed window (the original fell through here).
            self.top.destroy()
            return
        self.start_label = Label(self.top, text="Select a start node")
        self.start_label.pack()
        self.start_entry = Entry(self.top, width=self.width, bd=self.bd)
        self.start_entry.pack()
        self.end_label = Label(self.top, text="Select a end node")
        self.end_label.pack()
        self.end_entry = Entry(self.top, width=self.width, bd=self.bd)
        self.end_entry.pack()
        # The original wired the Button and <Return> to node_cleanup; this
        # dialog's handler is connection_cleanup.
        self.create_node_submit = Button(self.top, text="Create Node", command=self.connection_cleanup)
        self.create_node_submit.pack()
        self.top.bind("<Return>", self.connection_cleanup)
        self.master.wait_window(self.top)

    def connection_cleanup(self, event=None):
        """Create the connection (not yet implemented) and close the dialog."""
        if self.start_entry.get() and self.end_entry.get():
            # TODO: resolve the entered node identifiers and call connect_nodes.
            pass
        self.top.destroy()

    def tick(self, time=1):
        """Increment n units of time."""
        if time < 1:
            raise Exception("Must provide a positive real value for time" +
                            "(Although it really should be an integer you oaf.)")
        for _counter in range(time):
            self.tomography.tick()
        # Redraw once after advancing the model.
        self.draw()

    def shutdown(self):
        """Closes the application 'gracefully'."""
        self.master.quit()
        self.master.destroy()

    def add_node(self, node):
        """Add a node to the Tomography.
        The Tomography will assign an address if needed."""
        self.tomography.add_node(node)

    def remove_node(self, node):
        """Safely remove a node from the Tomography."""
        self.tomography.remove_node(node)

    def connect_nodes(self, start_node, end_node):
        """Connect two Nodes. A start node should be upstream of the end_node."""
        self.tomography.add_connection(start_node, end_node)

    def draw(self):
        """Draw all nodes and their connections (along with any notation about data flows)."""
        pass
# Only launch the GUI when run as a script; importing this module for its
# Display class should not open a window.
if __name__ == "__main__":
    ROOT = Tk()
    WINDOW = Display(master=ROOT)
    WINDOW.mainloop()
| 3.125 | 3 |
# test/py/splines/test10.py (from Ahdhn/lar-cc)
""" B-spline curve: effect of double or triple control points """
from larlib import *
controls1 = [[0,0],[2.5,5],[6,1],[9,3]]
controls2 = [[0,0],[2.5,5],[2.5,5],[6,1],[9,3]]
controls3 = [[0,0],[2.5,5],[2.5,5],[2.5,5],[6,1],[9,3]]
knots = [0,0,0,0,1,1,1,1]
bspline1 = larMap( BSPLINE(3)(knots)(controls1) )(larDom(knots))
knots = [0,0,0,0,1,2,2,2,2]
bspline2 = larMap( BSPLINE(3)(knots)(controls2) )(larDom(knots))
knots = [0,0,0,0,1,2,3,3,3,3]
bspline3 = larMap( BSPLINE(3)(knots)(controls3) )(larDom(knots))
VIEW(STRUCT( CAT(AA(MKPOLS)([bspline1,bspline2,bspline3])) +
[POLYLINE(controls1)]) )
| 1.992188 | 2 |
# coba/tests/test_environments_core.py (from anrath/coba)
import unittest
from typing import List
from coba.pipes import MemoryIO
from coba.config import CobaConfig, NullLogger
from coba.environments import (
SimulatedInteraction, MemorySimulation, ClassificationSimulation,
LambdaSimulation, CsvSimulation, ArffSimulation, LibsvmSimulation,
DebugSimulation
)
CobaConfig.logger = NullLogger()
class SimulatedInteraction_Tests(unittest.TestCase):
    """Tests that SimulatedInteraction stores context, actions and kwargs exactly as given."""

    def test_context_none(self):
        interaction = SimulatedInteraction(None, (1,2,3), rewards=(4,5,6))

        self.assertEqual(None, interaction.context)

    def test_context_str(self):
        interaction = SimulatedInteraction("A", (1,2,3), rewards=(4,5,6))

        self.assertEqual("A", interaction.context)

    def test_context_dense(self):
        interaction = SimulatedInteraction((1,2,3), (1,2,3), rewards=(4,5,6))

        self.assertEqual((1,2,3), interaction.context)

    def test_context_dense_2(self):
        # A dense context may itself contain a nested (one-hot style) tuple.
        interaction = SimulatedInteraction((1,2,3,(0,0,1)), (1,2,3), rewards=(4,5,6))

        self.assertEqual((1,2,3,(0,0,1)), interaction.context)

    def test_context_sparse_dict(self):
        interaction = SimulatedInteraction({1:0}, (1,2,3), rewards=(4,5,6))

        self.assertEqual({1:0}, interaction.context)

    def test_actions_correct_1(self) -> None:
        self.assertSequenceEqual([1,2], SimulatedInteraction(None, [1,2], rewards=[1,2]).actions)

    def test_actions_correct_2(self) -> None:
        self.assertSequenceEqual(["A","B"], SimulatedInteraction(None, ["A","B"], rewards=[1,2]).actions)

    def test_actions_correct_3(self) -> None:
        self.assertSequenceEqual([(1,2), (3,4)], SimulatedInteraction(None, [(1,2), (3,4)], rewards=[1,2]).actions)

    def test_custom_rewards(self):
        interaction = SimulatedInteraction((1,2), (1,2,3), rewards=[4,5,6])

        self.assertEqual((1,2), interaction.context)
        self.assertCountEqual((1,2,3), interaction.actions)
        # Keyword arguments (e.g. rewards) are exposed via .kwargs verbatim.
        self.assertEqual({"rewards":[4,5,6] }, interaction.kwargs)

    def test_reveals_results(self):
        interaction = SimulatedInteraction((1,2), (1,2,3), reveals=[(1,2),(3,4),(5,6)],rewards=[4,5,6])

        self.assertEqual((1,2), interaction.context)
        self.assertCountEqual((1,2,3), interaction.actions)
        self.assertEqual({"reveals":[(1,2),(3,4),(5,6)], "rewards":[4,5,6]}, interaction.kwargs)
class ClassificationSimulation_Tests(unittest.TestCase):
    """Tests that ClassificationSimulation turns (feature, label) pairs into interactions."""

    def assert_simulation_for_data(self, simulation, features, answers) -> None:
        """Assert that *simulation* reproduces *features* as contexts and rewards 1 for the true label."""
        interactions = list(simulation.read())

        self.assertEqual(len(interactions), len(features))

        #first we make sure that all the labels are included
        #in the first interactions actions without any concern for order
        self.assertCountEqual(interactions[0].actions, set(answers))

        #then we set our expected actions to the first interaction
        #to make sure that every interaction has the exact same actions
        #with the exact same order
        expected_actions = interactions[0].actions

        for f,l,i in zip(features, answers, interactions):
            expected_context = f
            # Reward is 1 for the correct label's action and 0 elsewhere.
            expected_rewards = [ int(a == l) for a in i.actions]

            actual_context = i.context
            actual_actions = i.actions
            actual_rewards = i.kwargs["rewards"]

            self.assertEqual(actual_context, expected_context)
            self.assertSequenceEqual(actual_actions, expected_actions)
            self.assertSequenceEqual(actual_rewards, expected_rewards)

    def test_constructor_with_good_features_and_labels1(self) -> None:
        features = [1,2,3,4]
        labels = [1,1,0,0]
        simulation = ClassificationSimulation(zip(features, labels))

        self.assert_simulation_for_data(simulation, features, labels)

    def test_constructor_with_good_features_and_labels2(self) -> None:
        features = ["a","b"]
        labels = ["good","bad"]
        simulation = ClassificationSimulation(zip(features, labels))

        self.assert_simulation_for_data(simulation, features, labels)

    def test_constructor_with_good_features_and_labels3(self) -> None:
        features = [(1,2),(3,4)]
        labels = ["good","bad"]
        simulation = ClassificationSimulation(zip(features, labels))

        self.assert_simulation_for_data(simulation, features, labels)

    def test_sparse(self) -> None:
        # Sparse (dict) features should pass through unchanged as contexts.
        feature_rows = [
            {0:10, 1:11},
            {1:20, 2:30},
            {2:30, 3:40},
            {2:30, 3:40}
        ]
        label_column = [1,1,0,2]

        simulation = ClassificationSimulation(zip(feature_rows, label_column))
        interactions = list(simulation.read())

        self.assertEqual(feature_rows[0], interactions[0].context)
        self.assertEqual(feature_rows[1], interactions[1].context)
        self.assertEqual(feature_rows[2], interactions[2].context)
        self.assertEqual(feature_rows[3], interactions[3].context)

        self.assertEqual([0,2,1], interactions[0].actions)
        self.assertEqual([0,2,1], interactions[1].actions)
        self.assertEqual([0,2,1], interactions[2].actions)
        self.assertEqual([0,2,1], interactions[3].actions)

        self.assertEqual([0,0,1], interactions[0].kwargs["rewards"])
        self.assertEqual([0,0,1], interactions[1].kwargs["rewards"])
        self.assertEqual([1,0,0], interactions[2].kwargs["rewards"])
        self.assertEqual([0,1,0], interactions[3].kwargs["rewards"])
class MemorySimulation_Tests(unittest.TestCase):
    """Tests that MemorySimulation yields exactly the interactions it was constructed with."""

    def test_interactions(self):
        given = [
            SimulatedInteraction(1, [1,2,3], rewards=[0,1,2]),
            SimulatedInteraction(2, [4,5,6], rewards=[2,3,4]),
        ]
        simulation = MemorySimulation(given)

        interactions = list(simulation.read())

        # The original asserts compared each interaction with itself
        # (assertEqual(interactions[0], interactions[0])), which is vacuously
        # true; compare against the inputs instead.
        self.assertEqual(2, len(interactions))
        self.assertEqual(given[0], interactions[0])
        self.assertEqual(given[1], interactions[1])
class LambdaSimulation_Tests(unittest.TestCase):
    """Tests that LambdaSimulation builds interactions from the supplied callables."""

    def test_interactions(self):
        # Context, action and reward callables indexed by interaction number.
        def C(i:int) -> int:
            return [1,2][i]

        def A(i:int,c:int) -> List[int]:
            return [[1,2,3],[4,5,6]][i]

        def R(i:int,c:int,a:int) -> int:
            return a-c

        simulation   = LambdaSimulation(2,C,A,R)
        interactions = list(simulation.read())

        self.assertEqual(1      , interactions[0].context)
        self.assertEqual([1,2,3], interactions[0].actions)
        self.assertEqual([0,1,2], interactions[0].kwargs["rewards"])

        self.assertEqual(2      , interactions[1].context)
        self.assertEqual([4,5,6], interactions[1].actions)
        self.assertEqual([2,3,4], interactions[1].kwargs["rewards"])

    def test_interactions_len(self):
        def C(i:int) -> int:
            return [1,2][i]

        def A(i:int,c:int) -> List[int]:
            return [[1,2,3],[4,5,6]][i]

        def R(i:int,c:int,a:int) -> int:
            return a-c

        # The first constructor argument bounds the number of interactions.
        simulation   = LambdaSimulation(2,C,A,R)
        interactions = list(simulation.read())

        self.assertEqual(len(interactions), 2)
class ValidationSimulation_Tests(unittest.TestCase):
    # NOTE(review): the class name says 'Validation' but the body exercises
    # DebugSimulation -- confirm whether a rename was intended upstream.

    def test_simple(self):
        # A default DebugSimulation is expected to yield 500 interactions.
        self.assertEqual(500, len(list(DebugSimulation().read())))
class CsvSimulation_Tests(unittest.TestCase):
    """Tests reading a CSV source where one named column serves as the label."""

    def test_simple(self):
        source     = MemoryIO(['a,b,c','1,2,3','4,5,6','7,8,6'])
        simulation = CsvSimulation(source,'c')

        interactions = list(simulation.read())

        self.assertEqual(3, len(interactions))

        # The non-label columns become the (string-valued) context...
        self.assertEqual(('1','2'), interactions[0].context)
        self.assertEqual(('4','5'), interactions[1].context)
        self.assertEqual(('7','8'), interactions[2].context)

        # ...and the distinct label values become the shared action set.
        self.assertEqual(['3','6'], interactions[0].actions)
        self.assertEqual(['3','6'], interactions[1].actions)

        self.assertEqual([1,0], interactions[0].kwargs["rewards"])
        self.assertEqual([0,1], interactions[1].kwargs["rewards"])
class ArffSimulation_Tests(unittest.TestCase):
    """Tests reading ARFF sources, including one-hot encoding of nominal features."""

    def test_simple(self):
        lines = [
            "@relation news20",
            "@attribute a numeric",
            "@attribute B numeric",
            "@attribute c {0, class_B, class_C, class_D}",
            "@data",
            "1,2,class_B",
            "2,3,0",
        ]

        source     = MemoryIO(lines)
        simulation = ArffSimulation(source,'c')

        interactions = list(simulation.read())

        self.assertEqual(2, len(interactions))

        self.assertEqual((1,2), interactions[0].context)
        self.assertEqual((2,3), interactions[1].context)

        # Only label values actually present in the data become actions
        # (class_C / class_D never occur, so they are not offered).
        self.assertEqual(['0','class_B'], interactions[0].actions)
        self.assertEqual(['0','class_B'], interactions[1].actions)

        self.assertEqual([0,1], interactions[0].kwargs["rewards"])
        self.assertEqual([1,0], interactions[1].kwargs["rewards"])

    def test_one_hot(self):
        lines = [
            "@relation news20",
            "@attribute a numeric",
            "@attribute B {0, 1, 2, 3}",
            "@attribute c {0, class_B, class_C, class_D}",
            "@data",
            "1,2,class_B",
            "2,3,0",
            "3,1,0"
        ]

        source     = MemoryIO(lines)
        simulation = ArffSimulation(source,'c',)

        interactions = list(simulation.read())

        self.assertEqual(3, len(interactions))

        # The nominal feature B is one-hot encoded inside the context tuple.
        self.assertEqual((1,(0,0,1,0)), interactions[0].context)
        self.assertEqual((2,(0,0,0,1)), interactions[1].context)
        self.assertEqual((3,(0,1,0,0)), interactions[2].context)

        self.assertEqual(['0','class_B'], interactions[0].actions)
        self.assertEqual(['0','class_B'], interactions[1].actions)
        self.assertEqual(['0','class_B'], interactions[2].actions)

        self.assertEqual([0,1], interactions[0].kwargs["rewards"])
        self.assertEqual([1,0], interactions[1].kwargs["rewards"])
        self.assertEqual([1,0], interactions[2].kwargs["rewards"])
class LibsvmSimulation_Tests(unittest.TestCase):
    """Tests reading libsvm-formatted (label then sparse index:value pairs) sources."""

    def test_simple(self):
        lines = [
            "0 4:2 5:3",
            "1 1:1 2:1",
            "1 3:4"
        ]

        source     = MemoryIO(lines)
        simulation = LibsvmSimulation(source)

        interactions = list(simulation.read())

        self.assertEqual(3, len(interactions))

        # Sparse features are kept as index->value dictionaries.
        self.assertEqual({4:2,5:3}, interactions[0].context)
        self.assertEqual({1:1,2:1}, interactions[1].context)
        self.assertEqual({3:4    }, interactions[2].context)

        self.assertEqual(['0','1'], interactions[0].actions)
        self.assertEqual(['0','1'], interactions[1].actions)

        self.assertEqual([1,0], interactions[0].kwargs["rewards"])
        self.assertEqual([0,1], interactions[1].kwargs["rewards"])
if __name__ == '__main__':
    # A dataset score row had been fused onto this line, breaking the syntax.
    unittest.main()
# chap_02/exe_036_dog_years.py (from aleattene/python-workbook)
"""
It is commonly said that one human year is equivalent to 7 dog years.
However this simple conversion fails
to recognize that dogs reach adulthood in approximately two years.
As a result, some people believe that it is better
to count each of the first two human years as 10.5 dog years,
and then count each additional human year as 4 dog years.
Write a program that implements the conversion from human years to dog years
described in the previous paragraph.
Ensure that your program works correctly
for conversions of less than two human years and
for conversions of two or more human years.
Your program should display an appropriate error message if the user enters a negative number.
Remember that:
- the FIRST TWO HUMAN YEARS are equivalent to 10.5 CANINE YEARS
- the FOLLOWING HUMAN YEARS are equivalent to FOUR CANINE YEARS.
"""
# START Definition of FUNCTIONS
def valutaIntPositive(numero):
    """Return True when *numero* is the text of a strictly positive integer."""
    # isdigit() rejects signs, decimals and empty strings; "0" is excluded
    # explicitly because zero is not a valid age here.
    return numero.isdigit() and numero != "0"
def yearsHumanDog(etaHuman):
    """Convert human years to dog years.

    The first two human years count as 10.5 dog years each; every year
    after that counts as 4 dog years.
    """
    if etaHuman == 1:
        return 10.5
    # For etaHuman == 2 the formula below yields exactly 21 (= 2 * 10.5),
    # so a separate branch is unnecessary.
    return 21 + (etaHuman - 2) * 4
# END Definition of FUNCTIONS
# Acquisition and Control of the DATA entered by the USER
etaHuman = input("Enter the HUMAN Years: ")
etaHumanIntPositive = valutaIntPositive(etaHuman)
# Keep prompting until the user supplies a strictly positive integer
# (negative numbers, zero and non-numeric text are all rejected).
while not(etaHumanIntPositive):
    print("Incorrect entry. Try again.")
    etaHuman = input("Enter the HUMAN Years: ")
    etaHumanIntPositive = valutaIntPositive(etaHuman)

# Conversion STR -> INT
etaHuman = int(etaHuman)

# DOG YEARS computing
etaDog = yearsHumanDog(etaHuman)

# Displaying the RESULTS (singular vs plural wording for exactly 1 year)
if etaHuman == 1:
    print(str(etaHuman) + " HUMAN year is equal to " + str(etaDog) + " DOG years.")
else:
    print(str(etaHuman) + " HUMAN years is equal to " + str(etaDog) + " DOG years.")
| 4.15625 | 4 |
# pytropos/ast_transformer/__init__.py (from helq/pytropos)
# A dataset metadata row had been fused onto the first import line,
# breaking the syntax; it is restored here.
from .miscelaneous import typed_ast3_to_ast
from .transformer import PytroposTransformer, AstTransformerError

# Names re-exported as the package's public API.
__all__ = ["typed_ast3_to_ast", "PytroposTransformer", 'AstTransformerError']
| 1.226563 | 1 |
# setup.py (from fuzailpalnak/py-detect-track)
import os
from setuptools import setup, find_packages
# The README is used verbatim as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
current_dir = os.path.abspath(os.path.dirname(__file__))
# Dependencies come from requirements.txt when present; fall back to an
# empty list so installation still works when the file is missing
# (e.g. from an sdist that does not ship it).
try:
    with open(os.path.join(current_dir, "requirements.txt"), encoding="utf-8") as f:
        install_requires = f.read().split("\n")
except FileNotFoundError:
    install_requires = []
setup(
    name="py-detect-track",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/fuzailpalnak/py-detect-track",
    description="Object Tracking",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    python_requires="~=3.6",
    install_requires=install_requires,
    keywords=[
        "Deep Learning",
        "CNN",
        "Object Tracking",
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Topic :: Software Development :: Libraries",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
    ],
)
| 1.625 | 2 |
Attempts/3piClient-Alexander Baran-Harper.py | SethuEapen/PrivacyNinja-Server | 0 | 12773594 | <reponame>SethuEapen/PrivacyNinja-Server<gh_stars>0
#<NAME>: https://www.youtube.com/watch?v=xfQlPWFlSgQ
import socket
# Hardcoded LAN endpoint of the companion server.
host = '192.168.1.173'
port = 5560
# One TCP connection is opened up front and reused for every command.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
# Simple request/response loop: send a command, print the server's reply.
while True:
    command = input("Enter your command: ")
    if command == 'EXIT':
        # Tell the server we are disconnecting, then stop without reading a reply.
        s.send(str.encode(command))
        break
    elif command == 'KILL':
        # Ask the server process itself to shut down, then stop.
        s.send(str.encode(command))
        break
    s.send(str.encode(command))
    # NOTE(review): assumes every reply fits in a single 1024-byte recv -- confirm.
    reply = s.recv(1024)
    print(reply.decode('utf-8'))
s.close()
| 2.578125 | 3 |
linux/youtube_dl_script_linux.py | Frxhb/youtubedl_script | 0 | 12773595 | <filename>linux/youtube_dl_script_linux.py<gh_stars>0
from linux.functions_youtube_linux import check_installation
from linux.functions_youtube_linux import type_abfrage
from linux.functions_youtube_linux import move_audios
from linux.functions_youtube_linux import move_videos
check_installation.all_func_check()
def main_func():
    """Run one download cycle: move already-finished files, ask the user what
    to download, then optionally repeat based on the user's answer."""
    # Move any previously downloaded audio/video files out of the working dir.
    move_audios.mv_aud_func()
    move_videos.mv_vid_func()
    # Ask for the media type and perform the download (project helper).
    type_abfrage.Type_Abfrage()
    # Kept global per the original design; nothing else appears to read it.
    global ask_run
    ask_run = input ("Would you like to continue? Y/n:\n>>> ")
    if ask_run in ['yes', 'Yes', 'Y', 'y']:
        # NOTE(review): recursing and then moving files again makes the move
        # functions run twice per extra round -- looks unintended; confirm.
        main_func()
        move_audios.mv_aud_func()
        move_videos.mv_vid_func()
    else:
        print("Okay, close program now...")
        move_audios.mv_aud_func()
        move_videos.mv_vid_func()
        exit()
main_func()
#call main_func | 2.125 | 2 |
stock/urls.py | Praneeth-rdy/Innovation | 0 | 12773596 | <filename>stock/urls.py
# Django Imports
from django.urls import path, include
# Standard Package Imports
# Project Imports
from . import views
# Third Party Imports
# Namespace for {% url 'stock:...' %} template lookups and reverse() calls.
app_name = 'stock'
urlpatterns = [
    # path('', views.home, name='home'),
    # Landing page of the stock app.
    path('', views.risk_return, name='risk-return'),
    path('portfolio/contact-form/', views.contact_form, name='contact_form'),
    # path('get_quote/', views.get_quote, name='get_quote'),
    # path('<int:year>/', views.diary),
    # path('<int:year>/<str:name>/', views.diary),
]
homework_1/scikitlearn_pipeline/pipeline.py | aigulkhkmv/deploy_course_hse | 0 | 12773597 | from sklearn.ensemble import AdaBoostClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Normalizer
from homework_1.scikitlearn_pipeline.preprocessors import ConnectFeatures
# Three-stage model: ConnectFeatures(all=True) presumably assembles the raw
# columns into one feature matrix (see preprocessors module -- confirm), each
# sample is then L2-normalized, and AdaBoost (default settings) classifies.
pipeline = Pipeline(
    steps=[
        ("connect", ConnectFeatures(all=True)),
        ("norm", Normalizer()),
        ("ada_boost_classifier", AdaBoostClassifier()),
    ]
)
| 1.976563 | 2 |
thenewboston_node/business_logic/tests/test_blockchain/test_add_block.py | nishp77/thenewboston-node | 30 | 12773598 | <reponame>nishp77/thenewboston-node
import pytest
from thenewboston_node.business_logic.blockchain.base import BlockchainBase
from thenewboston_node.business_logic.exceptions import ValidationError
from thenewboston_node.business_logic.models import Block, NodeDeclarationSignedChangeRequest
from thenewboston_node.core.utils.cryptography import KeyPair, derive_public_key
@pytest.mark.parametrize('blockchain_argument_name', ('memory_blockchain', 'file_blockchain'))
def test_can_add_block(
    file_blockchain: BlockchainBase,
    memory_blockchain: BlockchainBase,
    treasury_account_key_pair: KeyPair,
    user_account_key_pair: KeyPair,
    preferred_node,
    blockchain_argument_name,
    primary_validator_key_pair,
):
    """Adding coin-transfer blocks updates sender/recipient balances and pays
    the node and primary-validator fees; re-adding a block is rejected."""
    # The parametrized name picks which blockchain fixture implementation to test.
    blockchain: BlockchainBase = locals()[blockchain_argument_name]
    treasury_account_number = treasury_account_key_pair.public
    treasury_initial_balance = blockchain.get_account_current_balance(treasury_account_number)
    assert treasury_initial_balance is not None
    user_account_number = user_account_key_pair.public
    primary_validator = blockchain.get_primary_validator()
    assert primary_validator
    pv_account_number = primary_validator.identifier
    assert pv_account_number
    preferred_node_account_number = preferred_node.identifier
    # Fees must be non-zero and distinct so PV and node credits are distinguishable.
    assert primary_validator.fee_amount > 0
    assert preferred_node.fee_amount > 0
    assert primary_validator.fee_amount != preferred_node.fee_amount
    total_fees = primary_validator.fee_amount + preferred_node.fee_amount
    pv_signing_key = primary_validator_key_pair.private
    assert derive_public_key(pv_signing_key) == pv_account_number
    # Block 0: treasury -> user, 30 coins (+ fees paid by the sender).
    block0 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=user_account_number,
        amount=30,
        request_signing_key=treasury_account_key_pair.private,
        pv_signing_key=pv_signing_key,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block0)
    assert blockchain.get_account_current_balance(user_account_number) == 30
    assert blockchain.get_account_current_balance(
        treasury_account_number
    ) == treasury_initial_balance - 30 - total_fees
    assert blockchain.get_account_current_balance(preferred_node_account_number) == preferred_node.fee_amount
    assert blockchain.get_account_current_balance(pv_account_number) == primary_validator.fee_amount
    # Re-adding the same block must fail the block-number validation.
    with pytest.raises(ValidationError, match='Block number must be equal to next block number.*'):
        blockchain.add_block(block0)
    # Block 1: treasury -> user, 10 coins.
    block1 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=user_account_number,
        amount=10,
        request_signing_key=treasury_account_key_pair.private,
        pv_signing_key=pv_signing_key,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block1)
    assert blockchain.get_account_current_balance(user_account_number) == 40
    assert blockchain.get_account_current_balance(treasury_account_number
                                                  ) == (treasury_initial_balance - 30 - 10 - 2 * total_fees)
    assert blockchain.get_account_current_balance(preferred_node_account_number) == preferred_node.fee_amount * 2
    assert blockchain.get_account_current_balance(pv_account_number) == primary_validator.fee_amount * 2
    # Block 2: user -> treasury, 5 coins; this time the user pays the fees.
    block2 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=treasury_account_number,
        amount=5,
        request_signing_key=user_account_key_pair.private,
        pv_signing_key=pv_signing_key,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block2)
    assert blockchain.get_account_current_balance(user_account_number) == 40 - 5 - total_fees
    assert blockchain.get_account_current_balance(treasury_account_number
                                                  ) == (treasury_initial_balance - 30 - 10 + 5 - 2 * total_fees)
    assert blockchain.get_account_current_balance(preferred_node_account_number) == preferred_node.fee_amount * 3
    assert blockchain.get_account_current_balance(pv_account_number) == primary_validator.fee_amount * 3
@pytest.mark.parametrize('blockchain_argument_name', ('memory_blockchain', 'file_blockchain'))
def test_can_add_coin_transfer_block(
    memory_blockchain: BlockchainBase,
    file_blockchain: BlockchainBase,
    treasury_account_key_pair: KeyPair,
    user_account_key_pair: KeyPair,
    primary_validator_key_pair: KeyPair,
    preferred_node_key_pair: KeyPair,
    preferred_node,
    blockchain_argument_name,
):
    """Same scenario as test_can_add_block but with the fixture fee amounts
    hard-coded (node fee = 1, primary-validator fee = 4)."""
    blockchain: BlockchainBase = locals()[blockchain_argument_name]
    treasury_account = treasury_account_key_pair.public
    treasury_initial_balance = blockchain.get_account_current_balance(treasury_account)
    assert treasury_initial_balance is not None
    user_account = user_account_key_pair.public
    pv_account = primary_validator_key_pair.public
    node_account = preferred_node_key_pair.public
    # 1 coin to the preferred node + 4 coins to the primary validator per block.
    total_fees = 1 + 4
    block0 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=user_account,
        amount=30,
        request_signing_key=treasury_account_key_pair.private,
        pv_signing_key=primary_validator_key_pair.private,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block0)
    assert blockchain.get_account_current_balance(user_account) == 30
    assert blockchain.get_account_current_balance(node_account) == 1
    assert blockchain.get_account_current_balance(pv_account) == 4
    assert blockchain.get_account_current_balance(treasury_account) == treasury_initial_balance - 30 - total_fees
    # Duplicate blocks must be rejected by block-number validation.
    with pytest.raises(ValidationError, match='Block number must be equal to next block number.*'):
        blockchain.add_block(block0)
    block1 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=user_account,
        amount=10,
        request_signing_key=treasury_account_key_pair.private,
        pv_signing_key=primary_validator_key_pair.private,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block1)
    assert blockchain.get_account_current_balance(user_account) == 40
    assert blockchain.get_account_current_balance(
        treasury_account
    ) == treasury_initial_balance - 30 - 10 - 2 * total_fees
    assert blockchain.get_account_current_balance(node_account) == 1 * 2
    assert blockchain.get_account_current_balance(pv_account) == 4 * 2
    # Transfer back: user -> treasury, the user now pays the fees.
    block2 = Block.create_from_main_transaction(
        blockchain=blockchain,
        recipient=treasury_account,
        amount=5,
        request_signing_key=user_account_key_pair.private,
        pv_signing_key=primary_validator_key_pair.private,
        preferred_node=preferred_node,
    )
    blockchain.add_block(block2)
    assert blockchain.get_account_current_balance(user_account) == 40 - 5 - total_fees
    assert blockchain.get_account_current_balance(
        treasury_account
    ) == treasury_initial_balance - 30 - 10 + 5 - 2 * total_fees
    assert blockchain.get_account_current_balance(node_account) == 1 * 3
    assert blockchain.get_account_current_balance(pv_account) == 4 * 3
@pytest.mark.parametrize('blockchain_argument_name', ('memory_blockchain', 'file_blockchain'))
def test_can_add_node_declaration_block(
    memory_blockchain: BlockchainBase,
    file_blockchain: BlockchainBase,
    user_account_key_pair: KeyPair,
    blockchain_argument_name,
    primary_validator_key_pair,
):
    """A node-declaration block registers the node, and a later declaration by
    the same account replaces it; both survive a blockchain-state snapshot."""
    blockchain: BlockchainBase = locals()[blockchain_argument_name]
    user_account = user_account_key_pair.public
    request0 = NodeDeclarationSignedChangeRequest.create(
        network_addresses=['http://127.0.0.1'], fee_amount=3, signing_key=user_account_key_pair.private
    )
    block0 = Block.create_from_signed_change_request(blockchain, request0, primary_validator_key_pair.private)
    blockchain.add_block(block0)
    assert blockchain.get_node_by_identifier(user_account) == request0.message.node
    # The node must also be present in the snapshotted blockchain state.
    blockchain.snapshot_blockchain_state()
    assert blockchain.get_last_blockchain_state().get_node(user_account) == request0.message.node
    # Re-declaring with new addresses must overwrite the previous declaration.
    request1 = NodeDeclarationSignedChangeRequest.create(
        network_addresses=['http://127.0.0.2', 'http://192.168.0.34'],
        fee_amount=3,
        signing_key=user_account_key_pair.private
    )
    block1 = Block.create_from_signed_change_request(blockchain, request1, primary_validator_key_pair.private)
    blockchain.add_block(block1)
    assert blockchain.get_node_by_identifier(user_account) == request1.message.node
    blockchain.snapshot_blockchain_state()
    assert blockchain.get_last_blockchain_state().get_node(user_account) == request1.message.node
| 2.203125 | 2 |
Q21/sol.py | shivamT95/projecteuler | 0 | 12773599 | <gh_stars>0
divs = {}
def sm(x):
    """Return the sum of the proper divisors of x (all divisors less than x).

    Iterates candidates up to and including floor(sqrt(x)), adding each
    divisor together with its complement x // i, and compensating when x is a
    perfect square so sqrt(x) is counted only once.
    """
    if x < 2:
        # 0 and 1 have no proper divisors (the original wrongly returned 1).
        return 0
    s = 0
    # Bug fix: the upper bound must include int(x ** 0.5); the original
    # range(2, int(x ** 0.5)) skipped it, e.g. sm(12) missed divisors 3 and 4.
    for i in range(2, int(x ** 0.5) + 1):
        if x % i == 0:
            s += i
            s += x // i  # integer division keeps the sum an int
            if i * i == x:
                s -= i  # sqrt(x) was added twice above
    return s + 1  # every x >= 2 has 1 as a proper divisor
# Proper-divisor sums for every number up to 10000, computed once.
for i in range(10001):
    divs[i] = sm(i)
ans = 0
# Amicable check in O(n): for a given i the only j that can satisfy
# divs[i] == j is j = divs[i] itself, so the original 10001 x 10001 double
# loop (~100M iterations) is unnecessary.  The j <= 10000 guard mirrors the
# original inner loop's range.
for i in range(10001):
    j = divs[i]
    if j != i and 0 <= j <= 10000 and divs[j] == i:
        ans += i
print(ans)
| 2.8125 | 3 |
src/ion/process/bootstrap/plugins/bootstrap_policy.py | scionrep/scioncc_new | 2 | 12773600 | #!/usr/bin/env python
"""Bootstrap process for system policy"""
__author__ = '<NAME>, <NAME>'
from pyon.public import log
from ion.core.bootstrap_process import BootstrapPlugin
from ion.process.bootstrap.load_system_policy import LoadSystemPolicy
class BootstrapPolicy(BootstrapPlugin):
    """
    Bootstrap plugin for system policy.

    Runs once at initial system bootstrap and loads the system-wide policies
    when the configuration explicitly enables it.
    """
    def on_initial_bootstrap(self, process, config, **kwargs):
        # Loading is opt-in: get_safe returns False when the key is absent,
        # so a missing "system.load_policy" entry skips policy loading.
        if config.get_safe("system.load_policy", False):
            LoadSystemPolicy.op_load_system_policies(process)
| 1.960938 | 2 |
src/secondaires/navigation/commandes/gouvernail/tenir.py | vlegoff/tsunami | 14 | 12773601 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'tenir' de la commande 'gouvernail'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmTenir(Parametre):

    """Parameter 'tenir' ('hold') of the 'gouvernail' (helm) command.

    Lets a character take hold of the ship's helm; holding the helm is
    required for steering (see the in-game help text below).
    """

    def __init__(self):
        """Build the parameter (French keyword 'tenir', English alias 'hold')."""
        Parametre.__init__(self, "tenir", "hold")
        # In-game help strings are user-facing and deliberately kept in French.
        self.aide_courte = "fait tenir le gouvernail"
        self.aide_longue = \
            "Cette commande permet de tenir le gouvernail. Il est " \
            "obligatoire de tenir un gouvernail pour virer. En outre, " \
            "un gouvernail qui n'est pas tenu ne sera pas du tout " \
            "stable et le navire pourra faire des embardées."

    def interpreter(self, personnage, dic_masques):
        """Interpret the parameter: validate the room, then grab the helm."""
        salle = personnage.salle
        # The character must be in a room belonging to a ship that is placed
        # on a body of water ('etendue').
        if not hasattr(salle, "navire") or salle.navire is None or \
                salle.navire.etendue is None:
            personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
            return
        navire = salle.navire
        gouvernail = salle.gouvernail
        if not gouvernail:
            personnage << "|err|Il n'y a pas de gouvernail ici.|ff|"
            return
        if gouvernail.tenu:
            # Helm already held -- by this character or by someone else.
            if gouvernail.tenu is personnage:
                personnage << "|err|Vous tenez déjà ce gouvernail.|ff|"
            else:
                personnage << "|err|Ce gouvernail est déjà tenu par " \
                        "quelqu'un d'autre.|ff|"
        else:
            gouvernail.tenir(personnage)
| 1.421875 | 1 |
MaudeMiner/settings.py | tklovett/MaudeMiner | 3 | 12773602 | #######################
# MaudeMiner Settings #
#######################
# Database settings: location and name of the local MAUDE database.
DATABASE_PATH = '/Users/tklovett/maude/'
DATABASE_NAME = 'maude'
# These settings control where the text files and zip files retrieved from
# the FDA website are stored.
DATA_PATH = '/Users/tklovett/maude/data/'
ZIPS_PATH = DATA_PATH + 'zips/'
TXTS_PATH = DATA_PATH + 'txts/'
# The downloader module uses this FDA page as the origin for MAUDE data files.
MAUDE_DATA_ORIGIN = 'http://www.fda.gov/MedicalDevices/DeviceRegulationandGuidance/PostmarketRequirements/ReportingAdverseEvents/ucm127891.htm'
# Number of parsed lines to buffer before committing a batch to the database.
LINES_PER_DB_COMMIT = 1000 * 50
# MaudeMiner will load any modules listed here
INSTALLED_MODULES = (
    "querier",
    "tokenizer",
    "html_generator",
    "cleanser",
)
| 1.40625 | 1 |
todolist/main/migrations/0001_initial.py | gda2048/TODOlist | 1 | 12773603 | # Generated by Django 2.2.2 on 2019-06-14 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: creates the Card model (id, name, description,
    is_archived).  Auto-generated by Django 2.2.2 -- avoid hand edits."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Card',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50, verbose_name='Название')),
                ('description', models.CharField(max_length=500, verbose_name='Описание')),
                ('is_archived', models.BooleanField(default=False, verbose_name='Архивировано')),
            ],
        ),
    ]
| 1.75 | 2 |
spid_cie_oidc/onboarding/views.py | freddi301/spid-cie-oidc-django | 0 | 12773604 | import json
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from django.contrib import messages
from django.utils.translation import gettext as _
from .forms import (
OnboardingRegistrationForm,
OnboardingCreateTrustChain,
OnboardingValidatingTrustMarkForm
)
from .models import OnBoardingRegistration
from spid_cie_oidc.entity.jwks import (
private_pem_from_jwk,
public_pem_from_jwk,
new_rsa_key,
serialize_rsa_key
)
from spid_cie_oidc.entity.jwtse import unpad_jwt_head, unpad_jwt_payload, verify_jws
from spid_cie_oidc.authority.views import trust_mark_status, resolve_entity_statement
def onboarding_landing(request):
    """Render the static onboarding landing page."""
    return render(request, "onboarding_landing.html")
def onboarding_registration(request):
    """Show and process the onboarding registration form.

    GET renders an empty form; a valid POST persists a new
    OnBoardingRegistration and redirects to the entity list.
    """
    form = OnboardingRegistrationForm()
    context = {"form": form}
    if request.method == "POST":
        form = OnboardingRegistrationForm(request.POST)
        if not form.is_valid():
            # Re-render with the bound form so field errors are displayed.
            context = {"form": form}
        else:
            form_dict = {**form.cleaned_data}
            OnBoardingRegistration.objects.create(**form_dict)
            messages.success(request, _("Registration successfully"))
            return redirect("oidc_onboarding_entities")
    return render(request, "onboarding_registration.html", context)
def onboarding_entities(request):
    """List the registered onboarding entities, paginated 10 per page."""
    entity_list = OnBoardingRegistration.objects.all()
    p = Paginator(entity_list, 10)
    # get_page tolerates a missing/invalid ?page= value and falls back sanely.
    page = request.GET.get("page")
    entities = p.get_page(page)
    return render(
        request,
        "onboarding_entities.html",
        {"entity_list": entity_list, "entities": entities},
    )
def onboarding_create_jwk(request):
    """Generate a fresh RSA key pair and display it as private/public JWKs."""
    _rsa_key = new_rsa_key()
    private_jwk = serialize_rsa_key(_rsa_key.priv_key, 'private')
    public_jwk = serialize_rsa_key(_rsa_key.pub_key)
    context = {
        "private_jwk": private_jwk,
        "public_jwk": public_jwk
    }
    return render(request, 'onboarding_jwk.html', context)
def onboarding_convert_jwk(request):
    """Convert a pasted JWK (JSON) to PEM.

    The ?type= query parameter selects 'private' or 'public' conversion.
    On POST the submitted JWK is parsed and the resulting PEM rendered;
    malformed input or an unknown type is reported via the messages framework.
    """
    jwk_type = request.GET.get('type')
    context = {
        "jwk": "",
        "pem": "",
        "jwk_type": jwk_type
    }
    if request.method == 'POST':
        try:
            jwk_str = request.POST.get('jwk')
            # Tolerate single-quoted pseudo-JSON pasted from Python reprs.
            jwk_str_double_quote = jwk_str.replace("'", '"')
            jwk_dict = json.loads(jwk_str_double_quote)
            # Bug fix: 'pem' was left unbound when jwk_type was neither
            # 'private' nor 'public', surfacing a confusing NameError message.
            if jwk_type == 'private':
                pem = private_pem_from_jwk(jwk_dict)
            elif jwk_type == 'public':
                pem = public_pem_from_jwk(jwk_dict)
            else:
                raise ValueError(f"unknown JWK type: {jwk_type!r}")
            context = {
                "jwk": jwk_dict,
                "pem": pem,
                "jwk_type": jwk_type
            }
        except Exception as e:
            messages.error(request, _(f" {e} "))
            return render(request, 'onboarding_convert_jwk.html', context)
    return render(request, 'onboarding_convert_jwk.html', context)
def onboarding_resolve_statement(request):
    """Resolve an entity statement for the subject given in the query string.

    Binds the form to the query string when ?sub= is present; on success the
    resolved statement (JSON) is rendered, otherwise an error message is shown.
    """
    if "sub" in request.GET:
        form = OnboardingCreateTrustChain(request.GET)
    else:
        form = OnboardingCreateTrustChain()
    context = {'form': form}
    if form.is_valid():
        context = {
            'form': form,
            "resolved_statement": "",
        }
        try:
            res = resolve_entity_statement(request, format="json")
            context["resolved_statement"] = res.content.decode()
        except Exception:
            messages.error(request, _('Failed to resolve entity statement, Please check your inserted data'))
            # Bug fix: a stray, unreturned render() call here rendered the
            # template a first time, which consumed the error message before
            # the real response below could display it.
    return render(request, 'onboarding_resolve_statement.html', context)
def onboarding_validating_trustmark(request):
    """Validate a trust mark (given by ?id= or ?trust_mark=) against the
    authority's trust-mark status endpoint."""
    if "id" in request.GET or "trust_mark" in request.GET:
        form = OnboardingValidatingTrustMarkForm(request.GET)
    else:
        form = OnboardingValidatingTrustMarkForm()
    context = {"form": form}
    if form.is_valid():
        res = trust_mark_status(request)
        content = json.loads(res.content.decode())
        context = {'form': form}
        # The status endpoint reports validity via the "active" flag.
        if content['active']:
            messages.success(request, _('Validation Trust Mark Successfully'))
        else:
            messages.error(request, _('Validation Trust Mark Failed'))
    return render(request, 'onboarding_validating_tm.html', context)
def onboarding_decode_jwt(request):
    """Decode a pasted JWT (header and payload) and, when a JWK is also
    posted, verify the JWT's signature against it."""
    context = {
        "jwt": "",
        "jwk": "",
        "head": "",
        "payload": ""
    }
    if request.POST.get('jwt'):
        jwt = request.POST['jwt']
        head = unpad_jwt_head(jwt)
        payload = unpad_jwt_payload(jwt)
        context["jwt"] = jwt
        context["head"] = head
        context["payload"] = payload
    if request.POST.get('jwk'):
        # NOTE(review): 'jwt' is unbound here when only a JWK is posted -- confirm
        # whether the form can submit a JWK without a JWT.
        jwk_str = request.POST['jwk']
        context["jwk"] = jwk_str
        # Tolerate single-quoted pseudo-JSON pasted from Python reprs.
        jwk_str_double_quote = jwk_str.replace("'", '"')
        jwk = json.loads(jwk_str_double_quote)
        try:
            verify_jws(jwt, jwk)
            messages.success(request, _('Your jws is verified'))
        except Exception:
            messages.error(request, _("Jws verification failed"))
            # Bug fix: a stray, unreturned render() call here rendered the
            # template early and consumed the failure message, so the user
            # never saw it; the single return below is sufficient.
    return render(request, 'onboarding_decode_jwt.html', context)
| 1.976563 | 2 |
tests/test_workspace.py | she11c0de/cubes | 0 | 12773605 | <reponame>she11c0de/cubes
import unittest
import os
import json
import re
from cubes.errors import *
from cubes.workspace import *
from cubes.stores import Store
from cubes.model import *
from .common import CubesTestCaseBase
# FIXME: remove this once satisfied
class WorkspaceTestCaseBase(CubesTestCaseBase):
    """Shared helper for workspace tests."""
    def default_workspace(self, model_name=None):
        """Create a workspace from the test slicer.ini and import
        *model_name* (defaults to "model.json")."""
        model_name = model_name or "model.json"
        ws = Workspace(config=self.data_path("slicer.ini"))
        # Bug fix: the computed model_name was ignored and "model.json" was
        # always imported, so the parameter had no effect.
        ws.import_model(self.model_path(model_name))
        return ws
class WorkspaceStoresTestCase(WorkspaceTestCaseBase):
    """Covers store registration from kwargs, .ini files and slicer config."""
    def test_empty(self):
        """Just test whether we can create empty workspace"""
        ws = Workspace()
        self.assertEqual(0, len(ws.store_infos))
    def test_stores(self):
        # Store backends can be declared inline, via a stores file, or via
        # the main slicer configuration.
        class ImaginaryStore(Store):
            pass
        ws = Workspace(stores={"default":{"type":"imaginary"}})
        self.assertTrue("default" in ws.store_infos)
        ws = Workspace(stores=self.data_path("stores.ini"))
        self.assertEqual(3, len(ws.store_infos) )
        ws = Workspace(config=self.data_path("slicer.ini"))
        self.assertEqual(2, len(ws.store_infos))
        self.assertTrue("default" in ws.store_infos)
        self.assertTrue("production" in ws.store_infos)
    def test_duplicate_store(self):
        # Declaring stores both in the config and in a stores file must fail.
        with self.assertRaises(CubesError):
            ws = Workspace(config=self.data_path("slicer.ini"),
                           stores=self.data_path("stores.ini"))
class WorkspaceModelTestCase(WorkspaceTestCaseBase):
    """Model lookup behaviour: cubes/dimensions across namespaces and
    dimension templating."""
    def test_get_cube(self):
        ws = self.default_workspace()
        cube = ws.cube("contracts")
        self.assertEqual("contracts", cube.name)
        # self.assertEqual(6, len(cube.dimensions))
        self.assertEqual(1, len(cube.measures))
    def test_get_namespace_cube(self):
        # With the default lookup method a namespaced cube can be found by
        # its bare name; "exact" lookup requires the fully qualified name.
        ws = Workspace()
        ws.import_model(self.model_path("model.json"), namespace="local")
        # This should pass
        cube = ws.cube("contracts")
        self.assertIsInstance(cube, Cube)
        self.assertEqual(cube.name, "contracts")
        ws.lookup_method = "exact"
        with self.assertRaises(NoSuchCubeError):
            cube = ws.cube("contracts")
        cube = ws.cube("local.contracts")
        self.assertEqual("local.contracts", cube.name)
    def test_cube_with_dimensions_in_two_namespaces(self):
        ws = Workspace()
        ws.import_model(self.model_path("model.json"), namespace="store1")
        ws.import_model(self.model_path("other.json"), namespace="store2")
        # This should not pass, since the dimension is in another namespace
        with self.assertRaises(NoSuchDimensionError):
            ws.cube("other")
        ws = Workspace()
        ws.import_model(self.model_path("model.json"), namespace="default")
        ws.import_model(self.model_path("other.json"), namespace="store2")
        # This should pass, since the dimension is in the default namespace
        ws.cube("other")
    def test_get_dimension(self):
        ws = self.default_workspace()
        dim = ws.dimension("date")
        self.assertEqual("date", dim.name)
    def test_template(self):
        # Dimensions defined as templates of other dimensions inherit levels.
        ws = Workspace()
        ws.import_model(self.model_path("templated_dimension.json"))
        dim = ws.dimension("date")
        self.assertEqual("date", dim.name)
        self.assertEqual(3, len(dim.levels))
        dim = ws.dimension("start_date")
        self.assertEqual("start_date", dim.name)
        self.assertEqual(3, len(dim.levels))
        dim = ws.dimension("end_date")
        self.assertEqual("end_date", dim.name)
    def test_external_template(self):
        # A template defined in a previously imported model can be referenced
        # from a later model.
        ws = Workspace()
        ws.import_model(self.model_path("templated_dimension.json"))
        ws.import_model(self.model_path("templated_dimension_ext.json"))
        dim = ws.dimension("another_date")
        self.assertEqual("another_date", dim.name)
        self.assertEqual(3, len(dim.levels))
    @unittest.skip("We are lazy now, we don't want to ping the provider for "
                   "nothing")
    def test_duplicate_dimension(self):
        ws = Workspace()
        ws.import_model(self.model_path("templated_dimension.json"))
        model = {"dimensions": [{"name": "date"}]}
        with self.assertRaises(ModelError):
            ws.import_model(model)
    def test_local_dimension(self):
        # Test whether we can use local dimension with the same name as the
        # public one
        ws = Workspace()
        ws.import_model(self.model_path("model_public_dimensions.json"))
        ws.import_model(self.model_path("model_private_dimensions.json"))
        dim = ws.dimension("date")
        self.assertEqual(3, len(dim.levels))
        self.assertEqual(["year", "month", "day"], dim.level_names)
        cube = ws.cube("events")
        dim = cube.dimension("date")
        self.assertEqual(["year", "month", "day"], dim.level_names)
        cube = ws.cube("lonely_yearly_events")
        dim = cube.dimension("date")
        self.assertEqual(["lonely_year"], dim.level_names)
| 2.546875 | 3 |
run-lab/gui.py | mes32/run-lab | 0 | 12773606 | <gh_stars>0
from Tkinter import *
class GUI:
    """Minimal Tk window containing a single Exit button."""
    def __init__(self):
        # Root window plus a packed frame that holds the widgets.
        self.master = Tk()
        frame = Frame(self.master)
        frame.pack()
        # The Exit button quits the Tk event loop started by display().
        self.exitButton = Button(frame, text="Exit", command=frame.quit)
        self.exitButton.pack(side=LEFT)
    def display(self):
        """Show the window and block inside the Tk main loop."""
        self.master.mainloop()
| 3.015625 | 3 |
twitter_fizzbuzz/auth.py | marcusgabrields/twitter_fizzbuzz | 0 | 12773607 | <reponame>marcusgabrields/twitter_fizzbuzz
from decouple import config
from requests_oauthlib import OAuth1Session
# Twitter API credentials read from the environment / .env via python-decouple.
API_KEY = config('API_KEY')
API_SECRET_KEY = config('API_SECRET_KEY')
ACCESS_TOKEN = config('ACCESS_TOKEN')
ACCESS_TOKEN_SECRET = config('ACCESS_TOKEN_SECRET')
def get_auth():
    """Return an OAuth1 session authenticated with the configured Twitter credentials."""
    return OAuth1Session(
        API_KEY,
        API_SECRET_KEY,
        ACCESS_TOKEN,
        ACCESS_TOKEN_SECRET,
    )
| 2.265625 | 2 |
torchflare/callbacks/comet_logger.py | Atharva-Phatak/torchflare | 86 | 12773608 | """Implements Comet Logger."""
from abc import ABC
from typing import TYPE_CHECKING, List
from torchflare.callbacks.callback import Callbacks
from torchflare.callbacks.states import CallbackOrder
from torchflare.utils.imports_check import module_available
if TYPE_CHECKING:
from torchflare.experiments.experiment import Experiment
# Bug fix: the availability probe checked the misspelled module name
# "come_ml", so _AVAILABLE was always False and comet_ml stayed None even
# when the comet_ml package was installed.
_AVAILABLE = module_available("comet_ml")
if _AVAILABLE:
    import comet_ml
else:
    comet_ml = None
class CometLogger(Callbacks, ABC):
    """Callback to log your metrics and loss values to Comet to track your experiments.

    For more information about Comet look at [Comet.ml](https://www.comet.ml/site/)

    Args:
        api_token: Your API key obtained from comet.ml
        params: The hyperparameters for your model and experiment as a dictionary
        project_name: Send your experiment to a specific project.
            Otherwise, will be sent to Uncategorized Experiments.
        workspace: Attach an experiment to a project that belongs to this workspace
        tags: List of strings.

    Examples:
        .. code-block::

            from torchflare.callbacks import CometLogger

            params = {"bs": 16, "lr": 0.3}

            logger = CometLogger(
                project_name="experiment_10",
                workspace="username",
                params=params,
                tags=["Experiment", "fold_0"],
                api_token="your_secret_api_token",
            )
    """

    def __init__(
        self,
        api_token: str,
        params: dict,
        project_name: str,
        workspace: str,
        tags: List[str],
    ):
        """Constructor for CometLogger class."""
        # LOGGING order makes this callback run after metric computation.
        super(CometLogger, self).__init__(order=CallbackOrder.LOGGING)
        self.api_token = api_token
        self.project_name = project_name
        self.workspace = workspace
        self.params = params
        self.tags = tags
        # The comet_ml.Experiment handle; created lazily in on_experiment_start.
        self.experiment = None

    def on_experiment_start(self, experiment: "Experiment"):
        """Start of experiment: create the Comet experiment and attach tags/params."""
        self.experiment = comet_ml.Experiment(
            project_name=self.project_name,
            api_key=self.api_token,
            workspace=self.workspace,
            log_code=False,
            display_summary_level=0,
        )
        if self.tags is not None:
            self.experiment.add_tags(self.tags)
        if self.params is not None:
            self.experiment.log_parameters(self.params)

    def on_epoch_end(self, experiment: "Experiment"):
        """Function to log your metrics and values at the end of every epoch."""
        # Drop the epoch counter from the logged metrics; it is used as the
        # step axis instead.
        logs = {k: v for k, v in experiment.exp_logs.items() if k != experiment.epoch_key}
        self.experiment.log_metrics(logs, step=experiment.exp_logs[experiment.epoch_key])

    def on_experiment_end(self, experiment: "Experiment"):
        """Function to close the experiment when training ends."""
        self.experiment.end()
        self.experiment = None
| 2.6875 | 3 |
statistics.py | clean-code-craft-tcq-1/start-stats-python-DivyaK-bit | 0 | 12773609 | <reponame>clean-code-craft-tcq-1/start-stats-python-DivyaK-bit<gh_stars>0
import math
def calculateStats(numbers):
    """Return a dict with the 'avg', 'min' and 'max' of *numbers*.

    An empty sequence yields NaN for all three statistics.
    """
    if not numbers:
        return {"avg": math.nan, "min": math.nan, "max": math.nan}
    return {
        "avg": sum(numbers) / len(numbers),
        "min": min(numbers),
        "max": max(numbers),
    }
| 2.859375 | 3 |
pybayenv/create_bin_dist.py | kristohr/pybayenv_beta | 0 | 12773610 | <filename>pybayenv/create_bin_dist.py
import sys, string, re, os, commands, time
#from scipy import stats
#import scipy as sp
import numpy as np
#import matplotlib as mpl
#from matplotlib import pyplot as plt
class Locus_diff:
    """Aggregates Bayes-factor results for one locus across several bayenv runs.

    Tracks which runs reported the locus, the running sum of its Bayes
    factors, and the individual values for median/average summaries.
    """
    def __init__(self, name, num_files):
        self.name = name.strip()
        # One 0/1 flag per input file; set_in_run() marks a file.
        # Bug fix: the original built this with Python-2-only xrange and
        # needlessly wrapped each flag in its own single-element list; the
        # flat flags produce the same text output once create_bin_matrix
        # strips the brackets.
        self.is_in_run = [0] * num_files
        self.sum_bf = 0
        self.num_runs = 0
        self.bf_list = []

    def get_name(self):
        return self.name

    def set_in_run(self, run):
        """Mark this locus as significant in run *run*."""
        self.is_in_run[run] = 1
        self.num_runs += 1

    def get_in_run(self):
        return self.is_in_run

    def add_bf(self, bf):
        """Accumulate *bf* into the running Bayes-factor sum."""
        self.sum_bf += bf

    def add_to_list(self, bf):
        self.bf_list.append(bf)

    def get_sum_bf(self):
        return self.sum_bf

    def get_num_runs(self):
        return self.num_runs

    def get_median_bf(self):
        """Median of all recorded Bayes factors."""
        factors = np.array(self.bf_list)
        return np.median(factors)

    def get_average_bf(self):
        """Mean of all recorded Bayes factors."""
        factors = np.array(self.bf_list)
        return np.average(factors)

    def get_bf_list(self):
        return self.bf_list
def create_bin_matrix(in_files, n):
    """Read per-run significance files, aggregate them per locus, and write a
    presence/absence matrix plus average and median Bayes-factor files for
    environmental variable *n* into results/.

    Python 2 code (print statements).  Each input file is expected to be
    tab-separated: marker name in column 0, Bayes factor in column 1.
    """
    num_files = len(in_files)
    out_file = "results/" + str(n) + "bin_dist.txt"
    avg_file = "results/" + str(n) + "average_bf.txt"
    median_file = "results/" + str(n) + "median_bf.txt"
    locus_list = [] #List of locus names
    in_list = {}
    locus_dict = {}
    # Pass 1: collect every locus and its Bayes factors across all files.
    for i in range(0, num_files):
        #print in_files[i]
        dataset = open(in_files[i], 'r')
        lines = dataset.readlines()
        for line in lines:
            data = line.split("\t")
            name = data[0]
            # Marker names carry a run-index suffix whose width depends on
            # the file index (1 digit for runs 0-9, 2 digits afterwards).
            if i < 10:
                name = name[:-2] #Removing last two chars from marker name
            else:
                name = name[:-3] #Removing last three chars from marker name
            res = data[1]
            if (not name in in_list):
                locus = Locus_diff(name, num_files)
                locus_list.append(locus)
                locus.set_in_run(i)
                locus.add_bf(float(res))
                in_list[name] = locus
                locus.add_to_list(float(res))
            else:
                in_list[name].set_in_run(i)
                in_list[name].add_bf(float(res))
                in_list[name].add_to_list(float(res))
        #print "New file ..............................................."
    #all_data = "Marker\t5000_Iterations\t10000_Iterations\t15000_Iterations\t20000_Iterations\t25000_Iterations\t30000_Iterations\n"
    all_data = ""
    # Pass 2: one tab-separated row per locus (flags per run + run count).
    for i in range(0, len(locus_list)):
        data_line = "" + locus_list[i].get_name() + "\t"
        data = locus_list[i].get_in_run()
        for j in range(0, len(data)):
            data_line += str(data[j]) + "\t"
        #data_line += "0" #dummy for random var.
        data_line += str(locus_list[i].get_num_runs()) + "\t"
##        bf_list = locus_list[i].get_bf_list()
##        for j in range(0, len(bf_list)):
##            data_line += str(bf_list[j]) + "\t"
        all_data += data_line + "\n"
    # Strip list brackets leaking from str() of the per-run flag entries.
    all_data = all_data.replace("[", "")
    all_data = all_data.replace("]", "")
    #print all_data
    print "Total significant SNPs for var " + str(n) + " is " + str(len(in_list))
    FILE = open(out_file, 'w')
    FILE.write(all_data)
    FILE.close()
    # Per-locus average and median Bayes factors, one "name\tvalue" per line.
    average_bf = ""
    median_bf = ""
    for i in range(0, len(locus_list)):
        average_bf += locus_list[i].get_name() + "\t"
        #average_bf += str(locus_list[i].get_sum_bf()/num_files) + "\n"
        average_bf += str(locus_list[i].get_average_bf()) + "\n"
        median_bf += locus_list[i].get_name() + "\t"
        median_bf += str(locus_list[i].get_median_bf()) + "\n"
    FILE = open(avg_file, 'w')
    FILE.write(average_bf)
    FILE.close()
    FILE = open(median_file, 'w')
    FILE.write(median_bf)
    FILE.close()
tracking_policy_agendas/classifiers/xgb_clf.py | MohammadForouhesh/Computational-Political-Science-Papers-Implementations | 1 | 12773611 | <gh_stars>1-10
from xgboost import XGBClassifier
from .meta_clf import MetaClf
class XgbClf(MetaClf):
    """XGBoost-backed classifier: MetaClf wired to a fixed XGBClassifier."""

    def __init__(self, **kwargs):
        """Forward all keyword arguments to MetaClf, fixing the estimator
        to a 300-tree XGBClassifier."""
        super().__init__(classifier_instance=XGBClassifier(n_estimators=300), **kwargs)
| 2.078125 | 2 |
problems/leetcode/convert-sorted-array-to-binary-search-tree.py | AravindVasudev/datastructures-and-algorithms | 1 | 12773612 | <gh_stars>1-10
# https://leetcode.com/problems/convert-sorted-array-to-binary-search-tree/
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """Build a height-balanced BST from a sorted array (LeetCode 108)."""

    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
        """Return the root of a height-balanced BST containing ``nums``."""
        return self.generateTree(nums, 0, len(nums) - 1)

    def generateTree(self, nums, left, right):
        """Recursively build the subtree for nums[left..right] (inclusive)."""
        if left > right:
            return None
        # Rooting at the middle element keeps the two halves balanced.
        middle = (left + right) // 2
        root = TreeNode(nums[middle])
        root.left = self.generateTree(nums, left, middle - 1)
        root.right = self.generateTree(nums, middle + 1, right)
        return root
| 3.53125 | 4 |
clip/mangled/__torch__/torch/nn/modules/linear/___torch_mangle_9449.py | shawwn/CLIP | 6 | 12773613 | <filename>clip/mangled/__torch__/torch/nn/modules/linear/___torch_mangle_9449.py
class _LinearWithBias(Module):
    # TorchScript-mangled serialization stub: attribute schema only, no logic.
    __parameters__ = ["weight", "bias", ]
    __buffers__ = []
    weight : Tensor  # weight matrix of the linear layer
    bias : Tensor    # bias vector (always present in this variant)
    training : bool  # standard nn.Module training-mode flag
| 1.3125 | 1 |
alembic/versions/2afbc7d30a21_add_ip_address_to_database.py | PeterGrace/pi_director | 12 | 12773614 | <filename>alembic/versions/2afbc7d30a21_add_ip_address_to_database.py
"""add ip address to database
Revision ID: 2afbc7d30a21
Revises: <KEY>
Create Date: 2015-09-08 21:19:59.721831
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable text column ``ip`` to the PiUrl table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('PiUrl', sa.Column('ip', sa.Text(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the ``ip`` column from PiUrl, reversing upgrade()."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('PiUrl', 'ip')
    ### end Alembic commands ###
| 1.585938 | 2 |
backend/Film/models.py | jspaeza/AppDjangoVue | 0 | 12773615 | <filename>backend/Film/models.py<gh_stars>0
from django.db import models
class Film(models.Model):
    """A film record: title, synopsis, and an optional poster image."""

    title = models.CharField(max_length=50)
    sinopsis = models.TextField()
    # null=True means ``imagen`` (and hence imagen.name) may be unset.
    imagen = models.FileField(blank=False, null=True)

    def __str__(self):
        """Return the stored image file name, falling back to the title
        when no image has been uploaded.

        The original returned ``self.imagen.name`` unconditionally, which
        is None for rows without an image and made str(film) raise.
        """
        return self.imagen.name if self.imagen else self.title
| 1.875 | 2 |
tools/prune.py | CnybTseng/YOLOv3 | 1 | 12773616 | <reponame>CnybTseng/YOLOv3<gh_stars>1-10
# -*- coding: utf-8 -*-
# file: prune.py
# brief: YOLOv3 implementation based on PyTorch
# author: <NAME>
# date: 2019/8/15
from __future__ import print_function
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from os.path import splitext
from io import StringIO
from functools import partial
import timeit
import json
import copy
import glob
import sys
import cv2
import os
import re
sys.path.append('.')
import utils
import darknet as net
import dataset as ds
import yolov3
def save_print_stdout(object, filename):
    """Write exactly what ``print(object)`` would emit into ``filename``.

    The original implementation swapped ``sys.stdout`` and restored it by
    hand; if printing (e.g. a __repr__) raised, the redirect leaked and all
    subsequent output went to the StringIO buffer.  Printing straight to
    the file handle produces identical output and is exception-safe.
    """
    with open(filename, 'w') as file:
        print(object, file=file)
def save_model_parameter_as_file(model, path):
    """Dump selected layer parameters as plain-text files under ``path``.

    Conv2d layers contribute their bias (when present); BatchNorm2d layers
    contribute weight, bias and both running statistics.  Files are named
    ``<module name>.<parameter>.txt``.
    """
    def _dump(name, attr, tensor):
        # One text file per tensor, flattened by np.savetxt.
        np.savetxt(f'{path}/{name}.{attr}.txt', tensor.data.numpy())

    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Conv2d):
            if module.bias is not None:
                _dump(name, 'bias', module.bias)
        elif isinstance(module, torch.nn.BatchNorm2d):
            for attr in ('weight', 'bias', 'running_mean', 'running_var'):
                _dump(name, attr, getattr(module, attr))
def calc_prune_thresh(model, pr, workspace, force_thresh=0):
    """Compute the batch-norm |gamma| threshold used for channel pruning.

    Collects |gamma| from every prunable BatchNorm2d layer (according to
    ``model.prune_permit``), takes the ``pr`` quantile of all values as the
    threshold, optionally overrides it with ``force_thresh``, and caps it
    just below the smallest per-layer maximum so no layer loses all of its
    channels.  Writes the sorted gamma list and one diagnostic plot per
    layer under ``<workspace>/log``.  Returns the final threshold.
    """
    gamma = list()       # per-layer lists of |gamma| values
    layer_id = list()    # prune_permit ids, used in the plot file names
    layer_name = list()
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.BatchNorm2d) and model.prune_permit[name][1]:
            gamma.append(module.weight.cpu().data.abs().numpy().tolist())
            layer_id.append(model.prune_permit[name][0])
            layer_name.append(name)
    gamma_all = list()
    max_gammas = list()
    for g in gamma:
        gamma_all += g
        max_gammas.append(np.max(g))
    max_thresh = np.min(max_gammas)
    gamma_all = np.sort(np.array(gamma_all))
    # BUG FIX: the prune ratio is the ``pr`` parameter; the original read
    # the module-level ``args.pr`` here, silently ignoring the argument.
    thresh_index = np.int32(pr * gamma_all.shape[0])
    thresh = gamma_all[thresh_index]
    if force_thresh > 0:
        thresh = force_thresh
    with open(f'{workspace}/log/gamma.txt', 'w') as file:
        for g in gamma_all:
            file.write(f'{g}\n')
        file.write(f'thresh_index:{thresh_index}\n')
    print(f'original prune threshold: {thresh}')
    print(f'max_gammas = {max_gammas}')
    print(f'prune threshold should be less than {max_thresh}')
    thresh = min(thresh, max_thresh)
    print(f'tuned prune threshold: {thresh}')
    num_subplots = len(gamma)
    for k in range(num_subplots):
        # Per-layer plot: gamma values against the chosen threshold line.
        num_prune = np.sum(np.array(gamma[k]) < thresh)
        plt.title(f'{layer_name[k]},{num_prune}/{len(gamma[k])},{thresh}')
        plt.plot(gamma[k], 'r-+')
        plt.plot([thresh] * len(gamma[k]), 'b--')
        plt.xlabel('Channel Index')
        plt.ylabel('Gamma Abs. Value')
        plt.savefig(f'{workspace}/log/layer_{layer_id[k]}.jpg', dpi=120)
        plt.clf()
    return thresh
def make_prune_config(model, thresh):
    """Build the per-conv pruning plan from BN gamma magnitudes.

    For every prunable BatchNorm2d, channels with |gamma| >= ``thresh``
    survive; that boolean mask becomes the paired conv's ``out_mask`` and
    is propagated as the ``in_mask`` of the next conv seen (plus two
    hard-wired skip connections, cbrl9<-cbrl8 and cbrl12<-cbrl11).
    Returns {conv name: {'out_mask', 'in_mask', 'parent'}}.
    """
    prune_config = {}
    # Name of the most recently pruned conv, consumed by the next Conv2d.
    parent_conv = None
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Conv2d):
            if parent_conv is not None:
                # This conv consumes the previous pruned conv's outputs.
                prune_config[name] = {'in_mask':prune_config[parent_conv]['out_mask']}
                prune_config[name]['parent'] = parent_conv.replace('conv', 'norm')
                parent_conv = None
            elif 'cbrl9' in name:
                # Skip connection: cbrl9 shares cbrl8's input mask/parent.
                prune_config[name] = {'in_mask':prune_config['cbrl8.conv']['in_mask']}
                prune_config[name]['parent'] = prune_config['cbrl8.conv']['parent']
            elif 'cbrl12' in name:
                # Skip connection: cbrl12 shares cbrl11's input mask/parent.
                prune_config[name] = {'in_mask':prune_config['cbrl11.conv']['in_mask']}
                prune_config[name]['parent'] = prune_config['cbrl11.conv']['parent']
        elif isinstance(module, torch.nn.BatchNorm2d) and model.prune_permit[name][1]:
            # True for channels that survive pruning.
            mask = module.weight.cpu().data.abs().ge(thresh).numpy().tolist()
            parent_conv = name.replace('norm', 'conv')
            if parent_conv in prune_config:
                prune_config[parent_conv]['out_mask'] = mask
            else:
                prune_config[parent_conv] = {'out_mask':mask}
    return prune_config
def find_module(model, name):
    """Resolve a dotted attribute path (e.g. ``'cbrl8.conv'``) on ``model``.

    Uses getattr() rather than calling ``__getattr__`` directly: the direct
    dunder call bypassed normal attribute lookup, so it only found names
    served by ``__getattr__`` (nn.Module submodules) and failed on plain
    instance attributes.  getattr() handles both.
    """
    module = model
    for part in name.split('.'):
        module = getattr(module, part)
    return module
def model_slimming(model, prune_config):
    """Rebuild pruned Conv2d/BatchNorm2d modules per ``prune_config`` and
    splice them back into ``model``; returns the slimmed model.

    When an upstream layer loses channels, the contribution each removed
    channel's bias would have made (after the 0.1 leaky-ReLU) is folded
    into the downstream conv bias or BN running_mean, so the network
    output is preserved.  Statement order matters throughout.
    """
    new_modules = {}  # name -> replacement module, spliced in at the end
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Conv2d):
            norm_name = name.replace('conv', 'norm')
            if norm_name in model.prune_permit and model.prune_permit[norm_name][1]:
                # Prunable conv: shrink output channels (and inputs too if
                # the upstream layer was pruned).
                out_mask = prune_config[name]['out_mask']
                out_indices = np.argwhere(out_mask)[:,0].tolist()
                out_channels = sum(out_mask)
                kernel_size = module.kernel_size
                stride = module.stride
                padding = module.padding
                bias = module.bias is not None # biases all be False
                if 'in_mask' in prune_config[name]:
                    in_mask = prune_config[name]['in_mask']
                    in_indices = np.argwhere(in_mask)[:,0].tolist()
                    in_channels = sum(in_mask)
                    new_modules[name] = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
                    new_modules[name].weight.data = module.weight.data[out_indices,:,:,:][:,in_indices,:,:].clone()
                    print(f'copy input and output channels changed {name} done. {sum(in_mask)}/{len(in_mask)} {sum(out_mask)}/{len(out_mask)}')
                else:
                    in_channels = module.in_channels
                    new_modules[name] = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
                    new_modules[name].weight.data = module.weight.data[out_indices,:,:,:].clone()
                    print(f'copy output channels changed {name} done. {sum(out_mask)}/{len(out_mask)}')
            else:
                # Non-prunable conv: shape kept, but trim input channels when
                # the upstream layer lost channels, folding their bias in.
                out_channels = module.out_channels
                kernel_size = module.kernel_size
                stride = module.stride
                padding = module.padding
                bias = module.bias is not None
                if name in prune_config and 'in_mask' in prune_config[name]:
                    in_mask = prune_config[name]['in_mask']
                    in_channels = sum(in_mask)
                    in_indices = np.argwhere(in_mask)[:,0].tolist()
                    new_modules[name] = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=bias)
                    new_modules[name].weight.data = module.weight.data[:,in_indices,:,:].clone()
                    print(f'copy input channels changed {name} done. {sum(in_mask)}/{len(in_mask)}')
                    if bias:
                        print(f'the shape of {name} is not changed, but we should update its parameters!')
                        new_modules[name].bias.data = module.bias.data.clone()
                        parent_module = find_module(model, prune_config[name]['parent'])
                        print(f"find parent {prune_config[name]['parent']} {parent_module}")
                        if sum(in_mask) == len(in_mask):
                            print(f'parent module has not been pruned!')
                            continue
                        # Fold the removed channels' (leaky-ReLU'd) bias
                        # contribution into this conv's bias.
                        prune_indices = np.argwhere(1 - np.array(in_mask))[:,0].tolist()
                        residual_bias = parent_module.bias[prune_indices]
                        residual_bias = F.leaky_relu(residual_bias, negative_slope=0.1)
                        filter_sumel = module.weight.data[:,prune_indices,:,:].sum(dim=(2,3))
                        offset = filter_sumel.matmul(residual_bias.reshape(-1,1)).reshape(-1)
                        new_modules[name].bias.data.add_(offset)
                else:
                    print(f'the {name} is not changed absolutely!')
        elif isinstance(module, torch.nn.BatchNorm2d):
            conv_name = name.replace('norm', 'conv')
            if model.prune_permit[name][1]:
                # Prunable BN: keep only the surviving channels' statistics.
                mask = prune_config[conv_name]['out_mask']
                num_features = sum(mask)
                momentum = module.momentum
                indices = np.argwhere(mask)[:,0].tolist()
                new_modules[name] = torch.nn.BatchNorm2d(num_features=num_features, momentum=momentum)
                new_modules[name].bias.data = module.bias.data[indices].clone()
                new_modules[name].weight.data = module.weight.data[indices].clone()
                new_modules[name].running_var.data = module.running_var.data[indices].clone()
                new_modules[name].running_mean.data = module.running_mean.data[indices].clone()
                new_modules[name].num_batches_tracked = module.num_batches_tracked
                print(f'copy {name} done. {sum(mask)}/{len(mask)}')
                if 'parent' in prune_config[conv_name]:
                    print('and we also need to update its parameters')
                    parent_module = find_module(model, prune_config[conv_name]['parent'])
                    print(f"find parent {prune_config[conv_name]['parent']} {parent_module}")
                    parent_conv = prune_config[conv_name]['parent'].replace('norm', 'conv')
                    mask = prune_config[parent_conv]['out_mask']
                    if sum(mask) == len(mask):
                        print(f'parent module has not been pruned!')
                        continue
                    # Absorb the removed upstream bias into running_mean
                    # (restricted to the surviving BN channels).
                    prune_indices = np.argwhere(1 - np.array(mask))[:,0].tolist()
                    residual_bias = parent_module.bias[prune_indices]
                    residual_bias = F.leaky_relu(residual_bias, negative_slope=0.1)
                    conv_module = find_module(model, conv_name)
                    print(f'find current {conv_name} {conv_module}')
                    filter_sumel = conv_module.weight.data[:,prune_indices,:,:].sum(dim=(2,3))
                    offset = filter_sumel.matmul(residual_bias.reshape(-1,1)).reshape(-1)
                    new_modules[name].running_mean.data.sub_(offset[indices])
            else:
                if conv_name in prune_config:
                    # Shape unchanged but upstream was pruned: copy everything
                    # and correct running_mean by the absorbed bias offset.
                    print(f'the shape of {name} is not changed, but we should update its parameters!')
                    num_features = module.num_features
                    momentum = module.momentum
                    new_modules[name] = torch.nn.BatchNorm2d(num_features=num_features, momentum=momentum)
                    new_modules[name].bias.data = module.bias.data.clone()
                    new_modules[name].weight.data = module.weight.data.clone()
                    new_modules[name].running_var.data = module.running_var.data.clone()
                    new_modules[name].running_mean.data = module.running_mean.data.clone()
                    new_modules[name].num_batches_tracked = module.num_batches_tracked
                    parent_module = find_module(model, prune_config[conv_name]['parent'])
                    print(f"find parent {prune_config[conv_name]['parent']} {parent_module}")
                    parent_conv = prune_config[conv_name]['parent'].replace('norm', 'conv')
                    mask = prune_config[parent_conv]['out_mask']
                    if sum(mask) == len(mask):
                        print(f'parent module has not been pruned!')
                        continue
                    prune_indices = np.argwhere(1 - np.array(mask))[:,0].tolist()
                    residual_bias = parent_module.bias[prune_indices]
                    residual_bias = F.leaky_relu(residual_bias, negative_slope=0.1)
                    conv_module = find_module(model, conv_name)
                    print(f'find current {conv_name} {conv_module}')
                    filter_sumel = conv_module.weight.data[:,prune_indices,:,:].sum(dim=(2,3))
                    offset = filter_sumel.matmul(residual_bias.reshape(-1,1)).reshape(-1)
                    new_modules[name].running_mean.data.sub_(offset)
                else:
                    print(f'the {name} is not changed absolutely!')
    # Splice the rebuilt modules back into the model tree by dotted name.
    for name in new_modules:
        parent_module = model
        hierarchies = name.split(".")
        if len(hierarchies) == 1:
            model.__setattr__(name, new_modules[name])
            continue
        for h in hierarchies[:-1]:
            parent_module = parent_module.__getattr__(h)
        parent_module.__setattr__(hierarchies[-1], new_modules[name])
    return model
def inference(model, decoder, filename, in_size, class_names):
    """Run one detection pass on an image file.

    Returns (annotated BGR image, forward-pass latency in seconds, raw
    decoded network output).  ``decoder`` maps the network's feature maps
    to box predictions; NMS and drawing are done by the utils helpers.
    """
    model.eval()
    transform = ds.get_transform(train=False, net_w=in_size[0], net_h=in_size[1])
    FloatTensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    im = cv2.imread(filename, cv2.IMREAD_COLOR)
    # cv2 loads BGR; the network is fed RGB.
    rgb = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    x, _ = transform(rgb, None)
    x = x.type(FloatTensor) / 255.0
    # Latency covers both the forward pass and the decode step.
    start = timeit.default_timer()
    xs = model(x)
    y = decoder(xs)
    end = timeit.default_timer()
    latency = end - start
    z = utils.get_network_boxes(y.clone(), im.shape[:2], thresh=0.5)
    nms = utils.do_nms_sort(z)
    result = utils.overlap_detection(im, nms, class_names)
    return result, latency, y
def compare_models(model1, model2):
    """Compare two models' state dicts entry by entry.

    Prints every mismatching parameter name, prints a confirmation when
    the models match, and returns the number of mismatching entries (the
    original returned None, making the result unusable by callers).

    Raises:
        ValueError: if the state dicts disagree on parameter names, i.e.
            the models do not share an architecture.  (ValueError is a
            subclass of Exception, which the original raised bare.)
    """
    models_differ = 0
    for key_item1, key_item2 in zip(model1.state_dict().items(), model2.state_dict().items()):
        if torch.equal(key_item1[1], key_item2[1]):
            continue
        models_differ += 1
        if key_item1[0] == key_item2[0]:
            print('mismatch found at', key_item1[0])
        else:
            raise ValueError(f'state dict keys differ: {key_item1[0]} vs {key_item2[0]}')
    if models_differ == 0:
        print('models match perfectly!')
    return models_differ
if __name__ == '__main__':
    # CLI: prune a trained YOLOv3 model, or test an already-pruned one.
    parser = argparse.ArgumentParser()
    parser.add_argument('--in-size', type=str, default='416,416', help='network input size')
    parser.add_argument('--model', type=str, default='', help='model file')
    parser.add_argument('--dataset', type=str, default='', help='dataset path')
    parser.add_argument('--num-classes', type=int, default=3, help='number of classes')
    parser.add_argument('--prune-ratio', '-pr', dest='pr', type=float, default=0.15, help='prune ratio')
    parser.add_argument('--thresh', type=float, default=0, help='prune threshold')
    parser.add_argument('--image', type=str, default='', help='test image filename')
    parser.add_argument('--test-prune-model', '-test', dest='test', help='test pruned model', action='store_true')
    parser.add_argument('--eval', help='evaluate pruned model', action='store_true')
    parser.add_argument('--eval-epoch', dest='eval_epoch', type=int, default=0, help='epoch beginning evaluate')
    parser.add_argument('--workspace', type=str, default='workspace', help='workspace path')
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    in_size = [int(insz) for insz in args.in_size.split(',')]
    # Anchors and class names ship with the dataset directory.
    anchors = np.loadtxt(os.path.join(args.dataset, 'anchors.txt'))
    class_names = utils.load_class_names(os.path.join(args.dataset, 'classes.txt'))
    decoder = yolov3.YOLOv3EvalDecoder(in_size, len(class_names), anchors)
    # Fast path: only exercise an already-pruned model, then exit.
    if args.test:
        model = torch.load(args.model, map_location=device)
        result, latency, _ = inference(model, decoder, args.image, in_size, class_names)
        print(f'pruned model latency is {latency} seconds.')
        cv2.imwrite('detection/detection-prune.jpg', result)
        sys.exit()
    # if args.eval:
    #     dataset = ds.CustomDataset(args.dataset, 'test')
    #     data_loader = torch.utils.data.DataLoader(
    #         dataset=dataset,
    #         batch_size=1,
    #         shuffle=False,
    #         num_workers=0,
    #         collate_fn=partial(ds.collate_fn, in_size=torch.IntTensor(in_size), train=False))
    #
    #     if os.path.isfile(args.model):
    #         model = torch.load(args.model, map_location=device)
    #         model.eval()
    #         mAP = evaluate(model, decoder, data_loader, device, args.num_classes)
    #         print(f'mAP of {args.model} on validation dataset:%.2f%%' % (mAP * 100))
    #         sys.exit()
    #     elif os.path.isdir(args.model):
    #         paths = list(sorted(glob.glob(os.path.join(args.model, '*.pth'))))
    #         mAPs = list()
    #         for path in paths:
    #             if 'trainer' in path: continue
    #             segments = re.split(r'[-,.]', path)
    #             if int(segments[-2]) < args.eval_epoch: continue
    #             model = torch.load(path, map_location=device)
    #             model.eval()
    #             mAP = evaluate(model, decoder, data_loader, device, args.num_classes)
    #             mAPs.append(mAP)
    #             with open(f'{args.workspace}/log/evaluation.txt', 'a') as file:
    #                 file.write(f'{int(segments[-2])} {mAP}\n')
    #                 file.close()
    #             print(f'mAP of {path} on validation dataset:%.2f%%' % (mAP * 100))
    #         mAPs = np.array(mAPs)
    #         epoch = np.argmax(mAPs)
    #         print(f'Best model is ckpt-{epoch+args.eval_epoch}, best mAP is %.2f%%' % (mAPs[epoch] * 100))
    # Load the full-size model and the table of layers allowed to be pruned.
    model = net.DarkNet(anchors, in_size=in_size, num_classes=args.num_classes).to(device)
    model.load_state_dict(torch.load(args.model, map_location=device))
    model.load_prune_permit('model/prune_permit.json')
    save_print_stdout(model, f'{args.workspace}/log/model.txt')
    # don't use 'model.eval()' in case of 'batch_norm_dead_output' error in exported onnx model
    # save_model_parameter_as_file(model, 'log/0')
    model_copy = copy.deepcopy(model)
    if args.image:
        # Baseline detection on the unpruned copy for before/after comparison.
        # don't use 'model' in case of 'batch_norm_dead_output' error in exported onnx model
        result, latency, y = inference(model_copy, decoder, args.image, in_size, class_names)
        print(f'original model latency is {latency} seconds.')
        cv2.imwrite('detection/detection.jpg', result)
        np.savetxt(f'{args.workspace}/log/0.txt', y.data.numpy().flatten())
    # Prune: threshold -> per-layer masks -> rebuilt (slimmed) model.
    thresh = calc_prune_thresh(model, args.pr, args.workspace, force_thresh=args.thresh)
    prune_config = make_prune_config(model, thresh)
    model = model_slimming(model, prune_config)
    # save_model_parameter_as_file(model, 'log/1')
    torch.save(model.state_dict(), f"{args.workspace}/log/yolov3-prune.pth")
    torch.save(model, f"{args.workspace}/log/yolov3-prune-full.pth")
    # compare_models(model_copy, model)
    save_print_stdout(model, f'{args.workspace}/log/model-prune.txt')
    with open(f'{args.workspace}/log/prune_config.json', 'w') as file:
        file.write(json.dumps(prune_config))
        file.close()
    if args.image:
        # Re-run detection with the pruned model for comparison.
        result, latency, y = inference(model, decoder, args.image, in_size, class_names)
        print(f'pruned model latency is {latency} seconds.')
        cv2.imwrite('detection/detection-prune.jpg', result)
        np.savetxt(f'{args.workspace}/log/1.txt', y.data.numpy().flatten())
    else:
        # No test image: run a random tensor through to sanity-check shapes.
        print('test pruned model...', end='')
        x = torch.rand(1, 3, 416, 416, dtype=torch.float32).to(device)
        ys = model(x)
        for y in ys:
            print(f'done\noutput size is {y.size()}')
backend/server/manage.py | map34/OceanHub | 0 | 12773617 | <reponame>map34/OceanHub<filename>backend/server/manage.py
from flask_script import Command, Manager
from oceanhub import app
from oceanhub.scripts.shell import RunOceanHubShell
from oceanhub.scripts.tests import RunOceanHubTests
manager = Manager(app)
class RunOceanHubServer(Command):
    """flask_script command: run the dev server on all interfaces, port 5000."""

    def run(self):
        # debug=True enables the reloader/debugger -- development use only.
        app.run(
            host='0.0.0.0',
            debug=True,
            port=5000
        )
# Register the CLI sub-commands and dispatch when run as a script.
manager.add_command('shell', RunOceanHubShell())
manager.add_command('runserver', RunOceanHubServer())
manager.add_command('test', RunOceanHubTests())

if __name__ == '__main__':
    manager.run()
| 1.9375 | 2 |
e2end/model/fast_compilation.py | oplatek/e2end | 14 | 12773618 | <reponame>oplatek/e2end<filename>e2end/model/fast_compilation.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import E2E_property_decodingBase
import tensorflow as tf
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class FastComp(E2E_property_decodingBase):
    '''Dummy class just for debugging training loop - it compiles fast.'''

    def __init__(self, config):
        """Build a minimal TF graph that touches every input placeholder,
        so running it validates the feed dict without a real model."""
        # The original assigned self.config twice; the duplicate was removed.
        self.step, self.config = 0, config
        self._var2save = tf.Variable([1])
        self._define_inputs()
        # Concatenate every input (cast to float, column-shaped) into one op.
        arr = [
            self.turn_len, self.dec_targets, self.target_lens,
            self.is_first_turn, self.feed_previous,
            self.enc_dropout_keep, self.dec_dropout_keep,
        ] + self.feat_list
        self.testTrainOp = tf.concat(0, [tf.to_float(tf.reshape(x, (-1, 1))) for x in arr])

    def train_step(self, sess, train_dict, log_output=False):
        """Fake training step: evaluate the dummy op and log the feed dict."""
        self.step += 1
        sess.run(self.testTrainOp, train_dict)
        logger.debug('train_dict: %s', train_dict)
        logger.debug('input_feed_dict_shape: %s', [(k, v.shape) if hasattr(v, 'shape') else (k, v) for k, v in train_dict.items()])
        # The original had a third, unreachable ``return {}`` after this
        # if/else (both branches already return); it was removed.
        if log_output:
            return {'reward': -666, 'summarize': tf.Summary(value=[tf.Summary.Value(tag='dummy_loss', simple_value=-666)])}
        return {}

    def decode_step(self, sess, input_feed_dict):
        """Fake decoding step with constant outputs."""
        logger.debug('input_feed_dict: %s', input_feed_dict)
        return {'decoder_outputs': [[0]], 'loss': -777}

    def eval_step(self, sess, labels_dict, log_output=False):
        """Fake evaluation step with constant outputs and summary."""
        return {'decoder_outputs': [[1]], 'loss': -777,
                'summarize': tf.Summary(value=[tf.Summary.Value(tag='dummy_loss', simple_value=-666)]),
                'reward': -888}
| 2.015625 | 2 |
examples/conf.py | simple-lang/simple-docs | 8 | 12773619 | <reponame>simple-lang/simple-docs
import sys
import os
import shlex
# Make this directory importable (needed for the local ``simple_lexer``).
this_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, this_path)

# Sphinx extensions enabled for this documentation build.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode'
]

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

# Project metadata shown in the rendered docs.
project = u'Simple Examples'
copyright = u'2018-2019, <NAME> and the Simple community (MIT)'
author = u'<NAME>'
version = '0.1'
release = '0.1'

# Imported for its side effects; presumably registers the Simple
# language lexer with Pygments -- TODO confirm.
import simple_lexer

html_theme = "nature"
| 1.546875 | 2 |
tools/UpInterrJava.py | Hydrogen-OS-P/build | 33 | 12773620 | <gh_stars>10-100
#!/usr/bin/env python
'''
Created on 2012-12-20
@author: jock
'''
import sys
import os
import re
DIRECT_METHOD_FLAG = "# direct methods"
class UpInterrJava(object):
    '''
    Patch add-on resource ids into decompiled internal R$<type>.smali files.
    (Python 2 code: uses print statements, file() and dict.has_key.)
    '''

    def __init__(self, addFile, inDir):
        '''
        addFile: merge_add.txt listing "<type> <name> <id>" triples.
        inDir:   directory containing the R$<type>.smali files to patch.
        '''
        self.idsList = self.getAddIdList(addFile)
        self.inDir = inDir

    def getAddIdList(self, addFile):
        '''
        Parse merge_add.txt into {resource type: [[name, id], ...]}.
        Lines that do not have exactly three fields are reported and skipped.
        '''
        upfile = file(addFile, 'r')
        idList = {}
        for line in upfile.readlines():
            itemList = line.split()
            if (len(itemList) == 3):
                # print itemList[0], itemList[1], itemList[2]
                if idList.has_key(itemList[0]) is False:
                    idList[itemList[0]] = []
                idList[itemList[0]].append([itemList[1], itemList[2]])
            else:
                print "WRONG merged_add.txt: %s" % line
        return idList

    def upInterrJava(self):
        '''
        Rewrite each R$<type>.smali in place: drop any existing field line
        for the resource, then insert the new field just before the
        "# direct methods" marker.
        '''
        for rType in self.idsList.keys():
            resFileName = r'%s/R$%s.smali' % (self.inDir, rType)
            if os.path.exists(resFileName):
                resFile = file(resFileName, 'r+')
                fileContent = resFile.read()
                for addItem in self.idsList[rType]:
                    rName = addItem[0]
                    rId = addItem[1]
                    # Style names use '_' in smali where the txt uses '.'.
                    if rType == "style":
                        rName = rName.replace(r'.', r'_')
                    linefill = "\n.field public static final %s:I = %s\n" % (rName, rId)
                    # Remove any stale definition of the same field first.
                    fileContent = re.sub(r'^.field public static final %s:I *=.*$' %(rName), '', fileContent, 0, re.M)
                    fileContent = fileContent.replace("\n%s" % DIRECT_METHOD_FLAG, "%s\n%s" % (linefill, DIRECT_METHOD_FLAG), 1)
                # Truncate-and-rewrite the whole file with the patched text.
                resFile.seek(0, 0)
                resFile.truncate()
                resFile.write(fileContent)
                resFile.close()
            else:
                print "%s not exist!!" % resFileName
def main():
    """CLI entry: validate argv, then patch the given R directory."""
    if len(sys.argv) < 3:
        print " usage:./UpInterr.py <MAP_ADD_FILE> <R_DIR> "
        print "eg. : ./UpInterr.py merge_add.txt framework.jar.out/smali/com/android/internal/"
        sys.exit(1)
    print "start update %s/R*.smali ..." % sys.argv[2]
    UpInterrJava(sys.argv[1], sys.argv[2]).upInterrJava()
    print "update done!!"

if __name__ == '__main__':
    main()
| 2.71875 | 3 |
Tasks/Filter.ByExtensions/task.py | esprengle/python-droppy-workspace | 2 | 12773621 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from shutil import copyfile
class Task(object):
    """
    Copy input items whose extension matches the allow-list to the output.

    Documentation: https://docs.droppyapp.com/tasks/filter-by-extensions
    """

    def __init__(self, input_dir, output_dir, **kwargs):
        # Extensions to keep; defaults to a few text-like formats.
        extensions = kwargs.get(str('extensions'), ['txt', 'json', 'xml'])
        for entry in os.listdir(input_dir):
            entry_path = os.path.join(input_dir, entry)
            if os.path.isfile(entry_path):
                self.check_and_copy(extensions, entry_path, os.path.join(output_dir, entry))
            elif os.path.isdir(entry_path):
                # Recreate the whole directory tree, then copy matching files.
                os.makedirs(os.path.join(output_dir, entry))
                for root, dirs, files in os.walk(entry_path):
                    relative = root[len(entry_path) + 1:]
                    for sub_dir in dirs:
                        os.makedirs(os.path.join(output_dir, entry, relative, sub_dir))
                    for file_name in files:
                        source = os.path.join(root, file_name)
                        destination = os.path.join(output_dir, entry, relative, file_name)
                        self.check_and_copy(extensions, source, destination)

    @staticmethod
    def check_and_copy(extensions, full_input_path, full_output_path):
        # Compare case-insensitively, ignoring the dot of the extension.
        extension = os.path.splitext(full_input_path)[1].replace('.', '').upper()
        if extension in [candidate.upper() for candidate in extensions]:
            copyfile(full_input_path, full_output_path)
| 2.75 | 3 |
ex5_cisco3_conf.py | ramyacr97/RamyaPython | 0 | 12773622 | from __future__ import unicode_literals, print_function
from netmiko import Netmiko
from getpass import getpass
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment
# Prompt for the device password instead of hard-coding it.
password = getpass()

# Jinja2 environment that loads templates from the current directory.
env = Environment()
env.loader = FileSystemLoader('.')

# Netmiko connection parameters for the target IOS device.
cisco3 = {
    "host": 'cisco3.lasthop.io',
    "username": 'pyclass',
    "password": password,
    "device_type": 'cisco_ios',
}

# Template variables for the base configuration.
# NOTE(review): ntp_server1 and ntp_server2 are the same address -- confirm.
my_vars = {
    "ntp_server1":'172.16.31.10',
    "ntp_server2": '172.16.31.10',
    "timezone": 'PST',
    "timezone_offset": '-8',
    "timezone_dst": 'PDT',
}

net_connect = Netmiko(**cisco3)
template_file = 'base_template.j2'
template = env.get_template(template_file)
output = template.render(**my_vars)
print(output)
# NOTE(review): the rendered config is only printed; ``net_connect`` is
# opened but never used or disconnected -- presumably the push step is
# still to be written.
| 2.265625 | 2 |
skill_data.py | wang-g/wang-g.github.io | 0 | 12773623 | <reponame>wang-g/wang-g.github.io
from bs4 import BeautifulSoup
import urllib2
import re
def url_request(url):
    """Fetch ``url`` with a browser-like User-Agent; exit on HTTP errors.

    Returns the urllib2 response object on success.  (Python 2 code.)
    """
    # Spoofed headers: the wiki rejects the default urllib2 User-Agent.
    hdr = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36',
           'Accept':'*/*'}
    request = urllib2.Request(url, headers=hdr)
    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError, e:
        # Dump the error body and abort the whole script.
        print e.fp.read()
        quit()
    return response
def get_skill_info(html):
    """Extract mana-cost data from a skill wiki page.

    Returns a list of (level, mana cost) tuples from the gem progression
    table when one exists; otherwise a single int from the gem infobox;
    or None when the skill reserves mana / has no parsable cost.
    """
    bs = BeautifulSoup(html)
    # First, tries finding mana cost information in the gem progression table
    gem_prog_table = bs.find('table', {'class': 'wikitable GemLevelTable'})
    if gem_prog_table == None: #returns if gem progression table not found
        return None
    first_row = gem_prog_table.find('tr') #finds first row, the header row
    prog_headers = first_row.find_all('th') #gets a list of the column labels
    mana_cost_col = -1
    for i in range(len(prog_headers)):
        header_text = prog_headers[i].text
        if "Mana" in header_text and "Cost" in header_text:
            mana_cost_col = i
            break
    if mana_cost_col != -1: #if no column labeled "Mana Cost" was found, return None
        mana_costs = []
        for row in first_row.find_next_siblings('tr'):
            ## print row
            row_entries = row.find_all(['th','td'])
            level_entry = row.find('th').text.strip()
            mana_entry = row_entries[mana_cost_col].text.strip()
            try:
                mana_costs.append((int(level_entry),int(mana_entry)))
            except ValueError:
                # Non-numeric row (e.g. header repeats): skip it.
                continue
        return mana_costs
    # If no mana cost information is found in the gem progression table,
    # then checks the gem infobox
    gem_infobox = bs.find('table', {'class': 'GemInfoboxContainer'})
    infobox_mana_reserved = gem_infobox.find('td', text=re.compile('Mana Reserved'))
    infobox_mana_label = gem_infobox.find('td', text=re.compile('Mana Cost'))
    if not infobox_mana_reserved == None: #if this skill reserves mana, it has no mana cost
        return None
    elif not infobox_mana_label == None:
        infobox_mana = infobox_mana_label.find_next_sibling().text.strip()
        try:
            return int(infobox_mana)
        except ValueError:
            return None
    else:
        return None
def active_skill_row(skill_name, skill_tag_and_costs):
    """Format one pipe-delimited output row for an active skill.

    ``skill_tag_and_costs`` is ``(attribute_tag, mana_costs)`` where
    ``mana_costs`` is either a list of (level, cost) pairs or a scalar.
    Returns the row text terminated by a newline.
    """
    attr_tag = skill_tag_and_costs[0]
    mana_costs = skill_tag_and_costs[1]
    row = skill_name + "|" + attr_tag + "|"
    if type(mana_costs) is list:
        pieces = ["%s-%s" % (t[0], t[1]) for t in mana_costs]
        if pieces:
            row += ",".join(pieces)
        else:
            # Mirror the original trailing-separator trim on an empty list.
            row = row[:-1]
    else:
        row += str(mana_costs)
    return row + "\n"
def find_skills(bs, search_phrase, attr_tag):
    """Collect skill links from the section headed by ``search_phrase``.

    Returns a list of (skill name, attr_tag, absolute url) tuples, one per
    linked row in the matching table.
    """
    headers = bs.find_all('th', text=re.compile(search_phrase))
    header = ''
    # Pick the header whose row has fewer than 4 sibling headers --
    # presumably this distinguishes the section table from summaries.
    for h in headers:
        if len(h.find_next_siblings('th')) < 4:
            header = h
            break
    table = header.find_parent('table')
    rows = table.find_all('tr')
    # Local base_url deliberately shadows the module-level one.
    base_url = 'http://pathofexile.gamepedia.com/'
    skill_list = []
    for r in rows:
        link = r.find('a')
        if link == None:
            # Rows without a link (e.g. header rows) carry no skill.
            continue
        skill_name = link.text.strip()
        url = base_url + link['href']
        skill_list.append((skill_name, attr_tag, url))
    return skill_list
# Scrape the skills index, then each skill page, and write mana_file.txt.
url = 'http://pathofexile.gamepedia.com/Skills'
# NOTE(review): this line was mangled by an email-redaction pass; it is
# unused below (find_skills defines its own base_url).
base_url = 'http://pathofexile.gamepedia.<EMAIL>'
response = url_request(url)
sk_html = response.read()
bs = BeautifulSoup(sk_html)
##str_headers = bs.find_all('th', text=re.compile('Strength Skills'))
##str_header = ''
##for h in str_headers:
##    if len(h.find_next_siblings('th')) == 3:
##        str_header = h
##        break
##str_table = str_header.find_parent('table')
##str_rows = str_table.find_all('tr')
##base_url = 'http://pathofexile.gamepedia.com/'
skill_list = []
skill_list.extend(find_skills(bs, 'Strength Skills', 'str'))
skill_list.extend(find_skills(bs, 'Dexterity Skills', 'dex'))
skill_list.extend(find_skills(bs, 'Intelligence Skills', 'int'))
skill_list.extend(find_skills(bs, 'Other Skills', 'oth'))
# Visit every skill page; keep (tag, costs) per skill, and track skills
# with no parsable cost separately.
skills_dict = {}
no_cost_list = []
for s in skill_list:
    print s[0]
    response = url_request(s[2])
    skill_html = response.read()
    mana_costs = get_skill_info(skill_html)
    print str(mana_costs)
    if mana_costs == None:
        no_cost_list.append(s[0])
    else:
        skills_dict[s[0]] = (s[1], mana_costs)
##for r in str_rows:
##    link = r.find('a')
##    if link == None:
##        continue
##    skill_name = link.text.strip()
##    print skill_name
##    url = base_url + link['href']
##    response = url_request(url)
##    skill_html = response.read()
##    mana_costs = get_skill_info(skill_html)
##    print str(mana_costs)
##    if mana_costs == None:
##        no_cost_list.append(skill_name)
##    else:
##        skills_dict[skill_name] = mana_costs
# Write the output table; rows with a short progression list get a "--"
# marker prefix, and no-cost skills are listed after a sentinel line.
mana_file = open('mana_file.txt', 'w')
for skill in skills_dict:
    if type(skills_dict[skill][1]) is list and len(skills_dict[skill][1]) < 20:
        mana_file.write("--")
    mana_file.write(active_skill_row(skill, skills_dict[skill]))
mana_file.write("===NO_COST===\n")
for skill_name in no_cost_list:
    mana_file.write(skill_name + "\n")
mana_file.close()
| 3.125 | 3 |
aiortc/codecs/__init__.py | dsvictor94/aiortc | 1 | 12773624 | from ..rtcrtpparameters import RTCRtcpFeedback, RTCRtpCodecParameters
from .g711 import PcmaDecoder, PcmaEncoder, PcmuDecoder, PcmuEncoder
from .opus import OpusDecoder, OpusEncoder
from .vpx import VpxDecoder, VpxEncoder
# G.711 codecs with their static payload types (0 = PCMU, 8 = PCMA).
PCMU_CODEC = RTCRtpCodecParameters(name='PCMU', clockRate=8000, channels=1, payloadType=0)
PCMA_CODEC = RTCRtpCodecParameters(name='PCMA', clockRate=8000, channels=1, payloadType=8)

# Codecs offered per media kind; list order expresses preference.
MEDIA_CODECS = {
    'audio': [
        RTCRtpCodecParameters(name='opus', clockRate=48000, channels=2),
        PCMU_CODEC,
        PCMA_CODEC,
    ],
    'video': [
        # NACK feedback enables retransmission and picture-loss recovery.
        RTCRtpCodecParameters(name='VP8', clockRate=90000, rtcpFeedback=[
            RTCRtcpFeedback(type='nack'),
            RTCRtcpFeedback(type='nack pli'),
        ])
    ]
}
def get_decoder(codec):
    """Instantiate the decoder matching ``codec.name``; None if unknown."""
    name = codec.name
    if name == 'opus':
        return OpusDecoder()
    if name == 'PCMU':
        return PcmuDecoder()
    if name == 'PCMA':
        return PcmaDecoder()
    if name == 'VP8':
        return VpxDecoder()
    return None
def get_encoder(codec):
    """Instantiate the encoder matching ``codec.name``; None if unknown."""
    name = codec.name
    if name == 'opus':
        return OpusEncoder()
    if name == 'PCMU':
        return PcmuEncoder()
    if name == 'PCMA':
        return PcmaEncoder()
    if name == 'VP8':
        return VpxEncoder()
    return None
| 2.21875 | 2 |
wbia/guitool/__init__.py | dylanirion/wildbook-ia | 6 | 12773625 | # -*- coding: utf-8 -*-
# flake8: noqa
__version__ = '2.0.1'
import logging
import utool as ut
ut.noinject(__name__, '[guitool.__init__]')
# try:
# # try seeing if importing plottool before any guitool things helps
# import wbia.plottool
# except Exception as ex:
# import utool as ut
# ut.printex(ex, 'tried to import wbia.plottool to solve win crash')
# raise
# #pass
# print('__guitool__1')
from wbia.guitool import __PYQT__
# print('__guitool__2')
from wbia.guitool import api_item_model
from wbia.guitool import api_table_view
from wbia.guitool import api_tree_view
from wbia.guitool import api_item_widget
from wbia.guitool import stripe_proxy_model
from wbia.guitool import guitool_tables
from wbia.guitool import guitool_dialogs
from wbia.guitool import guitool_decorators
from wbia.guitool import guitool_delegates
from wbia.guitool import guitool_components
from wbia.guitool import guitool_main
from wbia.guitool import guitool_misc
from wbia.guitool import qtype
import utool
print, rrr, profile = utool.inject2(__name__, '[guitool]')
logger = logging.getLogger('wbia')
def reload_subs():
    """Reloads utool and submodules """
    rrr()
    # Reload each submodule that exposes utool's rrr() hot-reload hook, in
    # the same order as before.
    for _mod in (guitool_tables, guitool_dialogs, guitool_decorators,
                 guitool_main, guitool_misc, api_item_model, qtype,
                 guitool_components):
        if hasattr(_mod, 'rrr'):
            _mod.rrr()


# Convenience alias.
rrrr = reload_subs
# (submodule_name, fromlist) pairs consumed by utool's dynamic importer below
# and by reassign_submodule_attributes().  A fromlist of None means "import
# the module itself" rather than specific members.
IMPORT_TUPLES = [
    ('guitool_main', None),
    ('guitool_components', None),
    ('guitool_dialogs', None),
    ('guitool_decorators', None),
    ('guitool_misc', None),
    ('api_item_model', None),
    ('api_tree_view', None),
    ('api_table_view', None),
    ('qtype', None),
    ('stripe_proxy_model', None),
    ('filter_proxy_model', None),
]
"""
python -c "import wbia.guitool" --dump-guitool-init
python -c "import wbia.guitool" --update-guitool-init
"""
# Dynamic importing is the default; pass --nodyn on the command line to fall
# back to the static (autogenerated) imports guarded by DOELSE below.
__DYNAMIC__ = not ut.get_argflag('--nodyn')

DOELSE = False
if __DYNAMIC__:
    # TODO: import all utool external prereqs. Then the imports will not import
    # anything that has already in a toplevel namespace
    # COMMENTED OUT FOR FROZEN __INIT__
    # Dynamically import listed util libraries and their members.
    from utool._internal import util_importer

    # FIXME: this might actually work with rrrr, but things arent being
    # reimported because they are already in the modules list
    ignore_endswith = ['_cyth']
    ignore_list = ['Qt']
    # dynamic_import builds a string of import statements for IMPORT_TUPLES;
    # exec()-ing it populates this module's namespace.
    import_execstr = util_importer.dynamic_import(
        __name__,
        IMPORT_TUPLES,
        ignore_endswith=ignore_endswith,
        ignore_list=ignore_list,
        verbose=False,
    )
    exec(import_execstr)
    DOELSE = False
else:
    # Do the nonexec import (can force it to happen no matter what if always
    # set to True)
    DOELSE = True
# This screws up dynamic_import if it is placed before
from wbia.guitool.guitool_tables import *
from wbia.guitool.guitool_dialogs import *
from wbia.guitool.guitool_decorators import *
from wbia.guitool.guitool_delegates import *
from wbia.guitool.guitool_components import *
from wbia.guitool.guitool_main import *
from wbia.guitool.guitool_misc import *
from wbia.guitool.api_item_model import *
from wbia.guitool.api_table_view import *
from wbia.guitool.api_tree_view import *
from wbia.guitool.api_item_widget import *
from wbia.guitool.stripe_proxy_model import *
from wbia.guitool.filter_proxy_model import *
from wbia.guitool.qtype import *
if DOELSE:
pass
# <AUTOGEN_INIT>
from wbia.guitool import guitool_main
from wbia.guitool import guitool_components
from wbia.guitool import guitool_dialogs
from wbia.guitool import guitool_decorators
from wbia.guitool import guitool_misc
from wbia.guitool import api_item_model
from wbia.guitool import api_tree_view
from wbia.guitool import api_table_view
from wbia.guitool import qtype
from wbia.guitool import stripe_proxy_model
from wbia.guitool import filter_proxy_model
from wbia.guitool.guitool_main import (
GUITOOL_PYQT_VERSION,
GuitoolApplication,
IS_ROOT_WINDOW,
QAPP,
QUIET,
VERBOSE,
activate_qwindow,
ensure_qapp,
ensure_qtapp,
exit_application,
get_qtapp,
ping_python_interpreter,
qtapp_loop,
qtapp_loop_nonblocking,
remove_pyqt_input_hook,
)
from wbia.guitool.guitool_components import (
ALIGN_DICT,
BlockSignals,
ConfigConfirmWidget,
DEBUG_WIDGET,
GuiProgContext,
GuitoolWidget,
PROG_TEXT,
ProgHook,
ResizableTextEdit,
SimpleTree,
Spoiler,
WIDGET_BASE,
adjust_font,
fix_child_attr_heirarchy,
fix_child_size_heirarchy,
getAvailableFonts,
get_nested_attr,
get_widget_text_width,
layoutSplitter,
make_style_sheet,
msg_event,
newButton,
newCheckBox,
newComboBox,
newFont,
newFrame,
newLabel,
newLineEdit,
newMenu,
newMenuAction,
newMenubar,
newOutputLog,
newProgressBar,
newQPoint,
newScrollArea,
newSizePolicy,
newSplitter,
newTabWidget,
newTextEdit,
newToolbar,
newWidget,
print_widget_heirarchy,
prop_text_map,
rectify_qt_const,
walk_widget_heirarchy,
)
from wbia.guitool.guitool_dialogs import (
ResizableMessageBox,
SELDIR_CACHEID,
are_you_sure,
build_nested_qmenu,
connect_context_menu,
msgbox,
newDirectoryDialog,
newFileDialog,
popup_menu,
select_directory,
select_files,
select_images,
user_info,
user_input,
user_option,
user_question,
)
from wbia.guitool.guitool_decorators import (
DEBUG,
checks_qt_error,
signal_,
slot_,
)
from wbia.guitool.guitool_misc import (
ALT_KEY,
BlockContext,
GUILoggingHandler,
GUILoggingSender,
QLoggedOutput,
find_used_chars,
get_cplat_tab_height,
get_view_selection_as_str,
make_option_dict,
make_word_hotlinks,
)
from wbia.guitool.api_item_model import (
APIItemModel,
API_MODEL_BASE,
ChangeLayoutContext,
QVariantHack,
VERBOSE_MODEL,
default_method_decorator,
simple_thumbnail_widget,
updater,
)
from wbia.guitool.api_tree_view import (
APITreeView,
API_VIEW_BASE,
testdata_tree_view,
)
from wbia.guitool.api_table_view import APITableView
from wbia.guitool.qtype import (
ItemDataRoles,
LOCALE,
QLocale,
QString,
QT_BUTTON_TYPES,
QT_COMBO_TYPES,
QT_DELEGATE_TYPES,
QT_ICON_TYPES,
QT_IMAGE_TYPES,
QT_PIXMAP_TYPES,
QVariant,
SIMPLE_CASTING,
cast_from_qt,
cast_into_qt,
infer_coltype,
locale_float,
numpy_to_qicon,
numpy_to_qpixmap,
qindexinfo,
to_qcolor,
)
from wbia.guitool.stripe_proxy_model import (
STRIPE_PROXY_BASE,
STRIP_PROXY_META_CLASS,
STRIP_PROXY_SIX_BASE,
StripeProxyModel,
)
from wbia.guitool.filter_proxy_model import (
BASE_CLASS,
FilterProxyModel,
)
import utool
print, rrr, profile = utool.inject2(__name__, '[guitool]')
def reassign_submodule_attributes(verbose=1):
    """
    Updates attributes in the __init__ modules with updated attributes
    in the submodules.
    """
    import sys
    if verbose and '--quiet' not in sys.argv:
        print('dev reimport')
    # Self import.  BUG FIX: a plain ``import wbia.guitool`` only binds the
    # name ``wbia``; the loop below references the bare name ``guitool``,
    # which raised NameError.  Bind it explicitly with an alias.
    import wbia.guitool as guitool
    # Implicit reassignment: copy every public attribute of each listed
    # submodule up onto the package namespace.
    seen_ = set()
    for tup in IMPORT_TUPLES:
        if len(tup) > 2 and tup[2]:
            continue  # dont import package names
        submodname, fromimports = tup[0:2]
        submod = getattr(guitool, submodname)
        for attr in dir(submod):
            if attr.startswith('_'):
                continue
            if attr in seen_:
                # This just holds off bad behavior
                # but it does mimic normal util_import behavior
                # which is good
                continue
            seen_.add(attr)
            setattr(guitool, attr, getattr(submod, attr))
def reload_subs(verbose=1):
    """ Reloads guitool and submodules """
    if verbose:
        print('Reloading guitool submodules')
    rrr(verbose > 1)

    def _rrr_of(mod):
        # Prefer the module's own rrr hook; otherwise return a no-op that
        # just reports the module cannot be auto-reloaded.
        if hasattr(mod, 'rrr'):
            return mod.rrr

        def _fallback(*args, **kwargs):
            """ fallback reload """
            if verbose > 0:
                print('Auto-reload (using rrr) not setup for mod=%r' % (mod,))

        return _fallback

    # Reload submodules in the same order as the autogenerated code did.
    for _mod in (guitool_main, guitool_components, guitool_dialogs,
                 guitool_decorators, guitool_misc, api_item_model,
                 api_tree_view, api_table_view, qtype, stripe_proxy_model,
                 filter_proxy_model):
        _rrr_of(_mod)(verbose > 1)
    rrr(verbose > 1)
    try:
        # hackish way of propogating up the new reloaded submodule attributes
        reassign_submodule_attributes(verbose=verbose)
    except Exception as ex:
        print(ex)


rrrr = reload_subs
# </AUTOGEN_INIT>
| 1.875 | 2 |
tests/test_core/test_clientcontroller.py | dls-controls/github-publish-test | 0 | 12773626 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import setup_malcolm_paths
from collections import OrderedDict
import unittest
from mock import MagicMock, patch, call
# logging
# import logging
# logging.basicConfig(level=logging.DEBUG)
# module imports
from malcolm.controllers.defaultcontroller import DefaultController
from malcolm.core import Attribute, ClientController
from malcolm.core.vmetas import StringMeta, NumberMeta
from malcolm.compat import queue
from malcolm.parts.demo import HelloPart
class TestClientController(unittest.TestCase):
    """Tests that ClientController mirrors a remote block over mocked comms."""

    def setUp(self):
        # Build a real DefaultController first so we have an authoritative
        # serialized block structure for the client controller to mirror.
        p = MagicMock()
        part = HelloPart(p, None)
        # Serialized version of the block we want
        source = DefaultController(
            "blockname", p, parts={"hello": part}).block
        self.serialized = source.to_dict()
        # Setup client controller prerequisites
        self.p = MagicMock()
        self.p.name = "process"
        self.comms = MagicMock()
        self.cc = ClientController("blockname", self.p)
        self.b = self.cc.block
        # get process to give us comms
        self.p.get_client_comms.return_value = self.comms
        # tell our controller which blocks the process can talk to
        response = MagicMock(id=self.cc.REMOTE_BLOCKS_ID, value=["blockname"])
        self.cc.put(response)
        # tell our controller the serialized state of the block
        response = MagicMock(id=self.cc.BLOCK_ID, changes=[[[], self.serialized]])
        self.cc.put(response)

    def test_init(self):
        # The controller should have subscribed once to the process's
        # remoteBlocks value...
        self.assertEqual(self.p.q.put.call_count, 1)
        req = self.p.q.put.call_args[0][0]
        self.assertEqual(req.typeid, "malcolm:core/Subscribe:1.0")
        self.assertEqual(req.endpoint, [self.p.name, "remoteBlocks", "value"])
        self.assertEqual(req.response_queue, self.cc)
        # ...and then subscribed (with deltas) to the named block via comms.
        self.p.get_client_comms.assert_called_with("blockname")
        self.assertEqual(self.comms.q.put.call_count, 1)
        req = self.comms.q.put.call_args[0][0]
        self.assertEqual(req.typeid, "malcolm:core/Subscribe:1.0")
        self.assertEqual(req.delta, True)
        self.assertEqual(req.response_queue, self.cc)
        self.assertEqual(req.endpoint, ["blockname"])

    def test_methods_created(self):
        # The mirrored block should expose the remote block's attributes and
        # the say_hello method with its full takes/returns metadata.
        self.assertEqual(list(self.b), [
            'meta', 'state', 'status', 'busy', 'disable', 'reset', 'say_hello'])
        m = self.b["say_hello"]
        self.assertEqual(list(m.takes.elements), ["name", "sleep"])
        self.assertEqual(type(m.takes.elements["name"]), StringMeta)
        self.assertEqual(type(m.takes.elements["sleep"]), NumberMeta)
        self.assertEqual(list(m.returns.elements), ["greeting"])
        self.assertEqual(type(m.returns.elements["greeting"]), StringMeta)
        self.assertEqual(m.defaults, dict(sleep=0))

    def test_call_method(self):
        self.p.create_queue.return_value = queue.Queue()

        # Fake the remote end: answer any request put on the comms queue.
        def f(request):
            request.respond_with_return(dict(
                greeting="Hello %s" % request.parameters["name"]))

        self.comms.q.put.side_effect = f
        ret = self.b.say_hello(name="me")
        self.assertEqual(ret.greeting, "Hello me")

    def test_put_update_response(self):
        m = MagicMock(spec=Attribute)
        self.b.replace_endpoints(dict(child=m))
        # A delta on a sub-path should be applied to the matching endpoint
        # without notification (notify=False: the remote already knows).
        response = MagicMock(
            id=self.cc.BLOCK_ID,
            changes=[[["child", "value"], "change"]])
        self.cc.put(response)
        m.set_value.assert_called_once_with("change", notify=False)

    def test_put_root_update_response(self):
        # A delta with an empty path should replace the entire block
        # structure with the new serialized form.
        attr1 = StringMeta("dummy").make_attribute()
        attr2 = StringMeta("dummy2").make_attribute()
        new_block_structure = OrderedDict(typeid='malcolm:core/Block:1.0')
        new_block_structure["attr1"] = attr1.to_dict()
        new_block_structure["attr2"] = attr2.to_dict()
        response = MagicMock(
            id=self.cc.BLOCK_ID,
            changes=[[[], new_block_structure]])
        self.cc.put(response)
        self.assertEqual(self.b.to_dict(), new_block_structure)
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    unittest.main(verbosity=2)
| 2.328125 | 2 |
writeDzn.py | alejandrohn/SCI-generator | 0 | 12773627 | <filename>writeDzn.py
from printElement import PrintElement
# MiniZinc comment strings appended after each scalar parameter line.
TAnswer = '%number of attributes'
KAnswer = '%maximum size of support set'
NAnswer = '%number of positive instances'
MAnswer = '%number of negative instances'
CAnswer = '%number of atMostOne Constraints'


class ManageWriteDzn:
    """Writes a MiniZinc ``.dzn`` data file describing a problem instance.

    The file is written to ``models/<fileName>.dzn`` and contains the scalar
    parameters, the omegap/omegan matrices, and the atMostOne constraints.
    """

    def __init__(self):
        # Helper used to render matrices in MiniZinc syntax.
        self.printElement = PrintElement()

    def writeDzn(self, fileName, answer, omegap, omegan, atMostOne):
        """Write every section of the .dzn file and always close the handle."""
        self.openFile(fileName)
        try:
            self.writeParams(answer)
            self.writeLineBreak(1)
            self.writeOmegap(omegap)
            self.writeLineBreak(2)
            self.writeOmegan(omegan)
            self.writeLineBreak(4)
            self.writeAtMostOne(atMostOne)
        finally:
            # Close even if a write fails (the original leaked on error).
            self.f.close()

    def writeLine(self, attribute, text, symbol):
        # Renders e.g. "t=5; %number of attributes\n".
        self.f.write(symbol + '=' + str(attribute) + '; ' + text + '\n')

    def writeOmegap(self, omegap):
        self.f.write(
            'omegap = ' + self.printElement.printMatrixWithPipe(omegap))

    def writeOmegan(self, omegan):
        self.f.write(
            'omegan = ' + self.printElement.printMatrixWithPipe(omegan))

    def writeParams(self, answer):
        self.writeLine(answer.numberAttributeT, TAnswer, 't')
        self.writeLine(answer.maximumSizeSetK, KAnswer, 'k')
        self.writeLine(answer.positiveInstancesN, NAnswer, 'n')
        # BUG FIX: the 'm' line previously reused NAnswer ("positive
        # instances"); it must carry MAnswer ("negative instances").
        self.writeLine(answer.negativeInstancesM, MAnswer, 'm')
        self.writeLine(answer.atMostConstraintsC, CAnswer, 'c')

    def writeLineBreak(self, number):
        # Emit `number` blank lines.
        self.f.write('\n' * number)

    def openFile(self, fileName):
        self.f = open('models/' + fileName + '.dzn', "w+")

    def writeAtMostOne(self, atmostone):
        self.f.write('atMostOne = ' +
                     self.printElement.printMatrixWithBrace1(atmostone))
| 2.78125 | 3 |
coffee/ik/ik_solver.py | kevinzakka/coffee | 18 | 12773628 | # Some implementation details regarding PyBullet:
#
# PyBullet's IK solver uses damped least squares (DLS) optimization. This is commonly
# known as Levenberg-Marquardt (LM) optimization.
from __future__ import annotations
import dataclasses
from typing import NamedTuple, Optional
import numpy as np
import pybullet as p
from dm_robotics.geometry.geometry import Pose
from dm_robotics.transformations import transformations as tr
from coffee.client import BulletClient, ClientConfig, ConnectionMode
from coffee.joints import Joints
from coffee.structs import LinkState
from coffee.utils import geometry_utils
class IKSolution(NamedTuple):
    """An IK solution returned by the IKSolver.

    Attributes:
        qpos: The joint configuration.
        linear_err: The linear error between the solved pose and the target pose.
        angular_err: The angular error between the solved pose and the target pose.
    """

    qpos: np.ndarray
    # Euclidean distance between solved and target positions (meters, per the
    # tolerances documented in IKSolver.solve).
    linear_err: float
    # Norm of the axis-angle orientation error (radians).
    angular_err: float
@dataclasses.dataclass
class IKSolver:
    """Inverse kinematics solver.

    Computes a joint configuration that brings an element (in a kinematic chain) to a
    desired pose.
    """

    # Client connected to the simulation that owns the real robot.
    pb_client: BulletClient
    # Joint bookkeeping for the robot whose chain is solved.
    joints: Joints
    # Link index used as the IK end-effector point.
    ik_point_joint_id: int
    # Damping passed to pybullet's damped-least-squares IK.
    joint_damping: float = 0.0
    # Preferred joint configuration used as the nullspace goal; when None, the
    # midpoint of each joint's range is used (assumes joints_range rows are
    # (lower, upper) pairs -- confirm in Joints).
    nullspace_reference: Optional[np.ndarray] = None

    def __post_init__(self) -> None:
        if self.nullspace_reference is None:
            self.nullspace_reference = 0.5 * np.sum(self.joints.joints_range, axis=1)

        # Dirty hack to get around pybullet's lack of support for computing FK given a
        # joint configuration as an argument.
        # See: https://github.com/bulletphysics/bullet3/issues/2603
        # A second, headless (DIRECT) client hosts a "shadow" copy of the
        # robot whose joints we can freely reset without disturbing the real
        # simulation.
        self._shadow_client = BulletClient.create(
            mode=ConnectionMode.DIRECT,
            config=ClientConfig(),
        )
        manipulator_kwargs = self.pb_client._body_cache[self.joints.body_id]
        shadow_body_id = self._shadow_client.load_urdf(**manipulator_kwargs)

        # Make sure the shadow robot is in the same world pose as the actual one.
        pos, quat = self.pb_client.getBasePositionAndOrientation(self.joints.body_id)
        self._shadow_client.resetBasePositionAndOrientation(
            bodyUniqueId=shadow_body_id,
            posObj=pos,
            ornObj=quat,
        )

        self._shadow_joints = Joints.from_body_id(shadow_body_id, self._shadow_client)

    def solve(
        self,
        ref_pose: Pose,
        linear_tol: float = 1e-3,
        angular_tol: float = 1e-3,
        max_steps: int = 100,
        num_attempts: int = 50,
        stop_on_first_successful_attempt: bool = False,
        inital_joint_configuration: Optional[np.ndarray] = None,
        nullspace_reference: Optional[np.ndarray] = None,
        verbose: bool = False,
    ) -> Optional[np.ndarray]:
        """Attempts to solve the inverse kinematics problem.

        This method computes a joint configuration that solves the IK problem. It
        returns None if no solution is found. If multiple solutions are
        found, it will return the one where the joints are closer to the
        `nullspace_reference`. If no `nullspace_reference is provided, it will use the
        center of the joint ranges as reference.

        Args:
            ref_pose: Target pose of the controlled element in Cartesian world frame.
            linear_tol: The linear tolerance, in meters, that determines if the solution
                found is valid.
            angular_tol: The angular tolerance, in radians, that determines if the
                solution found is valid.
            max_steps: Maximum iterations passed to pybullet's IK per attempt.
            num_attempts: The number of different attempts the solver should do. For a
                given target pose, there exists an infinite number of possible
                solutions, having more attempts allows to compare different joint
                configurations. The solver will return the solution where the joints are
                closer to the `nullspace_reference`. Note that not all attempts are
                successful, and thus, having more attempts gives better chances of
                finding a correct solution.
            stop_on_first_successful_attempt: If true, the method will return the
                first solution that meets the tolerance criteria. If false, returns the
                solution where the joints are closer to `nullspace_reference`.
            inital_joint_configuration: A joint configuration that will be used for
                the first attempt. This can be useful in the case of a complex pose,
                a user could provide the initial guess that is close to the desired
                solution. If None, all the joints will be set to 0 for the first
                attempt.
            nullspace_reference: The desired joint configuration that is set as the
                nullspace goal. When the controlled element is in the desired pose, the
                solver will try and bring the joint configuration closer to the
                nullspace reference without moving the element. If no nullspace
                reference is provided, the center of the joint ranges is used as
                reference. Can be overriden in the `solve` method.

        Returns:
            The corresponding joint configuration if a solution is found, else None.

        Raises:
            ValueError: If the `nullspace_reference` does not have the correct length.
            ValueError: If the `inital_joint_configuration` does not have the correct
                length.
        """
        if nullspace_reference is None:
            nullspace_reference = self.nullspace_reference
        else:
            if len(nullspace_reference) != self.joints.dof:
                raise ValueError("nullspace_reference has an invalid length.")

        if inital_joint_configuration is None:
            inital_joint_configuration = self.joints.zeros_array()
        else:
            inital_joint_configuration = np.array(inital_joint_configuration)
            if len(inital_joint_configuration) != self.joints.dof:
                raise ValueError("inital_joint_configuration has an invalid length.")

        nullspace_jnt_qpos_min_err = np.inf
        sol_qpos = None
        success = False

        # Each iteration of this loop attempts to solve the inverse kinematics.
        # If a solution is found, it is compared to previous solutions.
        for attempt in range(num_attempts):

            # Use the user provided joint configuration for the first attempt.
            if attempt == 0:
                qpos_new = inital_joint_configuration
            else:
                # Randomize the initial joint configuration so that the IK can find
                # a different solution.
                qpos_new = np.random.uniform(
                    low=self.joints.joints_lower_limit,
                    high=self.joints.joints_upper_limit,
                )

            # Reset the (shadow) joints to this configuration.
            for i, joint_id in enumerate(self._shadow_joints.controllable_joints):
                self._shadow_client.resetJointState(
                    self._shadow_joints.body_id,
                    joint_id,
                    qpos_new[i],
                )

            # Solve the IK.
            joint_qpos, linear_err, angular_err = self._solve_ik(
                ref_pose,
                max_steps,
                verbose,
            )

            # Check if the attempt was successful. The solution is saved if the
            # joints are closer to the nullspace reference.
            if linear_err <= linear_tol and angular_err <= angular_tol:
                success = True
                nullspace_jnt_qpos_err = float(
                    np.linalg.norm(joint_qpos - nullspace_reference)
                )
                if nullspace_jnt_qpos_err < nullspace_jnt_qpos_min_err:
                    nullspace_jnt_qpos_min_err = nullspace_jnt_qpos_err
                    sol_qpos = joint_qpos

            if verbose:
                print(
                    f"attempt: {attempt} "
                    f"- nullspace_jnt_qpos_min_err: {nullspace_jnt_qpos_min_err:.4f} "
                    f"- success: {success}"
                )

            if success and stop_on_first_successful_attempt:
                break

        if not success:
            print(f"Unable to solve inverse kinematics for ref_pose: {ref_pose}")
        else:
            if verbose:
                # NOTE: `attempt` is the zero-based index of the last attempt.
                print(f"Found a solution in {attempt} attempts.")

        return sol_qpos

    def _solve_ik(
        self,
        ref_pose: Pose,
        max_steps: int,
        verbose: bool,
    ) -> IKSolution:
        """Finds a joint configuration that brings element pose to target pose."""
        try:
            qpos = self._shadow_client.calculateInverseKinematics(
                bodyUniqueId=self._shadow_joints.body_id,
                endEffectorLinkIndex=self.ik_point_joint_id,
                targetPosition=ref_pose.position,
                targetOrientation=geometry_utils.as_quaternion_xyzw(
                    ref_pose.quaternion
                ),
                residualThreshold=1e-5,
                maxNumIterations=max_steps,
                jointDamping=self._shadow_joints.const_array(
                    self.joint_damping
                ).tolist(),
            )

            # pybullet can return NaNs instead of raising on failure.
            if np.isnan(np.sum(qpos)):
                qpos = None
            else:
                # Clip to joint limits.
                qpos = np.clip(
                    a=qpos,
                    a_min=self._shadow_joints.joints_lower_limit,
                    a_max=self._shadow_joints.joints_upper_limit,
                )
        except p.error as e:
            if verbose:
                print(f"IK failed with error message: {e}")
            qpos = None

        # If we were unable to find a solution, exit early.
        if qpos is None:
            return IKSolution(np.empty(self._shadow_joints.dof), np.inf, np.inf)

        # If we found a solution, we compute its associated pose and compare with the
        # target pose. We do this by first using forward kinematics to compute the
        # pose of the controlled element associated with the solution and then computing
        # linear and angular errors.

        # Forward kinematics (on the shadow robot).
        for i, joint_id in enumerate(self._shadow_joints.controllable_joints):
            self._shadow_client.resetJointState(
                self._shadow_joints.body_id,
                joint_id,
                qpos[i],
            )
        cur_pose = self.forward_kinematics(shadow=True)

        # Error computation.
        linear_err = float(np.linalg.norm(ref_pose.position - cur_pose.position))
        err_quat = tr.quat_diff_active(ref_pose.quaternion, cur_pose.quaternion)
        err_axis_angle = tr.quat_to_axisangle(err_quat)
        angular_err = float(np.linalg.norm(err_axis_angle))

        return IKSolution(np.array(qpos), linear_err, angular_err)

    def forward_kinematics(self, shadow: bool = False) -> Pose:
        # Returns the world-frame pose of the IK point link, read either from
        # the shadow robot (shadow=True) or the real simulation.
        if shadow:
            eef_link_state = LinkState(
                *self._shadow_client.getLinkState(
                    bodyUniqueId=self._shadow_joints.body_id,
                    linkIndex=self.ik_point_joint_id,
                    computeLinkVelocity=0,
                    computeForwardKinematics=True,
                )
            )
        else:
            eef_link_state = LinkState(
                *self.pb_client.getLinkState(
                    bodyUniqueId=self.joints.body_id,
                    linkIndex=self.ik_point_joint_id,
                    computeLinkVelocity=0,
                    computeForwardKinematics=True,
                )
            )
        return Pose(
            position=eef_link_state.link_world_position,
            quaternion=geometry_utils.as_quaternion_wxyz(
                eef_link_state.link_world_orientation
            ),
        )
| 2.40625 | 2 |
tests/test_backends/test_base.py | ColinDuquesnoy/QCrash | 30 | 12773629 | from qcrash.backends.base import BaseBackend
from qcrash.formatters.email import EmailFormatter
import pytest
def test_qsettings():
    # Even a backend constructed with empty configuration must expose a
    # usable QSettings object.
    backend = BaseBackend(None, '', '', None)
    assert backend.qsettings() is not None
def test_set_formatter():
    backend = BaseBackend(None, '', '', None)
    # No formatter until one is explicitly installed.
    assert backend.formatter is None
    backend.set_formatter(EmailFormatter("test"))
    assert isinstance(backend.formatter, EmailFormatter)
def test_send_report():
    # The base class leaves report submission to concrete backends.
    backend = BaseBackend(None, '', '', None)
    with pytest.raises(NotImplementedError):
        backend.send_report('', '')
| 2.359375 | 2 |
2020/day_06/one.py | zigapk/adventofcode | 0 | 12773630 | count = 0
current_group = set()
with open('in', 'r') as f:
for line in f.readlines():
l = line.strip()
if len(l) == 0:
# this is end of the last group
count += len(current_group)
current_group = set()
else:
# add answers to the current group
for chr in l:
current_group.add(chr)
# don't forget the last group
count += len(current_group)
print(count)
| 3.671875 | 4 |
flydra_analysis/flydra_analysis/a2/stimulus_positions.py | elhananby/flydra | 45 | 12773631 | <gh_stars>10-100
# Post (stimulus) endpoint positions for each experimental condition, keyed by
# condition name.  Each value is a list of posts; each post is a pair of
# (x, y, z) endpoints (top point first, then bottom).
# NOTE(review): units appear to be meters -- the older commented-out data
# below shows the same coordinates x1000 (mm) -- confirm before reuse.
stim_positions = {
    "double": [
        [(0.4584, 0.2575, 0.2038), (0.4612, 0.2690, -0.0283)],  # 3d location
        [(0.4601, 0.1549, 0.1937), (0.4614, 0.1660, -0.0358)],
    ],
    "double_20070301": [
        [(0.4538, 0.2740, 0.1994), (0.4565, 0.2939, -0.0531)],  # top highy
        [(0.4516, 0.1642, 0.1872), (0.4541, 0.1767, -0.0606)],  # top lowy
    ],
    "half": [[(0.4567, 0.2029, 0.1958), (0.4581, 0.2166, -0.0329)],],
    "half_20070303": [[(0.4628, 0.2066, 0.1920), (0.4703, 0.2276, -0.0555)]],
    "tall": [[(0.4562, 0.1951, 0.2798), (0.4542, 0.2097, -0.0325)],],
    # Historical measurements kept for reference:
    ##from 20061205:
    ##tall=[( 456.2, 195.1, 279.8),
    ##      ( 454.2, 209.7,-32.5)]
    ##from 20061212:
    ##short=[( 461.4, 204.2, 128.1),
    ##       ( 462.5, 205.3, 114.4)]
    ##from 20061219:
    ##necklace = [( 455.9, 194.4, 262.8),
    ##            ( 456.2, 212.8,-42.2)]
    ##    'no post (smoothed)' : [( .4562, .1951, .2798),
    ##                            ( .4542, .2097,-.0325)],
    "short": [[(0.4614, 0.2042, 0.1281), (0.4625, 0.2053, 0.1144)]],
    "necklace": [[(0.4559, 0.1944, 0.2628), (0.4562, 0.2128, -0.0422)]],
    # No stimulus condition: no posts.
    None: [],
}
| 1.335938 | 1 |
tests/models/mantis_rcnn/test_mantis_rcnn_preds.py | ramaneswaran/mantisshrimp | 0 | 12773632 | <gh_stars>0
import pytest, torch
from mantisshrimp import *
@pytest.fixture(scope="module")
def faster_rcnn_batch():
    # One batch of size two from the sample dataset, built with the
    # Faster R-CNN specific dataloader.
    dataset = test_utils.sample_dataset()
    loader = MantisFasterRCNN.dataloader(dataset, batch_size=2)
    images, targets = next(iter(loader))
    return images, list(targets)
@pytest.fixture(scope="module")
def mask_rcnn_batch():
    # One batch of size two from the sample dataset, built with the
    # Mask R-CNN specific dataloader.
    dataset = test_utils.sample_dataset()
    loader = MantisMaskRCNN.dataloader(dataset, batch_size=2)
    images, targets = next(iter(loader))
    return images, list(targets)
@pytest.fixture(scope="module")
def batch(request):
    # Indirect fixture: resolves to whichever batch fixture is named by the
    # parametrize value (e.g. "faster_rcnn_batch" or "mask_rcnn_batch").
    return request.getfixturevalue(request.param)
@pytest.fixture()
def model_class(request):
    # Map the parametrize id to the model class; an unrecognised id yields
    # None, matching the original fall-through behaviour.
    classes = {"faster": MantisFasterRCNN, "mask": MantisMaskRCNN}
    return classes.get(request.param)
@pytest.fixture()
def assert_model_preds(request):
    # Losses every R-CNN variant must report.
    expected_keys = {
        "loss_box_reg",
        "loss_rpn_box_reg",
        "loss_objectness",
        "loss_classifier",
    }
    # Mask R-CNN additionally reports a mask loss.
    if request.param == "mask":
        expected_keys.add("loss_mask")

    def _check(model, batch):
        with torch.no_grad():
            preds = model.forward(*batch)
        assert set(preds.keys()) == expected_keys

    return _check
@pytest.mark.parametrize(
    "model_class, batch, assert_model_preds",
    [("faster", "faster_rcnn_batch", "faster"), ("mask", "mask_rcnn_batch", "mask")],
    indirect=True,
)
def test_rcnn_simple_backbone(model_class, simple_backbone, batch, assert_model_preds):
    # Both R-CNN variants should produce their expected loss dict when built
    # on the lightweight test backbone (simple_backbone fixture defined
    # elsewhere -- presumably in conftest; confirm).
    model = model_class(num_classes=91, backbone=simple_backbone)
    assert_model_preds(model, batch)
@pytest.mark.skip
@pytest.mark.slow
@pytest.mark.parametrize(
    "model_class, batch, assert_model_preds",
    [("faster", "faster_rcnn_batch", "faster"), ("mask", "mask_rcnn_batch", "mask")],
    indirect=True,
)
def test_rcnn_default_backbone(model_class, batch, assert_model_preds):
    # Builds each model with its default backbone (no backbone argument).
    # Marked skip/slow -- see the note below about models being too big for
    # github runners.
    model = model_class(num_classes=91)
    assert_model_preds(model, batch)
@pytest.mark.skip
@pytest.mark.slow
@pytest.mark.parametrize(
    "model_class, batch, assert_model_preds",
    [("faster", "faster_rcnn_batch", "faster"), ("mask", "mask_rcnn_batch", "mask")],
    indirect=True,
)
@pytest.mark.parametrize("pretrained", [False, True])
@pytest.mark.parametrize(
    "backbone, fpn",
    [
        # ("mobilenet", False),
        # ("vgg11", False),
        # ("vgg13", False),
        # ("vgg16", False),
        # ("vgg19", False),
        ("resnet18", False),
        ("resnet18", True),
        # ("resnet34", False),
        # ("resnet34", True),
        # ("resnet50", False),
        # ("resnet50", True),
        # these models are too big for github runners
        # "resnet101",
        # "resnet152",
        # "resnext101_32x8d",
    ],
)
def test_mask_rcnn_backbones(
    model_class, batch, assert_model_preds, backbone, fpn, pretrained
):
    # Cross-product of backbone name, FPN on/off, and pretrained weights:
    # every combination should still yield the expected loss dict.
    backbone = model_class.get_backbone_by_name(
        name=backbone, fpn=fpn, pretrained=pretrained
    )
    model = model_class(num_classes=91, backbone=backbone)
    assert_model_preds(model, batch)
| 2.125 | 2 |
code/shield/enableConfigure/lambda/index.py | aws-samples/aws-shield-advanced-rapid-deployment | 1 | 12773633 | import sys
sys.path.insert(0,'./shield/enableConfigure/lambda')
import json
import boto3
import os
import botocore
import urllib3
import cfnresponse
import logging
logger = logging.getLogger('hc')
logger.setLevel('DEBUG')

# Module-level clients/config are created once and reused across warm Lambda
# invocations.
shield_client = boto3.client('shield')
iam_client = boto3.client('iam')
s3_client = boto3.client('s3')

# Get Shield Config Values and Options.
# All configuration arrives via the Lambda environment; values are strings
# (e.g. EnableDRTAccess is compared against the literal 'true' below).
enabledProactiveEngagement = os.environ['EnabledProactiveEngagement']
enableDRTAccess = os.environ['EnableDRTAccess']
emergencyContactCount = os.environ['EmergencyContactCount']
accountId = os.environ['AccountId']

# Build Emergency Contact List
def lambda_handler(event, context):
logger.debug(event)
responseData = {}
if "RequestType" in event:
if event['RequestType'] in ['Create','Update']:
try:
shield_client.create_subscription()
logger.info ("Shield Enabled!")
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] == 'ResourceAlreadyExistsException':
logger.info ("Subscription already active")
else:
logger.error(error.response['Error'])
responseData['Error'] = error.response['Error']
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SubscribeFailed")
return ()
else:
responseData = {}
cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "CFNDeleteGracefulContinue")
return()
try:
emergencyContactList = []
emergencyContactList.append({
"EmailAddress": os.environ['EmergencyContactEmail1'],
"PhoneNumber": os.environ['EmergencyContactPhone1']
})
if emergencyContactCount == 2:
emergencyContactList.append({
"EmailAddress": os.environ['EmergencyContactEmail2'],
"PhoneNumber": os.environ['EmergencyContactPhone2']
})
except KeyError as error:
responseData = {}
responseData['Error'] = "KeyError for: " + error
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "BuildContactListFailed")
return ()
#Activate Shield Subscription
#Create DRT Role if needed
try:
iam_role_response = iam_client.get_role(
RoleName='AWSSRTAccess'
)
roleArn = iam_role_response['Role']['Arn']
logger.debug ("AWS SRTAccess already exists")
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] == 'NoSuchEntity':
try:
iam_role_response = iam_client.create_role(
RoleName='AWSSRTAccess',
AssumeRolePolicyDocument='{"Version":"2012-10-17","Statement":[{"Sid":"","Effect":"Allow","Principal":{"Service":"drt.shield.amazonaws.com"},"Action":"sts:AssumeRole"}]}',
MaxSessionDuration=3600,
)
roleArn = iam_role_response['Role']['Arn']
except:
logger.error(error.response['Error'])
responseData['Error'] = error.response['Error']
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "CreateDRTRoleFailed")
return ()
else:
logger.error(error.response['Error'])
responseData['Error'] = error.response['Error']
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SRTRolePolicyConfigFailed")
return ()
#Ensure DRT Policy Attached to Role
try:
logger.info("Listing attached role policies for AWSSRTAccess role.")
iam_response = iam_client.list_attached_role_policies(
RoleName='AWSSRTAccess'
)
policyList = []
for p in iam_response['AttachedPolicies']:
policyList.append(p['PolicyName'])
if 'AWSShieldDRTAccessPolicy' not in policyList:
logger.info("Required Policy not attached to role, attaching")
response = iam_client.attach_role_policy(
RoleName='AWSSRTAccess',
PolicyArn='arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy'
)
else:
logger.debug ("Required Policy Already attached")
except botocore.exceptions.ClientError as error:
logger.error(error)
responseData['Error'] = error.response['Error']
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SRTRolePolicyConfigFailed")
return ()
if enableDRTAccess == 'true':
try:
logger.info("Associating DRT role.")
shield_response = shield_client.associate_drt_role(
RoleArn=roleArn
)
except botocore.exceptions.ClientError as error:
logger.error(error)
responseData['Error'] = error.response['Error']
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SRTEnablementFailed")
return ()
else:
try:
logger.info("Describing DRT access.")
shield_drt_response = shield_client.describe_drt_access()
if 'RoleArn' in shield_drt_response:
logger.info("Disassociating DRT role.")
shield_drt_response = shield_client.disassociate_drt_role()
except botocore.exceptions.ClientError as error:
logger.error(error)
responseData['Error'] = error.response['Error']
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SRTDisableFailed")
return ()
try:
logger.info("Updating emergency contact settings.")
shield_response = shield_client.update_emergency_contact_settings(
EmergencyContactList=emergencyContactList
)
logger.debug(shield_response)
except botocore.exceptions.ClientError as error:
logger.error(error)
responseData['Error'] = error.response['Error']
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "EmergencyContactUpdateFailed")
return ()
if enabledProactiveEngagement == 'true':
try:
logger.info("Enabling proactive engagement.")
shield_response = shield_client.enable_proactive_engagement()
logger.info("Associating proactive engagement details.")
shield_client.associate_proactive_engagement_details(
EmergencyContactList=emergencyContactList)
except botocore.exceptions.ClientError as error:
if error.response['Error']['Code'] == 'InvalidOperationException':
logger.info("ProactiveEngagementAlreadyEnabled")
elif error.response['Error']['Code'] == 'InvalidParameterException':
logger.info("Error Enabling Proactive Support, continue regardless")
else:
logger.error(error)
responseData['Error'] = error.response['Error']
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "ProactiveEngagementEnableFailed")
return ()
else:
try:
logger.info("Disabling proactive engagement.")
shield_response = shield_client.disable_proactive_engagement()
except botocore.exceptions.ClientError as error:
logger.error(error)
responseData['Error'] = error.response['Error']
cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "ProactiveEngagementEnableFailed")
return ()
responseData = {}
cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "ConfigureShieldAdvancedSucceesful")
return()
| 1.890625 | 2 |
from django.contrib import admin
from .models import Bar
@admin.register(Bar)
class BarAdmin(admin.ModelAdmin):
    """Register the Bar model with the Django admin using default options."""
    pass
| 1.21875 | 1 |
import os
import json
import shutil
import argparse
import numpy as np
from PIL import Image
def getSeqInfo(dataset_dir, seq):
    """Gather per-sequence statistics from its annotation masks.

    Parameters
    ----------
    dataset_dir : str
        Root of the DAVIS-style dataset; must contain
        ``Annotations/480p/<seq>`` with one indexed (palette) PNG per frame.
    seq : str
        Sequence (video) name.

    Returns
    -------
    tuple
        ``(frame_num, [width, height], num_objects)`` where the object
        count excludes the background label 0.
    """
    seq_dir = os.path.join(dataset_dir, 'Annotations', '480p', seq)
    # Single directory listing (the original listed the same directory twice).
    frames = os.listdir(seq_dir)
    masks = np.stack([np.array(Image.open(os.path.join(seq_dir, f)).convert('P'), dtype=np.uint8)
                      for f in frames])
    # masks has shape (num_frames, height, width); report [width, height].
    # NOTE: the original returned [shape[1], shape[0]] == [height, num_frames],
    # mixing the frame axis into the image size.
    img_size = [masks.shape[2], masks.shape[1]]
    # Count distinct non-zero labels. The original's np.delete(np.unique(m), 0)
    # dropped the first unique value even when background 0 was absent.
    num_objects = int(np.count_nonzero(np.unique(masks)))
    return len(frames), img_size, num_objects
def create_json(root_dir):
    """Write the global ``scb_ytbvos.json`` index for the validation split.

    Reads the sequence names from ``ImageSets/2017/val.txt`` under
    *root_dir*, collects per-sequence stats via ``getSeqInfo``, and dumps
    one JSON file describing all validation sequences.
    """
    val_txt_dst = os.path.join(root_dir, 'ImageSets', '2017', 'val.txt')
    with open(val_txt_dst, 'r') as f:
        val_seqs = f.readlines()
        # NOTE(review): f.close() inside a `with` block is redundant — the
        # context manager already closes the file.
        f.close()
    val_seqs = list(map(lambda elem: elem.strip(), val_seqs))
    # create davis.json
    '''Generate global json'''
    json_dict = dict()
    json_dict['attributes'] = []
    json_dict['sets'] = ["train", "val"]
    json_dict['years'] = [2018]
    json_dict['sequences'] = dict()
    for idx, seq in enumerate(val_seqs):
        seq = seq.strip()
        # Fixed metadata per sequence; counts filled in from the mask files.
        seq_dict = {'attributes': [], 'eval_t': True, 'name': seq, 'set': 'val', 'year': 2018, 'num_scribbles': 3}
        seq_dict['num_frames'], seq_dict['image_size'], seq_dict['num_objects'] = getSeqInfo(root_dir, seq)
        json_dict['sequences'][seq] = seq_dict
        print(f'valid: {idx+1}')
    global_json_path = os.path.join(root_dir, 'scb_ytbvos.json')
    with open(global_json_path, 'wt') as f:
        json.dump(json_dict, f, indent=2, separators=(',', ': '))
def create_dataset(src_ytbvos_path, dst_ytbvos_path, scb_ytbvos_path):
    """Convert a YouTube-VOS layout into a DAVIS-style dataset tree.

    Copies JPEG frames, cleaned annotations and scribbles for every
    sequence listed in ``<scb_ytbvos_path>/val.txt`` into *dst_ytbvos_path*,
    renaming frames to zero-padded indices, then writes the global JSON
    index via ``create_json``. Prints diagnostics and returns early if any
    expected source directory is missing.
    """
    if os.path.exists(src_ytbvos_path):
        os.makedirs(dst_ytbvos_path, exist_ok=True)
        # set youtube original path
        src_dir_JPEGImages = os.path.join(src_ytbvos_path, 'train', 'JPEGImages')
        src_dir_Annotations = os.path.join(src_ytbvos_path, 'train', 'CleanedAnnotations')
        # set youtube davis-like path
        dst_dir_ImageSets = os.path.join(dst_ytbvos_path, 'ImageSets', '2017')
        dst_dir_JPEGImages = os.path.join(dst_ytbvos_path, 'JPEGImages', '480p')
        dst_dir_Annotations = os.path.join(dst_ytbvos_path, 'Annotations', '480p')
        dst_dir_Scribbles = os.path.join(dst_ytbvos_path, 'Scribbles')
        if os.path.isdir(src_dir_JPEGImages) and os.path.isdir(src_dir_Annotations) and os.path.isdir(scb_ytbvos_path):
            # load sequence list
            assert len(os.listdir(src_dir_JPEGImages)) == len(os.listdir(src_dir_Annotations))
            with open(os.path.join(scb_ytbvos_path, 'val.txt'), 'r') as f:
                seqs_list = f.readlines()
                f.close()
            seqs_list = list(map(lambda elem: elem.strip(), seqs_list))
        else:
            if not os.path.isdir(src_dir_JPEGImages): print(f"{src_dir_JPEGImages} is not found in {src_ytbvos_path}")
            if not os.path.isdir(src_dir_Annotations): print(f"{src_dir_Annotations} is not found in {src_ytbvos_path}")
            if not os.path.isdir(scb_ytbvos_path): print(f"{scb_ytbvos_path} is not found")
            return
        # create dist dirs
        os.makedirs(dst_dir_ImageSets, exist_ok=True)
        os.makedirs(dst_dir_JPEGImages, exist_ok=True)
        os.makedirs(dst_dir_Annotations, exist_ok=True)
        os.makedirs(dst_dir_Scribbles, exist_ok=True)
        # --- copy files ---
        # ImageSets
        shutil.copyfile(os.path.join(scb_ytbvos_path, 'val.txt'), os.path.join(dst_dir_ImageSets, 'val.txt'))
        len_seq = []
        for i, seq in enumerate(seqs_list):
            print(f"validation set {i+1}")
            # JPEGImages: copy frames, renamed to 00000.jpg, 00001.jpg, ...
            src_dir_JPEGImages_seq = os.path.join(src_dir_JPEGImages, seq)
            dst_dir_JPEGImages_seq = os.path.join(dst_dir_JPEGImages, seq)
            os.makedirs(dst_dir_JPEGImages_seq, exist_ok=True)
            file_name = np.sort(os.listdir(src_dir_JPEGImages_seq))
            for j, file in enumerate(file_name):
                src_path = os.path.join(src_dir_JPEGImages_seq, file)
                dst_path = os.path.join(dst_dir_JPEGImages_seq, f"{str(j).zfill(5)}.jpg")
                if not os.path.exists(dst_path): shutil.copyfile(src_path, dst_path)
                # if not os.path.exists(dst_path): os.symlink(src_path, dst_path)
            # Annotations: same renaming scheme, .png extension
            src_dir_Annotations_seq = os.path.join(src_dir_Annotations, seq)
            dst_dir_Annotations_seq = os.path.join(dst_dir_Annotations, seq)
            os.makedirs(dst_dir_Annotations_seq, exist_ok=True)
            file_name = np.sort(os.listdir(src_dir_Annotations_seq))
            for j, file in enumerate(file_name):
                src_path = os.path.join(src_dir_Annotations_seq, file)
                dst_path = os.path.join(dst_dir_Annotations_seq, f"{str(j).zfill(5)}.png")
                if not os.path.exists(dst_path): shutil.copyfile(src_path, dst_path)
                # if not os.path.exists(dst_path): os.symlink(src_path, dst_path)
            # Scribbles: copied with their original filenames
            src_dir_Scribbles_seq = os.path.join(scb_ytbvos_path, seq)
            dst_dir_Scribbles_seq = os.path.join(dst_dir_Scribbles, seq)
            os.makedirs(dst_dir_Scribbles_seq, exist_ok=True)
            file_name = np.sort(os.listdir(src_dir_Scribbles_seq))
            for j, file in enumerate(file_name):
                src_path = os.path.join(src_dir_Scribbles_seq, file)
                dst_path = os.path.join(dst_dir_Scribbles_seq, file)
                if not os.path.exists(dst_path): shutil.copyfile(src_path, dst_path)
            # statistic
            # NOTE(review): len_seq is accumulated but never read afterwards.
            file_name = np.sort(os.listdir(src_dir_JPEGImages_seq))
            len_seq.append(len(file_name))
        # create sequences information
        create_json(dst_ytbvos_path)
        print(f"done")
    else:
        print(f"{src_ytbvos_path} not existed")
def main():
    """Parse command-line arguments and build the scribble YouTube-VOS dataset."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--src', type=str, required=True)
    parser.add_argument('--scb', type=str, required=True)
    parser.add_argument('--dst', type=str, default='data/Scribble_Youtube_VOS')
    opts = parser.parse_args()
    create_dataset(opts.src, opts.dst, opts.scb)
if __name__ == '__main__':
    # Run only when executed as a script (trailing extraction junk that made
    # this line a syntax error has been removed).
    main()
# fhireval/test_suite/search/test_mod_prefix.py
import pytest
import pdb
from fhir_walk.model.patient import Patient
# Identifier and weight for this test section — presumably consumed by the
# fhireval scoring harness; confirm against the test-suite runner.
test_id = f"{'2.2.1.2':<10} - Search Modifiers and Prefix"
test_weight = 15
# TODO Write some tests
def test_search_by_date(host):
assert 0 == 1, "TODO - Write Test"
def test_search_by_value(host):
assert 0 == 1, "TODO - Write Test"
def test_search_by_quantity(host):
assert 0 == 1, "TODO - Write Test"
def test_search_by_token(host):
assert 0 == 1, "TODO - Write Test"
| 2.265625 | 2 |
"""
026
Pig Latin takes the first consonant of a word,
moves it to the end of the word and adds on an
“ay”. If a word begins with a vowel you just add
“way” to the end. For example, pig becomes igpay,
banana becomes ananabay, and aadvark becomes
aadvarkway. Create a program that will ask the
user to enter a word and change it into Pig Latin.
Make sure the new word is displayed in lower case
"""
VOWELS = "aeiou"


def to_pig_latin(word):
    """Convert *word* to Pig Latin, displayed in lower case.

    Words starting with a vowel get "way" appended; otherwise the first
    consonant moves to the end, followed by "ay".
    """
    # The challenge requires the new word in lower case; the original code
    # never lower-cased the input.
    word = word.lower()
    if word[0] in VOWELS:
        return word + "way"
    return word[1:] + word[0] + "ay"


if __name__ == "__main__":
    # Guarded so importing this module does not block on input().
    word = input("enter a word plz : ")
    print(to_pig_latin(word))
# graph_visualize.py
from graphviz import Digraph
import pairwise
# Given a vector of the form generated in pairwise.py for
# easy reading into NNs, produce a diagram of the represented graph
def vec_to_graph(vec, name='no_name_graph', save=False, fromTorch=True):
    """Build a graphviz Digraph of the preference graph encoded in *vec*.

    Args:
        vec: pairwise-comparison vector as produced in pairwise.py; a torch
            tensor when fromTorch is True, otherwise a plain array/list.
        name: base filename used when rendering to disk.
        save: when True, render a PNG under ../diagrams/graph_views/.
        fromTorch: whether *vec* must first be converted via .numpy().

    Returns:
        The constructed graphviz.Digraph.
    """
    # Removed dead `matrix = None` initialisation and the unused n_cands local.
    if fromTorch:
        matrix = pairwise.vec_to_matrix(vec.numpy())
    else:
        matrix = pairwise.vec_to_matrix(vec)
    dot = Digraph(comment='Preference Graph', format='png')
    # One node per candidate: 'a', 'b', ... labelled 'alt 1', 'alt 2', ...
    for i, row in enumerate(matrix):
        dot.node(chr(i + 97), 'alt {}'.format(i + 1))
    # Edges come from the upper-triangular part: +1 means the row candidate
    # beats the column candidate, -1 the reverse.
    for i, row in enumerate(matrix):
        for j, alt in enumerate(row[i + 1:]):
            p_a = chr(i + 97)       # node for candidate i+1
            p_b = chr(i + j + 98)   # node for candidate i+j+2
            if alt == 1:
                dot.edge(p_a, p_b)
            elif alt == -1:
                dot.edge(p_b, p_a)
    if save:
        # Output path computed only when actually rendering.
        dot.render('../diagrams/graph_views/{}'.format(name), view=False)
    return dot
def vote_to_graph(vote, name='no_name_graph', save=False):
    """Render the preference graph for a raw vote vector.

    Rejects vectors containing 0 (an unranked alternative), then converts
    the vote to pairwise form and delegates to vec_to_graph.
    """
    if 0 in vote:
        raise Exception('There should be no 0 values in vote vector')
    pairwise_vec = pairwise.process_vote(vote)
    return vec_to_graph(pairwise_vec, name, save, fromTorch=False)
__author__ = 'oglebrandon'
from logbook import Logger
from ib.ext.Contract import Contract
from ib.ext.ExecutionFilter import ExecutionFilter
from ib.ext.Order import Order as IBOrder
from alephnull.finance.blotter import Blotter
from alephnull.utils.protocol_utils import Enum
from alephnull.finance.slippage import Transaction
import alephnull.protocol as zp
# Medici fork of IbPy
# https://github.com/CarterBain/Medici
from ib.client.IBrokers import IBClient
import datetime as dt
import pytz
log = Logger('Blotter')

# Lifecycle states for orders tracked by the live blotter.
ORDER_STATUS = Enum(
    'OPEN',
    'FILLED',
    'CANCELLED'
)


def round_for_minimum_price_variation(x):
    # Placeholder: currently returns x unchanged.
    #Todo: modify to round to minimum tick
    return x
class LiveBlotter(Blotter):
    """Blotter that forwards orders to Interactive Brokers (Python 2 code).

    Maps internal order ids to IB order ids via ``id_map``. The
    ``place_order``/``get_transactions``/``cancel_order`` callables are
    injected by LiveExecution.__init__.
    """
    # Shared at class level: internal order id -> IB order id.
    id_map = {}

    def __init__(self):
        super(LiveBlotter, self).__init__()

    def order(self, sid, amount, limit_price, stop_price, order_id=None):
        """Record the order locally, then submit it to IB.

        Returns the internal order id.
        """
        # NOTE(review): the incoming order_id is ignored — the super call
        # hard-codes order_id=None; confirm this is intended.
        id = super(LiveBlotter, self).order(sid, amount, limit_price, stop_price, order_id=None)
        order_obj = self.orders[id]

        ib_order = IBOrder()
        ib_order.m_transmit = True
        ib_order.m_orderRef = order_obj.id
        ib_order.m_totalQuantity = order_obj.amount
        # Positive quantity buys, negative sells.
        ib_order.m_action = ['BUY' if ib_order.m_totalQuantity > 0 else 'SELL'][0]
        ib_order.m_tif = 'DAY'
        #Todo: make the FA params configurable
        ib_order.m_faGroup = 'ALL'
        ib_order.m_faMethod = 'AvailableEquity'

        # infer order type from which of stop/limit prices are set
        if order_obj.stop and not order_obj.limit:
            ib_order.m_orderType = 'STP'
            ib_order.m_auxPrice = float(order_obj.stop)
        elif order_obj.limit and not order_obj.stop:
            ib_order.m_orderType = 'LMT'
            ib_order.m_lmtPrice = float(order_obj.limit)
        elif order_obj.stop and order_obj.limit:
            ib_order.m_orderType = 'STPLMT'
            ib_order.m_auxPrice = float(order_obj.stop)
            ib_order.m_lmtPrice = float(order_obj.limit)
        else:
            ib_order.m_orderType = 'MKT'

        contract = Contract()
        contract.m_symbol = order_obj.sid
        contract.m_currency = 'USD'
        if hasattr(order_obj, 'contract'):
            # This is a futures contract
            contract.m_secType = 'FUT'
            contract.m_exchange = 'GLOBEX'
            contract.m_expiry = order_obj.contract
        else:
            # This is a stock
            contract.m_secType = 'STK'
            contract.m_exchange = 'SMART'

        ib_id = self.place_order(contract, ib_order)
        self.id_map[order_obj.id] = ib_id

        return order_obj.id

    def cancel(self, order_id):
        """Cancel the IB order mapped to *order_id*."""
        ib_id = self.id_map[order_id]
        self.cancel_order(ib_id)
        # NOTE(review): this calls .order() on Blotter's *parent* with a single
        # argument — looks like it should invoke the base cancel; confirm.
        super(Blotter, self).order(order_id)

    def process_trade(self, trade_event):
        """Yield (transaction, order) pairs for fills matching *trade_event*."""
        # checks if event is trade
        if trade_event.type != zp.DATASOURCE_TYPE.TRADE:
            return

        # checks if is future contract
        if hasattr(trade_event, 'contract'):
            # NOTE(review): 'cotract' is likely a typo for 'contract' and will
            # raise AttributeError on the futures path; confirm.
            sid = (trade_event.sid, trade_event.cotract)
        else:
            sid = trade_event.sid

        if sid in self.open_orders:
            orders = self.open_orders[sid]
            # sort orders by datetime, and filter out future dates
            # lambda x: sort([order.dt for order in orders])
        else:
            return

        for order, txn in self.get_transactions(trade_event, orders):
            # check that not commission
            order.filled += txn.amount
            if order.amount - order.filled == 0:
                order.status = ORDER_STATUS.FILLED
            order.dt = txn.dt
            print txn.__dict__
            yield txn, order

        # Keep only still-open orders for this sid.
        self.open_orders[sid] = \
            [order for order
             in self.open_orders[sid]
             if order.open]
class LiveExecution(IBClient):
    """Client connection to the Interactive Brokers API
    inherits from IBClient in the Medici fork of IbPy
    """

    def __init__(self, call_msg):
        super(LiveExecution, self).__init__(call_msg=call_msg)
        # Wire the blotter's broker-facing callables to this client.
        self._blotter = LiveBlotter()
        self._blotter.place_order = self.place_order
        self._blotter.get_transactions = self.get_transactions
        self._blotter.cancel_order = self.cancel_order
        super(LiveExecution, self).__track_orders__()

    @property
    def blotter(self):
        # Read-only access to the wired LiveBlotter instance.
        return self._blotter

    def __ib_to_aleph_sym_map__(self, contract):
        """Convert an IB futures contract to a (symbol, expiry) tuple,
        re-inserting the decade digit dropped from IB's local symbol."""
        decade = dt.date.today().strftime('%y')[0]
        sym = contract.m_symbol
        exp = contract.m_localSymbol.split(sym)[1]
        exp = exp[0] + decade[0] + exp[1]
        return (sym, exp)

    def total_cash(self):
        """Sum TotalCashValue across all child accounts.

        NOTE(review): the bare except retries via unbounded recursion on any
        failure — a persistent error will recurse forever; confirm intent.
        """
        cash = 0
        for acct in self.account.child_accounts:
            try:
                cash += float([x.value for x in self.account_details(acct)
                               if x.key == 'TotalCashValue'][0])
            except:
                return self.total_cash()
        return cash

    def ib_portfolio(self):
        """Aggregate IB positions across child accounts into a zp.Portfolio."""
        portfolio_store = zp.Portfolio()
        positions_store = zp.Positions()
        for acct in self.account.child_accounts:
            positions = self.portfolio(acct)
            for pos in positions:
                # Skip empty requests
                if hasattr(pos, 'contract'):
                    contract = pos.contract
                    # determine position sid
                    if contract.m_secType == 'STK':
                        sid = contract.m_localSymbol
                    if contract.m_secType == 'FUT':
                        sid = self.__ib_to_aleph_sym_map__(contract)
                    # if sid not in positions create a new position object
                    if sid not in positions_store:
                        if type(sid) is tuple:
                            positions_store[sid] = zp.Position(sid[0], contract=sid[1])
                        else:
                            positions_store[sid] = zp.Position(sid)
                        positions_store[sid].amount = pos.position_size
                        positions_store[sid].last_sale_price = pos.market_price
                        positions_store[sid].cost_basis = pos.avg_cost
                    else:
                        current_size = positions_store[sid].amount
                        # adjust cost basis:
                        # this should never result in a different value unless
                        # IB doesn't enforce best execution
                        positions_store[sid].amount += pos.position_size
                        if positions_store[sid].amount != 0:
                            mkt_value = positions_store[sid].cost_basis * current_size
                            added_value = pos.avg_cost * pos.position_size
                            positions_store[sid].cost_basis = (mkt_value + added_value) / \
                                                              positions_store[sid].amount

                    portfolio_store.positions_value += pos.market_value
                    portfolio_store.pnl = pos.realized_pnl + pos.unrealized_pnl

        portfolio_store.positions = positions_store
        return portfolio_store

    def get_transactions(self, event, orders):
        """Yield (order, Transaction) pairs for IB executions matching
        *event*'s sid and each order's id.

        Execution timestamps are parsed as UTC — presumably IB reports them
        in the configured timezone; verify against the TWS settings.
        """
        import time
        time.sleep(1)
        efilter = ExecutionFilter()
        efilter.m_symbol = event.sid

        for order in orders:
            # Todo: I need to refactor how executions are summoned, this is currently a huge bottleneck
            # cycle through all executions matching the event sid
            for execution in self.executions(efilter):
                prior_execution = None
                # further filter out any executions not matching the order.id
                if execution.m_orderRef == order.id:
                    # prevent processing of duplicate executions
                    if execution != prior_execution:
                        order_status_vals = (0, 0)
                        # cycle through the order status messages to get transaction details
                        for status in self.order_status(execution.m_orderId):
                            # filter out duplicate transaction messages
                            if (status['remaining'], status['filled']) != order_status_vals:
                                # get execution date
                                date = dt.datetime.strptime(execution.m_time,
                                                            '%Y%m%d %H:%M:%S').replace(tzinfo=pytz.utc)
                                amount = status['filled'] - order_status_vals[1]
                                txn = {'sid': event.sid,
                                       'amount': int(amount),
                                       'dt': date,
                                       'price': status['lastFillPrice'],
                                       'order_id': order.id}
                                transaction = Transaction(**txn)
                                order_status_vals = (status['remaining'], status['filled'])
                                #TODO: pretty sure there is still transactions are being duplicated still
                                if order.status == ORDER_STATUS.OPEN:
                                    yield order, transaction
                        prior_execution = execution
| 2.1875 | 2 |
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import utils
import matplotlib.pyplot as plt

# CIFAR-10: 10 classes of 32x32 RGB images.
num_classes = 10
im_rows = 32
im_cols = 32
in_shape = (im_rows, im_cols, 3)

# Load the data, scale pixels to [0, 1], one-hot encode labels.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
y_train = utils.to_categorical(y_train, num_classes)
y_test = utils.to_categorical(y_test, num_classes)

# Two conv blocks (32 then 64 filters): conv -> conv -> max-pool -> dropout.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=in_shape))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Classifier head: flatten -> dense(512) -> dropout -> softmax over classes.
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'])

# Train for 50 epochs, validating on the test split after each epoch.
hist = model.fit(X_train, y_train,
                 batch_size=32, epochs=50,
                 verbose=1,
                 validation_data=(X_test, y_test))

score = model.evaluate(X_test, y_test, verbose=1)
print('accuracy=', score[1], 'loss=', score[0])

# Plot train vs. test accuracy and loss curves.
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Accuracy')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

model.save_weights('cifar10-cnn-weight.h5')
| 3.46875 | 3 |
# Generated by Django 3.0.7 on 2020-06-09 12:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: points shedule.group at the Students model,
    alters students.group and teachers.class_number field definitions, and
    deletes the Group model."""

    dependencies = [
        ('program', '0006_auto_20200609_1522'),
    ]

    operations = [
        migrations.AlterField(
            model_name='shedule',
            name='group',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='program.Students', verbose_name='Номер группы'),
        ),
        migrations.AlterField(
            model_name='students',
            name='group',
            field=models.CharField(max_length=10, verbose_name='Номер группы'),
        ),
        migrations.AlterField(
            model_name='teachers',
            name='class_number',
            field=models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Номер кабинета'),
        ),
        migrations.DeleteModel(
            name='Group',
        ),
    ]
| 1.421875 | 1 |
from django.db.models import fields
from rest_framework import serializers
from country_manager.countries.models import Country, Currency
class SerializerCurrency(serializers.ModelSerializer):
    """Serialize a Currency exposing only its code field."""

    class Meta:
        model = Currency
        fields = ['code']
class BaseSerializerCountry(serializers.ModelSerializer):
    """Base Country serializer: id, name, flag icon, and the currency
    rendered via its string representation."""

    currency = serializers.StringRelatedField(many=False)

    class Meta:
        model = Country
        fields = ('id', 'name', 'flag_icon', 'currency')
class SerializerCountry(BaseSerializerCountry):
    """Country serializer extending the base fields with phone_prefix."""

    class Meta(BaseSerializerCountry.Meta):
        fields = BaseSerializerCountry.Meta.fields + ('phone_prefix',)
class SerializerRestrictedCountry(serializers.ModelSerializer):
    """Restricted Country serializer: name plus the nested currency code."""

    currency = SerializerCurrency(many=False)

    class Meta:
        model = Country
        fields = ["name", "currency"]
| 2.15625 | 2 |
# @Created Date: 2020-01-12 01:27:18 pm
# @Filename: api.py
# @Email: <EMAIL>
# @Author: <NAME>
# @Last Modified: 2020-02-11 04:22:22 pm
# @Copyright (c) 2020 MinghuiGroup, Soochow University
from numpy import array, nan, count_nonzero
import pandas as pd
from typing import Union, Optional, Iterator, Iterable, Dict, List, Any, Generator, Callable, Tuple
import orjson as json
from pathlib import Path
from aiofiles import open as aiofiles_open
from collections import defaultdict
from unsync import unsync, Unfuture
from random import choice
from hashlib import sha1
from pdb_profiling.processors.recordbase import IdentifierBase
from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams
from pdb_profiling.log import Abclog
from pdb_profiling.fetcher.webfetch import UnsyncFetch
from pdb_profiling.processors.transformer import Dict2Tabular
from pdb_profiling.exceptions import WithoutExpectedKeyError, InvalidFileContentError
from pdb_profiling.ensure import EnsureBase
from tenacity import retry, wait_random, stop_after_attempt, retry_if_exception_type, RetryError
ensure = EnsureBase()
# Retry policy for make_sure_complete-decorated coroutines: up to 3 attempts
# with a short random wait, retrying only on InvalidFileContentError.
msc_rt_kw = dict(wait=wait_random(max=1), stop=stop_after_attempt(3), retry=retry_if_exception_type(InvalidFileContentError))
# Base endpoints for PDBe REST/graph APIs and the PDB archive mirrors.
BASE_URL: str = 'https://www.ebi.ac.uk/pdbe/'
FTP_URL: str = 'ftp://ftp.ebi.ac.uk/'
FTP_DEFAULT_PATH: str = 'pub/databases/msd/sifts/flatfiles/tsv/uniprot_pdb.tsv.gz'
PDB_ARCHIVE_URL_EBI: str = 'http://ftp.ebi.ac.uk/pub/databases/pdb/data/structures/'
PDB_ARCHIVE_URL_WWPDB: str = 'https://ftp.wwpdb.org/pub/pdb/data/structures/'
PDB_ARCHIVE_VERSIONED_URL: str = 'http://ftp-versioned.wwpdb.org/pdb_versioned/data/'
# https://ftp.wwpdb.org/pub/pdb/data/structures/obsolete/mmCIF/a0/2a01.cif.gz
# http://ftp.ebi.ac.uk/pub/databases/pdb/data/structures/obsolete/mmCIF/a0/2a01.cif.gz
# http://ftp-versioned.wwpdb.org/pdb_versioned/data/entries/wm/pdb_00002wmg/pdb_00002wmg_xyz_v1-2.cif.gz
# Registry of (decoder function, frozenset-of-suffix-keys) pairs, filled in
# by the dispatch_on_set decorator and consulted by traverseSuffixes.
FUNCS = []
def mask_ib(i, default='', raise_error=False):
    """Map an identifier record to the column name used for it.

    PDB entry-level identifiers map to 'pdb_id' and UniProt identifiers to
    'UniProt'; any other combination yields *default*, or raises when
    *raise_error* is True.
    """
    if i.source == 'UniProt':
        return 'UniProt'
    if i.source == 'PDB' and i.level == 'entry':
        return 'pdb_id'
    if raise_error:
        raise AssertionError('Unexpected Case!')
    return default
def str_number_converter(x):
    """Coerce *x* to int; unparsable values map to the sentinel -100000."""
    try:
        value = int(x)
    except ValueError:
        value = -100000
    return value
def dispatch_on_set(*keys):
    """Decorator factory: register the wrapped function in FUNCS under the
    given set of API-suffix keys, so traverseSuffixes can dispatch to it."""
    key_set = frozenset(keys)

    def register(func):
        FUNCS.append((func, key_set))
        return func

    return register
def traverseSuffixes(query: Any, *args):
    """Dispatch *args* to the FUNCS entry whose key set contains *query*.

    Raises ValueError when no registered function claims the suffix.
    """
    for registered_func, key_set in FUNCS:
        if query in key_set:
            return registered_func(*args)
    raise ValueError(f'Invalid query: {query}')
class ProcessPDBe(Abclog):
    """Fetch PDBe REST API payloads as JSON files and convert them to TSV.

    Tasks are generated by yieldTasks, executed through UnsyncFetch, and the
    downloaded JSON is flattened via the decoders registered in FUNCS.
    """

    # HTTP headers sent with every request.
    headers = {'Connection': 'close', 'Content-Type': 'application/json'}
    # Column converters applied when the resulting TSVs are read back.
    converters = {
        'pdb_id': str,
        'chain_id': str,
        'struct_asym_id': str,
        'entity_id': str_number_converter,
        'author_residue_number': int,
        'residue_number': str_number_converter,
        'author_insertion_code': str,
        'id': int,
        'interface_id': int,
        'interface_number': int,
        'pdb_code': str,
        'assemble_code': int,
        'assembly_id': int,
        'oper_expression': str,
        'structure_1.range': str,
        'structure_2.range': str,
        'alt_code': str,
        'sheet_id': str_number_converter
    }

    @classmethod
    def yieldTasks(cls, pdbs: Union[Iterable, Iterator], suffix: str, method: str, folder: Union[str, Path], chunksize: int = 25, task_id: int = 0) -> Generator:
        """Yield (method, request-params, output-path) task tuples.

        POST requests batch *chunksize* ids per call; GET requests issue one
        call per id. '/' in suffixes/ids is replaced by '%' in filenames.
        """
        file_prefix = suffix.replace('/', '%')
        method = method.lower()
        if method == 'post':
            url = f'{BASE_URL}{suffix}'
            for i in range(0, len(pdbs), chunksize):
                params = {'headers': cls.headers, 'url': url, 'data': ','.join(pdbs[i:i+chunksize])}
                yield method, params, folder/f'{file_prefix}+{task_id}+{i}.json'
        elif method == 'get':
            for pdb in pdbs:
                identifier = pdb.replace('/', '%')
                yield method, {'headers': cls.headers, 'url': f'{BASE_URL}{suffix}{pdb}'}, folder/f'{file_prefix}+{identifier}.json'
        else:
            raise ValueError(
                f'Invalid method: {method}, method should either be "get" or "post"')

    @classmethod
    def single_retrieve(cls, pdb: str, suffix: str, method: str, folder: Union[Path, str], semaphore, rate: float = 1.5, **kwargs):
        """Fetch one id and post-process it (default: cls.process -> TSV)."""
        return UnsyncFetch.single_task(
            task=next(cls.yieldTasks((pdb, ), suffix, method, folder)),
            semaphore=semaphore,
            to_do_func=kwargs.get('to_do_func', cls.process),
            rate=rate)

    @classmethod
    def retrieve(cls, pdbs: Union[Iterable, Iterator], suffix: str, method: str, folder: Union[str, Path], chunksize: int = 20, concur_req: int = 20, rate: float = 1.5, task_id: int = 0, ret_res: bool = True, **kwargs):
        """Fetch many ids concurrently; each result is piped through cls.process."""
        # t0 = time.perf_counter()
        res = UnsyncFetch.multi_tasks(
            cls.yieldTasks(pdbs, suffix, method, folder, chunksize, task_id),
            cls.process,
            concur_req=concur_req,
            rate=rate,
            ret_res=ret_res,
            semaphore=kwargs.get('semaphore', None))
        # elapsed = time.perf_counter() - t0
        # cls.logger.info('{} ids downloaded in {:.2f}s'.format(len(res), elapsed))
        return res

    @classmethod
    @unsync
    @ensure.make_sure_complete(**msc_rt_kw)
    async def json2tsv(cls, suffix:str, ori_path: Union[str, Path], path: Union[str, Path]):
        """Decode the JSON at *ori_path* and write flattened TSV to *path*.

        Returns *path* on success, or None when the payload holds no
        expected data. Retried (via msc_rt_kw) on InvalidFileContentError.
        """
        cls.logger.debug('Start to decode')
        async with aiofiles_open(ori_path) as handle:
            try:
                data = json.loads(await handle.read())
            except Exception as e:
                cls.logger.error(f"Error in '{ori_path}'")
                raise e
        res = Dict2Tabular.pyexcel_io(traverseSuffixes(suffix, data))
        if res is not None:
            if isinstance(res, Generator):
                # A generator of tables: first write creates the file,
                # subsequent writes append.
                count = 0
                for r in res:
                    if r is not None:
                        await pipe_out(df=r, path=path, format='tsv', mode='a' if count else 'w')
                        count += 1
                if not count:
                    cls.logger.debug(f"Without Expected Data ({suffix}): {data}")
                    return None
            else:
                await pipe_out(df=res, path=path, format='tsv', mode='w')
            cls.logger.debug(f"Decoded file in '{path}'")
            return path
        else:
            cls.logger.debug(f"Without Expected Data ({suffix}): {data}")
            return None

    @classmethod
    @unsync
    async def process(cls, path: Union[str, Path, Unfuture]):
        """Post-process a downloaded JSON file (or Unfuture of one) into TSV.

        The API suffix is recovered from the filename's '%'-escaped prefix.
        """
        if not isinstance(path, (str, Path)):
            path = await path
        if path is None:
            return
        path = Path(path)
        suffix = path.name.replace('%', '/').split('+')[0]
        # NOTE(review): str.replace swaps every '.json' occurrence in the full
        # path, not just the extension — confirm paths never contain '.json'
        # elsewhere.
        new_path = Path(str(path).replace('.json', '.tsv'))
        try:
            return await cls.json2tsv(suffix=suffix, ori_path=path, path=new_path)
        except RetryError:
            cls.logger.error(f"Retry failed for: {path.name} -> {new_path.name}")
            raise
class PDBeDecoder(object):
    @staticmethod
    @dispatch_on_set('api/pdb/entry/status/', 'api/pdb/entry/summary/', 'api/pdb/entry/modified_AA_or_NA/',
                     'api/pdb/entry/mutated_AA_or_NA/', 'api/pdb/entry/cofactor/', 'api/pdb/entry/molecules/',
                     'api/pdb/entry/entities/',
                     'api/pdb/entry/ligand_monomers/', 'api/pdb/entry/experiment/', 'api/pdb/entry/carbohydrate_polymer/',
                     'api/pdb/entry/electron_density_statistics/', 'api/pdb/entry/related_experiment_data/',
                     'api/pdb/entry/drugbank/', 'api/mappings/best_structures/',
                     'graph-api/pdb/mutated_AA_or_NA/', 'graph-api/pdb/modified_AA_or_NA/',
                     'graph-api/mappings/best_structures/', 'graph-api/compound/atoms/',
                     'graph-api/compound/bonds/', 'graph-api/compound/summary/',
                     'graph-api/compound/cofactors/', 'graph-api/pdb/funpdbe/',
                     'graph-api/pdb/bound_excluding_branched/',
                     'graph-api/pdb/bound_molecules/', 'graph-api/pdb/ligand_monomers/',
                     'api/validation/global-percentiles/entry/', 'api/validation/summary_quality_scores/entry/',
                     'api/validation/key_validation_stats/entry/', 'api/validation/xray_refine_data_stats/entry/',
                     'api/validation/vdw_clashes/entry/', 'api/validation/outliers/all/',
                     'api/validation/nmr_cyrange_cores/entry/',  # TODO: 2tablar
                     'api/validation/nmr_ensemble_clustering/entry/'
                     )
    def yieldCommon(data: Dict) -> Generator:
        """Flatten the common {id: [record, ...]} payload shape: yield each
        record list keyed by its id column name (per mask_ib) and the id.

        Nested dict/list values are JSON-encoded in place; .decode works
        because `json` is orjson, whose dumps returns bytes.
        """
        for pdb in data:
            values = data[pdb]
            for value in values:
                for key in value:
                    if isinstance(value[key], (Dict, List)):
                        value[key] = json.dumps(value[key]).decode('utf-8')
            yield values, (mask_ib(IdentifierBase(pdb), '_code_'),), (pdb,)
    @staticmethod
    @dispatch_on_set('api/pdb/entry/polymer_coverage/')
    def yieldPolymerCoverage(data: Dict) -> Generator:
        """Yield each chain's observed fragments (start/end JSON-encoded),
        keyed by (chain_id, struct_asym_id, entity_id, pdb_id)."""
        for pdb in data:
            molecules = data[pdb]['molecules']
            for entity in molecules:
                chains = entity['chains']
                for chain in chains:
                    observed = chain['observed']
                    for fragement in observed:
                        for key in ('start', 'end'):
                            fragement[key] = json.dumps(
                                fragement[key]).decode('utf-8')
                    yield observed, ('chain_id', 'struct_asym_id', 'entity_id', 'pdb_id'), (chain['chain_id'], chain['struct_asym_id'], entity['entity_id'], pdb)
    @staticmethod
    @dispatch_on_set('api/pdb/entry/observed_residues_ratio/')
    def yieldObservedResiduesRatio(data: Dict) -> Generator:
        """Yield per-entity observed-residue records keyed by (entity_id, pdb_id)."""
        for pdb in data:
            for entity_id, entity in data[pdb].items():
                yield entity, ('entity_id', 'pdb_id'), (entity_id, pdb)
    @staticmethod
    @dispatch_on_set('api/pdb/entry/residue_listing/')
    def yieldResidues(data: Dict) -> Generator:
        """Yield residue listings per chain; 'multiple_conformers' is
        normalised to a JSON string, or '' when absent."""
        for pdb in data:
            molecules = data[pdb]['molecules']
            for entity in molecules:
                chains = entity['chains']
                for chain in chains:
                    residues = chain['residues']
                    for res in residues:
                        if 'multiple_conformers' not in res:
                            res['multiple_conformers'] = ''
                        else:
                            res['multiple_conformers'] = json.dumps(
                                res['multiple_conformers']).decode('utf-8')
                    yield residues, ('chain_id', 'struct_asym_id', 'entity_id', 'pdb_id'), (chain['chain_id'], chain['struct_asym_id'], entity['entity_id'], pdb)
    @staticmethod
    @dispatch_on_set('api/pdb/entry/secondary_structure/', 'graph-api/pdb/secondary_structure/')
    def yieldSecondaryStructure(data: Dict) -> Generator:
        """Yield secondary-structure fragments per chain and per structure
        type name; nested values are JSON-encoded and a missing 'sheet_id'
        is filled with None."""
        for pdb in data:
            molecules = data[pdb]['molecules']
            for entity in molecules:
                chains = entity['chains']
                for chain in chains:
                    secondary_structure = chain['secondary_structure']
                    for name in secondary_structure:
                        fragment = secondary_structure[name]
                        for record in fragment:
                            for key in record:
                                if isinstance(record[key], (Dict, List)):
                                    record[key] = json.dumps(
                                        record[key]).decode('utf-8')
                            if 'sheet_id' not in record:
                                record['sheet_id'] = None
                        yield fragment, ('secondary_structure', 'chain_id', 'struct_asym_id', 'entity_id', 'pdb_id'), (name, chain['chain_id'], chain['struct_asym_id'], entity['entity_id'], pdb)
@staticmethod
@dispatch_on_set('api/pdb/entry/binding_sites/')
def yieldBindingSites(data: Dict) -> Generator:
    """Yield binding-site residue lists, tagged as site or ligand residues."""
    for pdb_id, sites in data.items():
        for site in sites:
            for residues_type in ('site_residues', 'ligand_residues'):
                residues = site[residues_type]
                for residue in residues:
                    # ensure the column exists on every record
                    residue.setdefault('symmetry_symbol', None)
                yield residues, ('residues_type', 'details', 'evidence_code', 'site_id', 'pdb_id'), (residues_type, site['details'], site['evidence_code'], site['site_id'], pdb_id)
@staticmethod
@dispatch_on_set('api/pdb/entry/assembly/')
def yieldAssembly(data: Dict) -> Generator:
    """Yield assembly entity records; nested values become JSON-encoded strings.

    The trailing columns are every biounit key except 'entities', plus pdb_id.
    """
    for pdb_id, biounits in data.items():
        for biounit in biounits:
            entities = biounit['entities']
            for entity in entities:
                for key, value in entity.items():
                    if isinstance(value, (Dict, List)):
                        entity[key] = json.dumps(value).decode('utf-8')
            extra_keys = [key for key in biounit if key != 'entities']
            yield entities, tuple(extra_keys) + ('pdb_id',), tuple(biounit[key] for key in extra_keys) + (pdb_id,)
@staticmethod
@dispatch_on_set('api/pdb/entry/files/')
def yieldAssociatedFiles(data: Dict) -> Generator:
    """Yield non-empty associated-file records with their two-level keys.

    Empty/falsy records are simply skipped; the previous
    ``else: continue`` branch was dead code (the loop continues anyway)
    and has been removed.
    """
    for pdb in data:
        for key in data[pdb]:
            for innerKey in data[pdb][key]:
                record = data[pdb][key][innerKey]
                if record:
                    yield record, ('innerKey', 'key', 'pdb_id'), (innerKey, key, pdb)
@staticmethod
@dispatch_on_set('api/mappings/all_isoforms/', 'api/mappings/uniprot/',
                 'api/mappings/uniprot_segments/', 'api/mappings/isoforms/',
                 'api/mappings/uniref90/', 'api/mappings/homologene_uniref90/',
                 'api/mappings/interpro/', 'api/mappings/pfam/',
                 'api/mappings/cath/', 'api/mappings/cath_b/',
                 'api/mappings/scop/', 'api/mappings/go/',
                 'api/mappings/ec/', 'api/mappings/ensembl/',
                 'api/mappings/hmmer/', 'api/mappings/sequence_domains/',
                 'api/mappings/structural_domains/', 'api/mappings/homologene/',
                 'api/mappings/uniprot_to_pfam/', 'api/mappings/uniprot_publications/',
                 'graph-api/mappings/uniprot/', 'graph-api/mappings/uniprot_segments/',
                 'graph-api/mappings/all_isoforms/', 'graph-api/mappings/',
                 'graph-api/mappings/isoforms/', 'graph-api/mappings/ensembl/',
                 'graph-api/mappings/homologene/', 'graph-api/mappings/sequence_domains/',
                 'api/mappings/', 'api/nucleic_mappings/', 'api/nucleic_mappings/rfam/',
                 'api/nucleic_mappings/sequence_domains/'
                 # 'graph-api/uniprot/'
                 )
def yieldSIFTSAnnotation(data: Dict) -> Generator:
    """Yield flattened SIFTS cross-reference mappings.

    Two payload shapes are handled:
    * top-level id -> {annotation DB -> {accession -> {'mappings': [...]}}}
      (PDB/PDB-entity/UniProt roots; see `valid_annotation_set`)
    * top-level UniProt -> {'PDB': {pdb_id: [chain mappings]}}
    Any other shape raises ValueError.
    """
    valid_annotation_set = {'UniProt', 'Ensembl', 'Pfam', 'CATH',
                            'CATH-B', 'SCOP', 'InterPro', 'GO', 'EC', 'Homologene', 'HMMER', 'Rfam'}
    for top_root in data:
        # top_root: PDB_ID or else ID
        if data[top_root].keys() <= valid_annotation_set:
            # from PDB to ('UniProt', 'Ensembl', 'Pfam', 'CATH', 'CATH-B', 'SCOP', 'InterPro', 'GO', 'EC', 'Homologene', 'HMMER')
            # from PDB_ENTITY (i.e. graph-api/mappings/homologene/)
            # OR: from Uniprot (i.e. api/mappings/uniprot_to_pfam/)
            for sec_root in data[top_root]:
                child = data[top_root][sec_root]
                for annotation in child:
                    chains = child[annotation]['mappings']
                    for chain in chains:
                        # JSON-encode nested dict values in place
                        for key, value in chain.items():
                            chain[key] = json.dumps(value).decode(
                                'utf-8') if isinstance(value, Dict) else value
                        # copy the annotation-level metadata onto each chain row
                        for key, value in child[annotation].items():
                            if key == 'mappings':
                                continue
                            chain[key] = json.dumps(value).decode(
                                'utf-8') if isinstance(value, Dict) else value
                        # mask_ib names the id column from the top_root's type
                        chain[mask_ib(IdentifierBase(top_root), raise_error=True)] = top_root
                        chain[sec_root] = annotation
                    yield chains, None
        elif len(data[top_root].keys()) == 1 and 'PDB' in data[top_root].keys():
            # from UniProt to PDB
            for sec_root in data[top_root]:
                child = data[top_root][sec_root]
                for pdb in child:
                    chains = child[pdb]
                    for chain in chains:
                        chain['start'] = json.dumps(
                            chain['start']).decode('utf-8')
                        chain['end'] = json.dumps(
                            chain['end']).decode('utf-8')
                    yield chains, ('pdb_id', 'UniProt'), (pdb, top_root)
        else:
            raise ValueError(
                f'Unexpected data structure for inputted data: {data}')
@staticmethod
@dispatch_on_set('api/pisa/asiscomponent/')
def yield_pisa_asiscomponent(data: Dict):
    """Yield engaged-interface arrays from PISA assembly details."""
    for pdb_id, info in data.items():
        if info['status'] != 'Ok' or 'assembly_detail' not in info:
            raise WithoutExpectedKeyError(f"Without Expected interfacelist info: {data}")
        try:
            records = info['assembly_detail']['engaged_interfaces_list']['engaged_interfaces_array']
        except KeyError:
            raise WithoutExpectedKeyError(f"Without Expected interfacelist info: {data}")
        yield records, ('pdb_id',), (pdb_id,)
@staticmethod
@dispatch_on_set('api/pisa/interfacelist/')
def yieldPISAInterfaceList(data: Dict):
    """Yield flattened interface entries plus the page's assembly id."""
    for pdb_id, info in data.items():
        try:
            entries = info['interfaceentries']
        except KeyError:
            raise WithoutExpectedKeyError(
                f"Without Expected interface_detail: {data}")
        for entry in entries:
            # pull the nested structure dicts up into flat columns
            flatten_dict(entry, 'structure_1')
            flatten_dict(entry, 'structure_2')
        yield entries, ('pdb_id', 'assembly_id'), (pdb_id, info['page_title']['assemble_code'])
@staticmethod
@dispatch_on_set('api/pisa/interfacedetail/')
def yieldPISAInterfaceDetail(data: Dict):
    """Yield both residue arrays of a PISA interface detail payload.

    The payload is flattened in place (order of the flatten_dict calls
    matters: inner structures first, then the enclosing dicts) and the
    same `usecols` metadata tuple is attached to both yields.
    """
    usecols = (
        'pdb_code', 'assemble_code', 'interface_number',
        'interface_detail.interface_structure_1.structure.selection',
        'interface_detail.interface_structure_2.structure.selection')
    # 'interface_atoms', 'interface_residue', 'interface_area', 'solvation_energy'
    edge_cols1 = ('structure',)
    # 'interface_atoms', 'interface_residues', 'interface_area', 'solvation_energy'
    edge_cols2 = ('structure',)
    for pdb in data:
        try:
            records = data[pdb]['interface_detail']
        except KeyError:
            raise WithoutExpectedKeyError(
                f"Without Expected interface_detail: {data}")
        # bonds are not tabulated; drop before flattening
        del records['bonds']
        for col in edge_cols1:
            flatten_dict(records['interface_structure_1'], col)
        for col in edge_cols2:
            flatten_dict(records['interface_structure_2'], col)
        flatten_dict(data[pdb], 'page_title', False)
        flatten_dict(records, 'interface_structure_1')
        flatten_dict(records, 'interface_structure_2')
        flatten_dict(data[pdb], 'interface_detail')
        # cols = sorted(i for i in data[pdb].keys() if i != 'interface_detail.residues')
        yield data[pdb]['interface_detail.residues']['residue1']['residue']['residue_array'], usecols, tuple(data[pdb][col] for col in usecols)
        yield data[pdb]['interface_detail.residues']['residue2']['residue']['residue_array'], usecols, tuple(data[pdb][col] for col in usecols)
@staticmethod
@dispatch_on_set('graph-api/residue_mapping/')
def graph_api_residue_mapping(data: Dict):
    '''
    * <https://www.ebi.ac.uk/pdbe/graph-api/residue_mapping/:pdbId/:entityId/:residueNumber>
    * <https://www.ebi.ac.uk/pdbe/graph-api/residue_mapping/:pdbId/:entityId/:residueStart/:residueEnd>
    NOTE: only yield UniProt Residue Related Data

    Yields one list of merged dicts per residue: the fixed identifier
    columns plus each UniProt feature dict (feature keys override the
    fixed columns on collision — presumably intentional; verify).
    '''
    cols = (
        'pdb_id', 'entity_id', 'chain_id', 'struct_asym_id',
        'residue_number', 'author_residue_number',
        'author_insertion_code', 'observed', 'UniProt')
    for pdb_id in data:
        # the endpoint is entity-scoped, so exactly one molecule is expected
        assert len(data[pdb_id]) == 1, f"Unexpected Cases: {pdb_id}"
        molecules = data[pdb_id][0]
        for chain in molecules['chains']:
            for residue in chain['residues']:
                yield list({**dict(zip(cols, (
                    pdb_id, molecules['entity_id'], chain['auth_asym_id'],
                    chain['struct_asym_id'], residue['residue_number'],
                    residue['author_residue_number'], residue['author_insertion_code'],
                    residue['observed'], feature_tag))), **feature} for feature_tag, feature in residue['features']['UniProt'].items()), None
@staticmethod
@dispatch_on_set('graph-api/pdb/sequence_conservation/')
def sequence_conservation(data: Dict):
    """Yield flat per-residue conservation records with JSON-encoded arrays."""
    for pdb_id, content in data.items():
        records = []
        for val in content['data']:
            letters = tuple(i['letter'] for i in val['amino'])
            probas = tuple(i['proba'] for i in val['amino'])
            records.append({
                'pdb_id': pdb_id,
                'entity_id': content['entity_id'],
                'length': content['length'],
                'residue_number': val['start'],
                'conservation_score': val['conservation_score'],
                'letter_array': json.dumps(letters).decode('utf-8'),
                'proba_array': json.dumps(probas).decode('utf-8')})
        yield records, None
@staticmethod
@dispatch_on_set('graph-api/pdb/funpdbe_annotation/depth/',
                 'graph-api/pdb/funpdbe_annotation/cath-funsites/',
                 'graph-api/pdb/funpdbe_annotation/3Dcomplex/',
                 'graph-api/pdb/funpdbe_annotation/akid/',
                 'graph-api/pdb/funpdbe_annotation/3dligandsite/',
                 'graph-api/pdb/funpdbe_annotation/camkinet/',
                 'graph-api/pdb/funpdbe_annotation/canSAR/',
                 'graph-api/pdb/funpdbe_annotation/ChannelsDB/',
                 'graph-api/pdb/funpdbe_annotation/dynamine/',
                 'graph-api/pdb/funpdbe_annotation/FoldX/',
                 'graph-api/pdb/funpdbe_annotation/MetalPDB/',
                 'graph-api/pdb/funpdbe_annotation/M-CSA/',
                 'graph-api/pdb/funpdbe_annotation/p2rank/',
                 'graph-api/pdb/funpdbe_annotation/Missense3D/',
                 'graph-api/pdb/funpdbe_annotation/POPScomp_PDBML/',
                 'graph-api/pdb/funpdbe_annotation/ProKinO/',
                 'graph-api/pdb/funpdbe_annotation/14-3-3-pred/',
                 'graph-api/pdb/funpdbe_annotation/'
                 )
def funpdbe_resources(data: Dict):
    """Yield FunPDBe site residues together with their provenance columns."""
    for pdb_id, records in data.items():
        for record in records:
            for annotation in record['annotations']:
                yield annotation['site_residues'], ('pdb_id', 'origin', 'evidence_codes', 'site_id', 'label'), (pdb_id, record['origin'], record['evidence_codes'], annotation['site_id'], annotation['label'])
@staticmethod
@dispatch_on_set('graph-api/pdbe_pages/rfam/',
                 'graph-api/pdbe_pages/annotations/',
                 'graph-api/pdbe_pages/uniprot_mapping/',
                 'graph-api/pdbe_pages/binding_sites/',
                 'graph-api/pdbe_pages/interfaces/',
                 'graph-api/pdbe_pages/secondary_structure/',
                 'graph-api/pdbe_pages/domains/',
                 'graph-api/uniprot/unipdb/',
                 'graph-api/uniprot/annotations/',
                 'graph-api/uniprot/interface_residues/',
                 'graph-api/uniprot/ligand_sites/',
                 'graph-api/uniprot/secondary_structures/',
                 'graph-api/uniprot/domains/',
                 'graph-api/uniprot/sequence_conservation/')
def graph_api_data_common(data: Dict):
    """Yield residue lists from graph-api pages.

    The top-level key may be a 4-char PDB id or a UniProt accession;
    the id column name is chosen by key length.
    """
    for identifier, content in data.items():
        id_type = 'pdb_id' if len(identifier) == 4 else 'UniProt'
        for info in content['data']:
            if 'additionalData' in info:
                flatten_dict(info, 'additionalData')
            common = tuple(k for k in info.keys() if k != 'residues')
            yield info['residues'], (id_type,) + common, (identifier,) + tuple(info[k] for k in common)
@staticmethod
@dispatch_on_set('graph-api/pdb/bound_molecule_interactions/')
def graph_api_bound(data: Dict):
    """Yield bound-molecule interactions with every field JSON-encoded."""
    for pdb_id, entries in data.items():
        for entry in entries:
            encoded = [
                {field: json.dumps(record[field]).decode('utf-8')
                 for field in record.keys()}
                for record in entry['interactions']]
            yield encoded, ('pdb_id', 'bm_id'), (pdb_id, entry['bm_id'])
@staticmethod
@dispatch_on_set('api/validation/protein-ramachandran-sidechain-outliers/entry/', 'api/validation/RNA_pucker_suite_outliers/entry/')
def yield_protein_ramachandran_sidechain_outlier(data):
    """Yield outlier residue lists tagged with their outlier category."""
    for pdb_id, categories in data.items():
        for category, residues in categories.items():
            yield residues, ('_type_', 'pdb_id'), (category, pdb_id)
@staticmethod
@dispatch_on_set('api/validation/rama_sidechain_listing/entry/', 'api/validation/residuewise_outlier_summary/entry/',
                 'api/validation/protein-RNA-DNA-geometry-outlier-residues/entry/')
def yield_rama_sidechain_listing(data):
    """Yield per-model residue listings with chain/model/entity identifiers."""
    for pdb_id, content in data.items():
        for entity in content['molecules']:
            for chain in entity['chains']:
                for model in chain['models']:
                    yield model['residues'], ('chain_id', 'struct_asym_id', 'model_id', 'entity_id', 'pdb_id'), (chain['chain_id'], chain['struct_asym_id'], model['model_id'], entity['entity_id'], pdb_id)
@staticmethod
@dispatch_on_set('graph-api/uniprot/superposition/')
def yield_unp_pdb_struct_cluster(data):
    """Yield superposition clusters labelled '<segment>_<cluster>' per UniProt segment."""
    for unp, segments in data.items():
        for seg_idx, segment in enumerate(segments):
            for cls_idx, cluster in enumerate(segment['clusters']):
                yield cluster, ('pdbekb_cluster', 'segment_start', 'segment_end', 'UniProt'), (f'{seg_idx}_{cls_idx}', segment['segment_start'], segment['segment_end'], unp)
class PDBeModelServer(object):
    '''
    Implement ModelServer API

    Builds download tasks for the RCSB/PDBe ModelServer endpoints.
    '''
    pdbe_root = f'{BASE_URL}model-server/v1/'
    rcsb_root = 'https://models.rcsb.org/v1/'
    root = rcsb_root
    headers = {'Connection': 'close', 'accept': 'text/plain', 'Content-Type': 'application/json'}
    # FIX: a comma was missing after 'ligand', which silently concatenated
    # it with 'residueSurroundings' into one bogus 'ligandresidueSurroundings'
    # entry and left both real endpoints out of the set.
    api_set = frozenset(('atoms', 'residueInteraction', 'assembly', 'full', 'ligand',
                         'residueSurroundings', 'symmetryMates', 'query-many'))

    @classmethod
    def task_unit(cls, pdb, suffix, method, folder, data_collection, params, filename='_subset'):
        """Build one (method, request-args, target-path) task tuple.

        GET is used when there is no request body, POST otherwise; the
        output file name is '<pdb><filename>.<encoding>'.
        """
        if data_collection is None:
            assert method == 'get', 'Invalid method!'
            args = dict(
                url=f'{cls.root}{pdb}/{suffix}?{dumpsParams(params)}',
                headers=cls.headers)
        else:
            assert method == 'post', 'Invalid method!'
            args = dict(
                url=f'{cls.root}{pdb}/{suffix}?{dumpsParams(params)}',
                headers=cls.headers,
                data=data_collection)
        # FIX: use the `filename` suffix in the output path — it was accepted
        # but ignored (the path contained a stray literal instead).
        return method, args, folder/f'{pdb}{filename}.{params.get("encoding", "cif")}'

    @classmethod
    def single_retrieve(cls, pdb: str, suffix: str, method: str, folder: Union[Path, str], semaphore, params=None, data_collection=None, rate: float = 1.5, filename='_subset'):
        """Schedule a single ModelServer fetch via UnsyncFetch."""
        if params is None or len(params) == 0:
            # sensible defaults: first model, mmCIF encoding
            params = {'model_nums': 1, 'encoding': 'cif'}
        return UnsyncFetch.single_task(
            task=cls.task_unit(pdb, suffix, method, folder,
                               data_collection, params, filename=filename),
            semaphore=semaphore,
            rate=rate)
class PDBeCoordinateServer(object):
    """CoordinateServer client that picks one of the EBI / litemol mirrors."""
    roots = (f'{BASE_URL}coordinates/', 'https://cs.litemol.org/')
    headers = {'Connection': 'close', 'accept': 'text/plain'}
    # FIX: a comma was missing after 'chains', which concatenated it with
    # 'entities' into a single bogus 'chainsentities' entry and left both
    # real endpoints out of the set.
    api_set = frozenset(('ambientResidues', 'assembly', 'backbone', 'cartoon', 'chains',
                         'entities', 'full', 'het', 'ligandInteraction', 'residueRange',
                         'residues', 'sidechain', 'symmetryMates', 'trace', 'water'))

    def __init__(self, root: str = 'random'):
        """Select the mirror: 'ebi', 'litemol' or 'random' (default)."""
        if root == 'random':
            self.root = choice(self.roots)
        elif root == 'ebi':
            self.root = self.roots[0]
        elif root == 'litemol':
            self.root = self.roots[1]
        else:
            raise ValueError("root should be (ebi, litemol, random)")

    def __repr__(self):
        return f'<CoordinateServerAPI: {self.root}>'

    def task_unit(self, pdb_id, suffix: str, params, folder):
        """Build one (method, request-args, target-path) task tuple."""
        args = dict(
            url=f'{self.root}{pdb_id}/{suffix}?',
            headers=self.headers,
            params=params)
        return 'get', args, Path(folder)/f'{pdb_id}_{dumpsParams(params)}.{params.get("encoding", "cif")}'

    def single_retrieve(self, pdb_id: str, suffix: str, params: Dict, folder: Union[Path, str], semaphore, rate: float = 1.5):
        """Schedule a single coordinate-server fetch via UnsyncFetch."""
        return UnsyncFetch.single_task(
            task=self.task_unit(pdb_id, suffix, params, folder),
            semaphore=semaphore,
            rate=rate)
class PDBArchive(object):
    '''
    Download files from PDB Archive
    * wwPDB/RCSB: PDB_ARCHIVE_URL_WWPDB: str = 'https://ftp.wwpdb.org/pub/pdb/data/structures/'
    * EBI: PDB_ARCHIVE_URL_EBI: str = 'http://ftp.ebi.ac.uk/pub/databases/pdb/data/structures/'
    '''
    root = PDB_ARCHIVE_URL_EBI
    # valid archive sub-paths: {obsolete,divided}/{mmCIF,pdb,XML}/
    api_set = frozenset(f'{i}/{j}/' for i in ('obsolete', 'divided')
                        for j in ('mmCIF', 'pdb', 'XML'))
    # maps the format folder name to the archived file extension
    file_dict = {
        'mmCIF': '.cif.gz',
        'pdb': '.ent.gz',
        'XML': '.xml.gz'
    }

    @staticmethod
    def wrap_id(pdb_id, suffix):
        """Return the on-archive file stem; legacy pdb files carry a 'pdb' prefix."""
        if suffix.endswith('pdb/'):
            return f"pdb{pdb_id}"
        else:
            return pdb_id

    @classmethod
    def get_file_suffix(cls, api_suffix):
        """Derive the file extension from the api suffix, e.g. 'divided/mmCIF/' -> '.cif.gz'."""
        for key, value in cls.file_dict.items():
            if key in api_suffix:
                return value
        raise AssertionError(
            f"Unexpected Case for api_suffix: {api_suffix}, {cls.file_dict}")

    @classmethod
    def task_unit(cls, pdb: str, suffix: str, file_suffix: str, folder: Path):
        """Build one download task; archive paths are sharded by pdb[1:3]."""
        args = dict(
            url=f'{cls.root}{suffix}{pdb[1:3]}/{cls.wrap_id(pdb, suffix)}{cls.get_file_suffix(suffix)}')
        return 'get', args, folder/f'{pdb}{file_suffix}'

    @classmethod
    def yieldTasks(cls, pdbs, suffix: str, file_suffix: str, folder: Path) -> Generator:
        """Yield one task per pdb id."""
        for pdb in pdbs:
            yield cls.task_unit(pdb, suffix, file_suffix, folder)

    @classmethod
    def retrieve(cls, pdbs, suffix: str, folder: Path, file_suffix: Optional[str] = None, concur_req: int = 20, rate: float = 1.5, ret_res: bool = True, **kwargs):
        """Fetch many archive files concurrently via UnsyncFetch.

        NOTE(review): unlike single_retrieve, a None file_suffix is passed
        through here without defaulting via get_file_suffix — confirm intended.
        """
        res = UnsyncFetch.multi_tasks(
            cls.yieldTasks(pdbs, suffix, file_suffix, folder),
            concur_req=concur_req,
            rate=rate,
            ret_res=ret_res,
            semaphore=kwargs.get('semaphore', None))
        return res

    @classmethod
    def single_retrieve(cls, pdb, suffix: str, folder: Path, semaphore, file_suffix: Optional[str] = None, rate: float = 1.5):
        """Fetch one archive file; file_suffix defaults from the api suffix."""
        if file_suffix is None:
            file_suffix = cls.get_file_suffix(suffix)
        return UnsyncFetch.single_task(
            task=cls.task_unit(pdb, suffix, file_suffix, folder),
            semaphore=semaphore,
            rate=rate)
class PDBVersioned(PDBArchive):
    '''
    Download files from PDB Versioned
    * wwPDB Versioned: PDB_ARCHIVE_VERSIONED_URL: str = 'http://ftp-versioned.wwpdb.org/pdb_versioned/data/entries/'
    >>> PDBVersioned.single_retrieve(
        ('2wmg', '_v1-2'), 'entries/',
        init_folder_from_suffix(Base.get_folder(), 'pdb-versioned/entries'),
        Base.get_web_semaphore()).result()
    '''
    root = PDB_ARCHIVE_VERSIONED_URL
    api_set = frozenset(('entries/', 'removed/'))

    @classmethod
    def task_unit(cls, pdb_with_version: Tuple, suffix: str, file_suffix: str, folder: Path):
        """Build one download task for a (pdb_id, version-tag) pair."""
        pdb, version_info = pdb_with_version
        # versioned archive names look like pdb_0000<id>_xyz<version><ext>
        file_name = f'pdb_0000{pdb}_xyz{version_info}{file_suffix}'
        url = f'{cls.root}{suffix}{pdb[1:3]}/pdb_0000{pdb}/{file_name}'
        return 'get', dict(url=url), folder/file_name
class PDBeKBAnnotations(object):
    """Fetch and parse PDBe-KB partner annotation JSON files."""
    ftp_root = f"{FTP_URL}pub/databases/pdbe-kb/annotations/"
    https_root = ftp_root.replace('ftp:', 'https:')
    root = https_root
    api_set = frozenset({
        '14-3-3-pred/', '3DComplex/',
        '3DLigandSite/', 'AKID/',
        'COSPI-Depth/', 'CamKinet/',
        'ChannelsDB/', 'Covalentizer/',
        'DynaMine/', 'FireProtDB/',
        'FoldX/', 'KnotProt/',
        'M-CSA/', 'MetalPDB/',
        'Missense3D/', 'P2rank/',
        'POPScomp_PDBML/', 'ProKinO/',
        'Scop3P/', 'canSAR/',
        'cath-funsites/', 'webNMA/'})

    @staticmethod
    def wrap_id(pdb_id, suffix):
        # M-CSA file names carry an extra '-mcsa' suffix
        return f"{pdb_id}-mcsa" if suffix == 'M-CSA/' else pdb_id

    @classmethod
    def task_unit(cls, pdb: str, suffix: str, folder: Path):
        """Build one download task; paths are sharded by pdb[1:3]."""
        pdb_ = cls.wrap_id(pdb, suffix)
        args = dict(
            url=f'{cls.root}{suffix}{pdb[1:3]}/{pdb_}.json')
        # use the ftp scheme only when the ftp mirror is selected
        method = 'ftp' if cls.root == cls.ftp_root else 'get'
        return method, args, folder/f'{pdb_}.json'

    @classmethod
    def single_retrieve(cls, pdb, suffix: str, folder: Path, semaphore, rate: float = 1.5):
        """Schedule a single annotation fetch via UnsyncFetch."""
        return UnsyncFetch.single_task(
            task=cls.task_unit(pdb, suffix, folder),
            semaphore=semaphore,
            rate=rate)

    @staticmethod
    def yieldPDBeKBAnnotations(data):
        """Yield residue annotation lists per chain with their provenance columns."""
        for chain in data['chains']:
            yield chain['residues'], ('data_resource', 'pdb_id', 'chain_id'), (data['data_resource'], data['pdb_id'], chain['chain_label'])
| 1.9375 | 2 |
pitop/robotics/pan_tilt_object_tracker.py | pi-top/pi-top-Python-SDK | 28 | 12773644 | from time import time
from pitop.pma.servo_controller import ServoHardwareSpecs
from .simple_pid import PID
class PanTiltObjectTracker:
    """Drives pan/tilt servos to keep a tracked object centred.

    Call the instance with the object's (x, y) offset from frame centre;
    each servo sweeps at a PID-derived speed until the offset is inside
    the lock range.
    """
    # PID gain sets; "slow" is used when the incoming update rate drops
    _pid_tunings = {
        "slow": {"kp": 0.075, "ki": 0.002, "kd": 0.04},
        "normal": {"kp": 0.25, "ki": 0.005, "kd": 0.1},
    }
    # offset magnitude (presumably pixels — confirm) counted as "on target"
    _target_lock_range = 10
    # below this call rate (Hz) the slower gains are selected
    _slow_fps_limit = 5.0

    def __init__(self, pan_servo, tilt_servo):
        self.__pan_servo = pan_servo
        self.__tilt_servo = tilt_servo
        self._previous_time = time()
        # setpoint 0: the PIDs drive the centre offset to zero; output is
        # clamped to the servo's valid speed range
        self.pan_pid = PID(
            setpoint=0,
            output_limits=(
                -ServoHardwareSpecs.SPEED_RANGE,
                ServoHardwareSpecs.SPEED_RANGE,
            ),
        )
        self.tilt_pid = PID(
            setpoint=0,
            output_limits=(
                -ServoHardwareSpecs.SPEED_RANGE,
                ServoHardwareSpecs.SPEED_RANGE,
            ),
        )
        self.__set_pid_tunings(pid_mode="normal")

    def __call__(self, center):
        """Update both servo sweep speeds from the target's centre offset."""
        current_time = time()
        dt = current_time - self._previous_time
        # pick gentler gains when updates arrive slower than _slow_fps_limit
        if dt > 1 / self._slow_fps_limit:
            pid_mode = "slow"
        else:
            pid_mode = "normal"
        self._previous_time = current_time
        self.__set_pid_tunings(pid_mode=pid_mode)
        x, y = center
        if abs(x) < self._target_lock_range:
            # within lock range: stop sweeping and clear integrator wind-up
            self.__pan_servo.sweep(speed=0)
            self.pan_pid.reset()
        else:
            pan_speed = self.pan_pid(x)
            self.__pan_servo.sweep(pan_speed)
        if abs(y) < self._target_lock_range:
            self.__tilt_servo.sweep(speed=0)
            self.tilt_pid.reset()
        else:
            tilt_speed = self.tilt_pid(y)
            self.__tilt_servo.sweep(tilt_speed)

    def __set_pid_tunings(self, pid_mode):
        # relies on dict insertion order matching (kp, ki, kd)
        self.pan_pid.tunings = list(self._pid_tunings[pid_mode].values())
        self.tilt_pid.tunings = list(self._pid_tunings[pid_mode].values())

    def reset(self):
        """Clear both PID states (integrator, last error)."""
        self.pan_pid.reset()
        self.tilt_pid.reset()

    def stop(self):
        """Stop both servos and reset the PIDs."""
        self.__pan_servo.sweep(0)
        self.__tilt_servo.sweep(0)
        self.reset()
| 2.46875 | 2 |
bootstrapvz/providers/ec2/tasks/network.py | null0000/bootstrap-vz | 0 | 12773645 | <reponame>null0000/bootstrap-vz
from bootstrapvz.base import Task
from bootstrapvz.common import phases
from bootstrapvz.common.tasks import apt
from bootstrapvz.common.tasks import kernel
import os.path
class EnableDHCPCDDNS(Task):
    """Make dhcpcd write the DNS servers it receives (pre-Jessie releases)."""
    description = 'Configuring the DHCP client to set the nameservers'
    phase = phases.system_modification

    @classmethod
    def run(cls, info):
        # The dhcp client shipped from Jessie onwards already sets the DNS
        # servers; only older releases need dhcpcd configured to do so.
        if info.release_codename in {'jessie', 'sid'}:
            return
        from bootstrapvz.common.tools import sed_i
        dhcpcd = os.path.join(info.root, 'etc/default/dhcpcd')
        sed_i(dhcpcd, '^#*SET_DNS=.*', 'SET_DNS=\'yes\'')
class AddBuildEssentialPackage(Task):
    """Queue the build-essential package for installation (needed to build DKMS modules)."""
    description = 'Adding build-essential package'
    phase = phases.preparation
    predecessors = [apt.AddDefaultSources]

    @classmethod
    def run(cls, info):
        info.packages.add('build-essential')
class InstallEnhancedNetworking(Task):
    """Download the ixgbevf driver and register it with DKMS in the chroot."""
    description = 'Installing enhanced networking kernel driver using DKMS'
    phase = phases.system_modification
    successors = [kernel.UpdateInitramfs]

    @classmethod
    def run(cls, info):
        # Pin the ixgbevf (Intel 10GbE SR-IOV VF) driver version
        version = '2.15.3'
        drivers_url = 'http://downloads.sourceforge.net/project/e1000/ixgbevf stable/%s/ixgbevf-%s.tar.gz' % (version, version)
        archive = os.path.join(info.root, 'tmp', 'ixgbevf-%s.tar.gz' % (version))
        module_path = os.path.join(info.root, 'usr', 'src', 'ixgbevf-%s' % (version))

        # NOTE(review): urllib.urlretrieve is the Python-2 API; on Python 3
        # this would need urllib.request.urlretrieve — confirm target runtime.
        import urllib
        urllib.urlretrieve(drivers_url, archive)
        from bootstrapvz.common.tools import log_check_call
        # Unpack the source under /usr/src where DKMS expects it
        log_check_call(['tar', '--ungzip',
                        '--extract',
                        '--file', archive,
                        '--directory', os.path.join(info.root, 'usr', 'src')])

        # Write the DKMS recipe; content is deliberately unindented so the
        # generated dkms.conf has no leading whitespace.
        with open(os.path.join(module_path, 'dkms.conf'), 'w') as dkms_conf:
            dkms_conf.write("""PACKAGE_NAME="ixgbevf"
PACKAGE_VERSION="%s"
CLEAN="cd src/; make clean"
MAKE="cd src/; make BUILD_KERNEL=${kernelver}"
BUILT_MODULE_LOCATION[0]="src/"
BUILT_MODULE_NAME[0]="ixgbevf"
DEST_MODULE_LOCATION[0]="/updates"
DEST_MODULE_NAME[0]="ixgbevf"
AUTOINSTALL="yes"
""" % (version))
        for task in ['add', 'build', 'install']:
            # Invoke DKMS task using specified kernel module (-m) and version (-v)
            log_check_call(['chroot', info.root,
                            'dkms', task, '-m', 'ixgbevf', '-v', version])
| 2 | 2 |
vmware_nsx/plugins/nsx_v/vshield/edge_appliance_driver.py | yebinama/vmware-nsx | 0 | 12773646 | # Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils import version
import random
import time
from neutron_lib import constants as lib_const
from neutron_lib import context as q_context
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from sqlalchemy.orm import exc as sa_exc
from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsxv_exc
from vmware_nsx.common import nsxv_constants
from vmware_nsx.common import utils
from vmware_nsx.db import nsxv_db
from vmware_nsx.plugins.nsx_v.vshield.common import constants
from vmware_nsx.plugins.nsx_v.vshield.common import exceptions
from vmware_nsx.plugins.nsx_v.vshield import edge_utils
from vmware_nsx.plugins.nsx_v.vshield.tasks import (
constants as task_constants)
from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks
LOG = logging.getLogger(__name__)
class EdgeApplianceDriver(object):
def __init__(self):
    """Initialise per-edge task bookkeeping and seed the HA-placement RNG."""
    super(EdgeApplianceDriver, self).__init__()
    # store the last task per edge that has the latest config
    self.updated_task = {
        'nat': {},
        'route': {},
    }
    # used by _select_datastores to balance HA datastore placement
    random.seed()
def _assemble_edge(self, name, appliance_size="compact",
deployment_container_id=None, datacenter_moid=None,
enable_aesni=True, dist=False,
enable_fips=False, remote_access=False,
edge_ha=False):
edge = {
'name': name,
'fqdn': None,
'enableAesni': enable_aesni,
'enableFips': enable_fips,
'featureConfigs': {
'features': [
{
'featureType': 'firewall_4.0',
'globalConfig': {
'tcpTimeoutEstablished': 7200
}
}
]
},
'cliSettings': {
'remoteAccess': remote_access
},
'autoConfiguration': {
'enabled': False,
'rulePriority': 'high'
},
'appliances': {
'applianceSize': appliance_size
},
}
if not dist:
edge['type'] = "gatewayServices"
edge['vnics'] = {'vnics': []}
else:
edge['type'] = "distributedRouter"
edge['interfaces'] = {'interfaces': []}
if deployment_container_id:
edge['appliances']['deploymentContainerId'] = (
deployment_container_id)
if datacenter_moid:
edge['datacenterMoid'] = datacenter_moid
if not dist and edge_ha:
self._enable_high_availability(edge)
return edge
def _select_datastores(self, availability_zone):
primary_ds = availability_zone.datastore_id
secondary_ds = availability_zone.ha_datastore_id
if availability_zone.ha_placement_random:
# we want to switch primary and secondary datastores
# half of the times, to balance it
if random.random() > 0.5:
primary_ds = availability_zone.ha_datastore_id
secondary_ds = availability_zone.datastore_id
return primary_ds, secondary_ds
def _assemble_edge_appliances(self, availability_zone):
    """Return the appliance placement list for the availability zone.

    HA zones get two placements (one per datastore, balanced randomly);
    a single-datastore zone gets one; otherwise the list is empty.
    """
    if availability_zone.ha_datastore_id and availability_zone.edge_ha:
        primary_ds, secondary_ds = self._select_datastores(
            availability_zone)
        return [
            self._assemble_edge_appliance(
                availability_zone.resource_pool, primary_ds),
            self._assemble_edge_appliance(
                availability_zone.resource_pool, secondary_ds),
        ]
    if availability_zone.datastore_id:
        return [self._assemble_edge_appliance(
            availability_zone.resource_pool,
            availability_zone.datastore_id)]
    return []
def _assemble_edge_appliance(self, resource_pool_id, datastore_id):
appliance = {}
if resource_pool_id:
appliance['resourcePoolId'] = resource_pool_id
if datastore_id:
appliance['datastoreId'] = datastore_id
return appliance
def _assemble_edge_vnic(self, name, index, portgroup_id, tunnel_index=-1,
primary_address=None, subnet_mask=None,
secondary=None,
type="internal",
enable_proxy_arp=False,
enable_send_redirects=True,
is_connected=True,
mtu=1500,
address_groups=None):
vnic = {
'index': index,
'name': name,
'type': type,
'portgroupId': portgroup_id,
'mtu': mtu,
'enableProxyArp': enable_proxy_arp,
'enableSendRedirects': enable_send_redirects,
'isConnected': is_connected
}
if address_groups is None:
address_groups = []
if not address_groups:
if primary_address and subnet_mask:
address_group = {
'primaryAddress': primary_address,
'subnetMask': subnet_mask
}
if secondary:
address_group['secondaryAddresses'] = {
'ipAddress': secondary,
'type': 'secondary_addresses'
}
vnic['addressGroups'] = {
'addressGroups': [address_group]
}
else:
vnic['subInterfaces'] = {'subInterfaces': address_groups}
else:
if tunnel_index < 0:
vnic['addressGroups'] = {'addressGroups': address_groups}
else:
vnic['subInterfaces'] = {'subInterfaces': address_groups}
return vnic
def _assemble_vdr_interface(self, portgroup_id,
primary_address=None, subnet_mask=None,
secondary=None,
type="internal",
is_connected=True,
mtu=1500,
address_groups=None):
interface = {
'type': type,
'connectedToId': portgroup_id,
'mtu': mtu,
'isConnected': is_connected
}
if address_groups is None:
address_groups = []
if not address_groups:
if primary_address and subnet_mask:
address_group = {
'primaryAddress': primary_address,
'subnetMask': subnet_mask
}
if secondary:
address_group['secondaryAddresses'] = {
'ipAddress': secondary,
'type': 'secondary_addresses'
}
interface['addressGroups'] = {
'addressGroups': [address_group]
}
else:
interface['addressGroups'] = {'addressGroups': address_groups}
interfaces = {'interfaces': [interface]}
return interfaces
def _edge_status_to_level(self, status):
    """Translate a backend edge status colour into a router status constant."""
    if status == 'GREEN':
        return constants.RouterStatus.ROUTER_STATUS_ACTIVE
    if status in ('GREY', 'YELLOW'):
        return constants.RouterStatus.ROUTER_STATUS_DOWN
    return constants.RouterStatus.ROUTER_STATUS_ERROR
def _enable_loadbalancer(self, edge):
if (not edge.get('featureConfigs') or
not edge['featureConfigs'].get('features')):
edge['featureConfigs'] = {'features': []}
edge['featureConfigs']['features'].append(
{'featureType': 'loadbalancer_4.0',
'enabled': True})
def _enable_high_availability(self, edge):
if (not edge.get('featureConfigs') or
not edge['featureConfigs'].get('features')):
edge['featureConfigs'] = {'features': []}
edge['featureConfigs']['features'].append(
{'featureType': 'highavailability_4.0',
'enabled': True})
def get_edge_status(self, edge_id):
    """Query the backend and map the edge status to a router status level.

    API failures map to ERROR, except the 'edge not running' error code
    which maps to DOWN.
    """
    try:
        response = self.vcns.get_edge_status(edge_id)[1]
        status_level = self._edge_status_to_level(
            response['edgeStatus'])
    except exceptions.VcnsApiException as e:
        LOG.error("VCNS: Failed to get edge %(edge_id)s status: "
                  "Reason: %(reason)s",
                  {'edge_id': edge_id, 'reason': e.response})
        status_level = constants.RouterStatus.ROUTER_STATUS_ERROR
        try:
            # the error body may carry a structured errorCode
            desc = jsonutils.loads(e.response)
            if desc.get('errorCode') == (
                constants.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
                status_level = constants.RouterStatus.ROUTER_STATUS_DOWN
        except ValueError:
            LOG.error('Error code not present. %s', e.response)
    return status_level
def get_interface(self, edge_id, vnic_index):
    """Query one vnic's configuration (address groups) on an edge.

    Re-raises VcnsApiException after logging.
    """
    # get vnic interface address groups
    try:
        return self.vcns.query_interface(edge_id, vnic_index)
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception("NSXv: Failed to query vnic %s", vnic_index)
def update_interface(self, router_id, edge_id, index, network,
                     tunnel_index=-1, address=None, netmask=None,
                     secondary=None, is_connected=True,
                     address_groups=None):
    """Reconfigure a vnic on a service edge.

    The external vnic index gets the uplink type; other indices are
    internal, or trunk when a tunnel_index is supplied.
    """
    LOG.debug("VCNS: update vnic %(index)d: %(addr)s %(netmask)s", {
        'index': index, 'addr': address, 'netmask': netmask})
    if index == constants.EXTERNAL_VNIC_INDEX:
        name = constants.EXTERNAL_VNIC_NAME
        intf_type = 'uplink'
    else:
        name = constants.INTERNAL_VNIC_NAME + str(index)
        if tunnel_index < 0:
            intf_type = 'internal'
        else:
            intf_type = 'trunk'
    config = self._assemble_edge_vnic(
        name, index, network, tunnel_index,
        address, netmask, secondary, type=intf_type,
        address_groups=address_groups, is_connected=is_connected)
    self.vcns.update_interface(edge_id, config)
def add_vdr_internal_interface(self, edge_id,
                               network, address=None, netmask=None,
                               secondary=None, address_groups=None,
                               type="internal", is_connected=True):
    """Add an internal interface to a distributed router and return its index.

    NOTE(review): if the freshly-added interface is not found in the
    follow-up query, this falls through and implicitly returns None —
    callers should handle that; confirm whether it should raise instead.
    """
    LOG.debug("Add VDR interface on edge: %s", edge_id)
    if address_groups is None:
        address_groups = []
    interface_req = (
        self._assemble_vdr_interface(network, address, netmask, secondary,
                                     address_groups=address_groups,
                                     is_connected=is_connected, type=type))
    self.vcns.add_vdr_internal_interface(edge_id, interface_req)
    # re-query to discover the index the backend assigned
    header, response = self.vcns.get_edge_interfaces(edge_id)
    for interface in response['interfaces']:
        if interface['connectedToId'] == network:
            vnic_index = int(interface['index'])
            return vnic_index
def update_vdr_internal_interface(self, edge_id, index, network,
                                  address_groups=None, is_connected=True):
    """Replace the configuration of one VDR internal interface.

    NOTE(review): the payload uses the singular 'addressGroup' key here,
    unlike the plural used elsewhere — presumably matching the VDR API
    schema; confirm against the backend spec.
    """
    if not address_groups:
        address_groups = []
    interface = {
        'type': 'internal',
        'connectedToId': network,
        'mtu': 1500,
        'isConnected': is_connected,
        'addressGroups': {'addressGroup': address_groups}
    }
    interface_req = {'interface': interface}
    try:
        header, response = self.vcns.update_vdr_internal_interface(
            edge_id, index, interface_req)
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception("Failed to update vdr interface on edge: "
                          "%s", edge_id)
def delete_vdr_internal_interface(self, edge_id, interface_index):
    """Remove an internal interface from a distributed router edge.

    Re-raises VcnsApiException after logging.
    """
    LOG.debug("Delete VDR interface on edge: %s", edge_id)
    try:
        self.vcns.delete_vdr_internal_interface(
            edge_id, interface_index)
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception("Failed to delete vdr interface on edge: "
                          "%s",
                          edge_id)
def delete_interface(self, router_id, edge_id, index):
    """Delete vnic *index* from *edge_id*.

    A ResourceNotFound from the backend is logged and treated as success
    (the vnic is already gone); any other API error is logged and
    re-raised.

    NOTE(review): router_id is unused in this body; presumably kept for
    signature compatibility with callers -- confirm before removing.
    """
    LOG.debug("Deleting vnic %(vnic_index)s: on edge %(edge_id)s",
              {'vnic_index': index, 'edge_id': edge_id})
    try:
        self.vcns.delete_interface(edge_id, index)
    except exceptions.ResourceNotFound:
        # Already deleted on the backend -- not fatal.
        LOG.error('Failed to delete vnic %(vnic_index)s on edge '
                  '%(edge_id)s: edge was not found',
                  {'vnic_index': index,
                   'edge_id': edge_id})
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception("Failed to delete vnic %(vnic_index)s: "
                          "on edge %(edge_id)s",
                          {'vnic_index': index,
                           'edge_id': edge_id})

    LOG.debug("Deletion complete vnic %(vnic_index)s: on edge %(edge_id)s",
              {'vnic_index': index, 'edge_id': edge_id})
def deploy_edge(self, context, router_id, name, internal_network,
                dist=False, loadbalancer_enable=True,
                appliance_size=nsxv_constants.LARGE,
                availability_zone=None, deploy_metadata=False):
    """Deploy a new edge appliance and record it in the neutron DB.

    :param context: neutron request context (session used for bindings).
    :param router_id: neutron router this edge backs.
    :param name: display name for the new edge.
    :param internal_network: network id for the internal transit vnic
        (skipped when falsy).
    :param dist: True to deploy a distributed (VDR) edge.
    :param loadbalancer_enable: enable the LB feature on non-dist edges.
    :param appliance_size: edge appliance form factor.
    :param availability_zone: AZ object supplying datacenter moid,
        external network and HA settings.  NOTE(review): assumed to be
        non-None -- a None value raises AttributeError below; confirm
        all callers pass one.
    :param deploy_metadata: forwarded to the creation callback.
    :returns: the new edge id, or None when the backend response carried
        no location header (an exception is raised in that case first).
    :raises nsxv_exc.NsxPluginException: when no edge id is returned.
    :raises exceptions.VcnsApiException: re-raised after notifying the
        callbacks of the failure.
    """
    edge_name = name
    edge = self._assemble_edge(
        edge_name, datacenter_moid=availability_zone.datacenter_moid,
        deployment_container_id=self.deployment_container_id,
        appliance_size=appliance_size, remote_access=False, dist=dist,
        edge_ha=availability_zone.edge_ha)
    appliances = self._assemble_edge_appliances(availability_zone)
    if appliances:
        edge['appliances']['appliances'] = appliances

    if not dist:
        # Service edge: attach an uplink vnic on the external network.
        vnic_external = self._assemble_edge_vnic(
            constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX,
            availability_zone.external_network, type="uplink")
        edge['vnics']['vnics'].append(vnic_external)
    else:
        # Distributed edge: the external network hosts the mgmt interface.
        edge['mgmtInterface'] = {
            'connectedToId': availability_zone.external_network,
            'name': "mgmtInterface"}

    if internal_network:
        vnic_inside = self._assemble_edge_vnic(
            constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX,
            internal_network,
            edge_utils.get_vdr_transit_network_plr_address(),
            edge_utils.get_vdr_transit_network_netmask(),
            type="internal")
        edge['vnics']['vnics'].append(vnic_inside)

    # If default login credentials for Edge are set, configure accordingly
    if (cfg.CONF.nsxv.edge_appliance_user and
            cfg.CONF.nsxv.edge_appliance_password):
        edge['cliSettings'].update({
            'userName': cfg.CONF.nsxv.edge_appliance_user,
            'password': cfg.CONF.nsxv.edge_appliance_password})

    if not dist and loadbalancer_enable:
        self._enable_loadbalancer(edge)

    edge_id = None
    try:
        header = self.vcns.deploy_edge(edge)[0]
        # The backend returns the new edge id as the last path segment
        # of the Location response header.
        edge_id = header.get('location', '/').split('/')[-1]

        if edge_id:
            nsxv_db.update_nsxv_router_binding(
                context.session, router_id, edge_id=edge_id)
            if not dist:
                # Init Edge vnic binding
                nsxv_db.init_edge_vnic_binding(
                    context.session, edge_id)
        else:
            # No id in the response: flag the binding and abort.
            if router_id:
                nsxv_db.update_nsxv_router_binding(
                    context.session, router_id,
                    status=lib_const.ERROR)
            error = _('Failed to deploy edge')
            raise nsxv_exc.NsxPluginException(err_msg=error)

        self.callbacks.complete_edge_creation(
            context, edge_id, name, router_id, dist, True,
            availability_zone=availability_zone,
            deploy_metadata=deploy_metadata)
    except exceptions.VcnsApiException:
        # Report the failure to the callbacks, then re-raise.
        self.callbacks.complete_edge_creation(
            context, edge_id, name, router_id, dist, False,
            availability_zone=availability_zone)
        with excutils.save_and_reraise_exception():
            LOG.exception("NSXv: deploy edge failed.")
    return edge_id
def update_edge(self, context, router_id, edge_id, name, internal_network,
                dist=False, loadbalancer_enable=True,
                appliance_size=nsxv_constants.LARGE,
                set_errors=False, availability_zone=None):
    """Update edge name.

    Re-assembles the complete edge body (vnics, appliances, LB feature),
    pushes it to the backend, then notifies the callbacks with the
    outcome.

    :returns: True on success, False on API failure.
    """
    edge = self._assemble_edge(
        name, datacenter_moid=availability_zone.datacenter_moid,
        deployment_container_id=self.deployment_container_id,
        appliance_size=appliance_size, remote_access=False, dist=dist,
        edge_ha=availability_zone.edge_ha)
    edge['id'] = edge_id
    appliances = self._assemble_edge_appliances(availability_zone)
    if appliances:
        edge['appliances']['appliances'] = appliances

    if not dist:
        # Service edge: uplink vnic on the external network.
        vnic_external = self._assemble_edge_vnic(
            constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX,
            availability_zone.external_network, type="uplink")
        edge['vnics']['vnics'].append(vnic_external)
    else:
        # Distributed edge: mgmt interface + optional internal transit
        # vnic.  NOTE(review): nesting reconstructed from the mangled
        # source; verify against upstream that the internal vnic really
        # belongs to the distributed branch here.
        edge['mgmtInterface'] = {
            'connectedToId': availability_zone.external_network,
            'name': "mgmtInterface"}
        if internal_network:
            internal_vnic = self._assemble_edge_vnic(
                constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX,
                internal_network,
                edge_utils.get_vdr_transit_network_plr_address(),
                edge_utils.get_vdr_transit_network_netmask(),
                type="internal")
            edge['vnics']['vnics'].append(internal_vnic)

    if not dist and loadbalancer_enable:
        self._enable_loadbalancer(edge)

    try:
        self.vcns.update_edge(edge_id, edge)
        self.callbacks.complete_edge_update(
            context, edge_id, router_id, True, set_errors)
    except exceptions.VcnsApiException as e:
        LOG.error("Failed to update edge: %s",
                  e.response)
        self.callbacks.complete_edge_update(
            context, edge_id, router_id, False, set_errors)
        return False

    return True
def rename_edge(self, edge_id, name):
    """Rename an edge, skipping the backend call when the name matches."""
    try:
        # get_edge() returns (status, body); only the body is needed.
        current = self.vcns.get_edge(edge_id)[1]
        if current['name'] == name:
            LOG.debug('Edge %s is already named %s', edge_id, name)
            return
        # Strip fields that would make the update call fail, set the
        # new name, and push the modified body back.
        edge_utils.remove_irrelevant_keys_from_edge_request(current)
        current['name'] = name
        self.vcns.update_edge(edge_id, current)
    except exceptions.VcnsApiException as e:
        LOG.error("Failed to rename edge: %s",
                  e.response)
def resize_edge(self, edge_id, size):
    """Update the appliance size of a router edge.

    No-op when the edge already has the requested size.  API failures
    are logged and swallowed (no exception propagates).
    """
    try:
        # First get the current edge structure
        # [0] is the status, [1] is the body
        edge = self.vcns.get_edge(edge_id)[1]
        if edge.get('appliances'):
            if edge['appliances']['applianceSize'] == size:
                LOG.debug('Edge %s is already with size %s',
                          edge_id, size)
                return
        ver = self.vcns.get_version()
        if version.LooseVersion(ver) < version.LooseVersion('6.2.3'):
            # remove some data that will make the update fail
            edge_utils.remove_irrelevant_keys_from_edge_request(edge)
        # set the new size in the request
        # NOTE(review): if the body has no 'appliances' key the next
        # line raises KeyError -- confirm the backend always returns it.
        edge['appliances']['applianceSize'] = size
        # update the edge
        self.vcns.update_edge(edge_id, edge)
    except exceptions.VcnsApiException as e:
        LOG.error("Failed to resize edge: %s", e.response)
def delete_edge(self, context, router_id, edge_id, dist=False):
    """Remove the edge's DB bindings and delete it on the backend.

    :param context: neutron context; an admin context is created if None.
    :param router_id: router whose binding rows are removed.
    :param edge_id: backend edge to delete (backend call skipped if falsy).
    :param dist: True for distributed edges (no vnic bindings to clean).
    :returns: True on success or when the edge was already gone, False on
        backend failure, None (implicit) when edge_id is falsy.
    """
    LOG.debug("Deleting edge %s", edge_id)
    if context is None:
        context = q_context.get_admin_context()
    try:
        LOG.debug("Deleting router binding %s", router_id)
        nsxv_db.delete_nsxv_router_binding(context.session, router_id)
        if not dist:
            LOG.debug("Deleting vnic bindings for edge %s", edge_id)
            nsxv_db.clean_edge_vnic_binding(context.session, edge_id)
    except sa_exc.NoResultFound:
        # Binding already absent -- continue with the backend deletion.
        LOG.warning("Router Binding for %s not found", router_id)

    if edge_id:
        try:
            self.vcns.delete_edge(edge_id)
            return True
        except exceptions.ResourceNotFound:
            # Already deleted on the backend -- treat as success.
            return True
        except exceptions.VcnsApiException as e:
            LOG.exception("VCNS: Failed to delete %(edge_id)s:\n"
                          "%(response)s",
                          {'edge_id': edge_id, 'response': e.response})
            return False
        except Exception:
            LOG.exception("VCNS: Failed to delete %s", edge_id)
            return False
def _assemble_nat_rule(self, action, original_address,
translated_address,
vnic_index=None,
enabled=True,
protocol='any',
original_port='any',
translated_port='any'):
nat_rule = {}
nat_rule['action'] = action
if vnic_index is not None:
nat_rule['vnic'] = vnic_index
nat_rule['originalAddress'] = original_address
nat_rule['translatedAddress'] = translated_address
nat_rule['enabled'] = enabled
nat_rule['protocol'] = protocol
nat_rule['originalPort'] = original_port
nat_rule['translatedPort'] = translated_port
return nat_rule
def get_nat_config(self, edge_id):
    """Return the NAT feature configuration body of *edge_id*.

    :raises exceptions.VcnsApiException: re-raised after logging.
    """
    try:
        response = self.vcns.get_nat_config(edge_id)
    except exceptions.VcnsApiException as e:
        LOG.exception("VCNS: Failed to get nat config:\n%s",
                      e.response)
        raise e
    # Element [0] is the header; element [1] is the config body.
    return response[1]
def update_nat_rules(self, edge_id, snats, dnats, indices=None):
    """Replace the complete NAT rule set of an edge.

    Every DNAT entry also produces a mirrored SNAT rule (translated ->
    original).  Entries carrying an explicit 'vnic_index' (or when
    *indices* is falsy) yield one rule; otherwise a copy of the rule is
    created for every index in *indices*.

    :param edge_id: edge to configure.
    :param snats: dicts with 'src'/'translated' (+ optional 'vnic_index').
    :param dnats: dicts with 'dst'/'translated' (+ optional 'vnic_index').
    :param indices: vnic indices to fan un-pinned rules out to.
    :returns: True on success, False on API failure.
    """
    LOG.debug("VCNS: update nat rule\n"
              "SNAT:%(snat)s\n"
              "DNAT:%(dnat)s\n"
              "INDICES: %(index)s\n", {
                  'snat': snats, 'dnat': dnats, 'index': indices})
    nat_rules = []

    for dnat in dnats:
        vnic_index = None
        if 'vnic_index' in dnat:
            vnic_index = dnat['vnic_index']
        if vnic_index or not indices:
            # we are adding a predefined index or
            # adding to all interfaces
            nat_rules.append(self._assemble_nat_rule(
                'dnat', dnat['dst'], dnat['translated'],
                vnic_index=vnic_index
            ))
            # Mirrored reverse rule so return traffic is translated back.
            nat_rules.append(self._assemble_nat_rule(
                'snat', dnat['translated'], dnat['dst'],
                vnic_index=vnic_index
            ))
        else:
            # Fan the pair of rules out to each requested vnic index.
            for index in indices:
                nat_rules.append(self._assemble_nat_rule(
                    'dnat', dnat['dst'], dnat['translated'],
                    vnic_index=index
                ))
                nat_rules.append(self._assemble_nat_rule(
                    'snat', dnat['translated'], dnat['dst'],
                    vnic_index=index
                ))

    for snat in snats:
        vnic_index = None
        if 'vnic_index' in snat:
            vnic_index = snat['vnic_index']
        if vnic_index or not indices:
            # we are adding a predefined index
            # or adding to all interfaces
            nat_rules.append(self._assemble_nat_rule(
                'snat', snat['src'], snat['translated'],
                vnic_index=vnic_index
            ))
        else:
            for index in indices:
                nat_rules.append(self._assemble_nat_rule(
                    'snat', snat['src'], snat['translated'],
                    vnic_index=index
                ))

    nat = {
        'featureType': 'nat',
        'rules': {
            'natRulesDtos': nat_rules
        }
    }
    try:
        self.vcns.update_nat_config(edge_id, nat)
        return True
    except exceptions.VcnsApiException as e:
        LOG.exception("VCNS: Failed to create snat rule:\n%s",
                      e.response)
        return False
def update_routes(self, edge_id, gateway, routes):
if gateway:
gateway = gateway.split('/')[0]
static_routes = []
for route in routes:
if route.get('vnic_index') is None:
static_routes.append({
"description": "",
"vnic": constants.INTERNAL_VNIC_INDEX,
"network": route['cidr'],
"nextHop": route['nexthop']
})
else:
static_routes.append({
"description": "",
"vnic": route['vnic_index'],
"network": route['cidr'],
"nextHop": route['nexthop']
})
request = {
"staticRoutes": {
"staticRoutes": static_routes
}
}
if gateway:
request["defaultRoute"] = {
"description": "default-gateway",
"gatewayAddress": gateway
}
try:
self.vcns.update_routes(edge_id, request)
return True
except exceptions.VcnsApiException as e:
LOG.exception("VCNS: Failed to update routes:\n%s",
e.response)
return False
def create_lswitch(self, name, tz_config, tags=None,
                   port_isolation=False, replication_mode="service"):
    """Create an NSX logical switch and return the API response body.

    :param name: display name (truncated to the backend limit).
    :param tz_config: transport-zone bindings for the switch.
    :param tags: optional list of tag dicts.
    :param port_isolation: enable port isolation on the switch.
    :param replication_mode: replication mode; skipped when falsy.
    :returns: the body ([1]) of the backend create response.
    """
    lsconfig = {
        'display_name': utils.check_and_truncate(name),
        "tags": tags or [],
        "type": "LogicalSwitchConfig",
        "_schema": "/ws.v1/schema/LogicalSwitchConfig",
        "transport_zones": tz_config
    }
    # Bug fix: the original tested ``port_isolation is bool`` which is
    # never true for an actual boolean value, so the flag was silently
    # dropped.  Set the key when isolation is requested.
    if port_isolation:
        lsconfig["port_isolation_enabled"] = port_isolation
    if replication_mode:
        lsconfig["replication_mode"] = replication_mode

    response = self.vcns.create_lswitch(lsconfig)[1]
    return response
def delete_lswitch(self, lswitch_id):
    """Delete the logical switch identified by *lswitch_id*."""
    self.vcns.delete_lswitch(lswitch_id)
def get_loadbalancer_config(self, edge_id):
    """Return the load balancer configuration body of *edge_id*.

    :raises exceptions.VcnsApiException: re-raised after logging.
    """
    try:
        # Index [1] is the body; the header is discarded.
        response = self.vcns.get_loadbalancer_config(edge_id)[1]
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception("Failed to get service config")
    return response
def enable_service_loadbalancer(self, edge_id):
    """Turn on the load balancer service of *edge_id* if it is disabled.

    No backend write happens when the service is already enabled.
    :raises exceptions.VcnsApiException: re-raised after logging.
    """
    config = self.get_loadbalancer_config(edge_id)
    if config['enabled']:
        return
    config['enabled'] = True
    try:
        self.vcns.enable_service_loadbalancer(edge_id, config)
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception("Failed to enable loadbalancer "
                          "service config")
def _delete_port_group(self, task):
    """Task body: delete the DVS port group recorded in task.userdata.

    :returns: task_constants.TaskStatus.COMPLETED on success,
        .ERROR on any failure (which is logged, never raised).
    """
    dvs_id = task.userdata['dvs_id']
    port_group_id = task.userdata['port_group_id']
    try:
        self.vcns.delete_port_group(dvs_id, port_group_id)
    except Exception as e:
        LOG.error('Unable to delete %(pg)s exception %(ex)s',
                  {'pg': port_group_id,
                   'ex': e})
        return task_constants.TaskStatus.ERROR
    return task_constants.TaskStatus.COMPLETED
def _retry_task(self, task):
    """Task body/status callback retrying task.userdata['retry_command'].

    On failure the retry counter is bumped and the task stays PENDING,
    sleeping with exponential backoff (0.5s base, doubling, capped at
    60s).  Once cfg.CONF.nsxv.retries attempts are exhausted the last
    exception is re-raised.

    :param task: task whose userdata carries 'retry_command',
        'retry_number' and optional 'args'/'kwargs'.
    :returns: a task_constants.TaskStatus value.
    """
    delay = 0.5
    max_retries = max(cfg.CONF.nsxv.retries, 1)
    args = task.userdata.get('args', [])
    kwargs = task.userdata.get('kwargs', {})
    retry_number = task.userdata['retry_number']
    retry_command = task.userdata['retry_command']
    try:
        retry_command(*args, **kwargs)
    except Exception as exc:
        LOG.debug("Task %(name)s retry %(retry)s failed %(exc)s",
                  {'name': task.name,
                   'exc': exc,
                   'retry': retry_number})
        retry_number += 1
        if retry_number > max_retries:
            # Out of attempts: log and let the exception propagate.
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to %s", task.name)
        else:
            task.userdata['retry_number'] = retry_number
            # Sleep twice as long as the previous retry
            tts = (2 ** (retry_number - 1)) * delay
            time.sleep(min(tts, 60))
            return task_constants.TaskStatus.PENDING
    LOG.info("Task %(name)s completed.", {'name': task.name})
    return task_constants.TaskStatus.COMPLETED
def delete_port_group(self, dvs_id, port_group_id):
    """Queue an asynchronous, retried deletion of a DVS port group."""
    userdata = {
        'retry_number': 1,
        'retry_command': self.vcns.delete_port_group,
        'args': [dvs_id, port_group_id],
    }
    task = tasks.Task(
        'delete-port-group-%s-%s' % (port_group_id, dvs_id),
        port_group_id,
        self._retry_task,
        status_callback=self._retry_task,
        userdata=userdata)
    self.task_manager.add(task)
def delete_virtual_wire(self, vw_id):
    """Queue an asynchronous, retried deletion of a virtual wire."""
    userdata = {
        'retry_number': 1,
        'retry_command': self.vcns.delete_virtual_wire,
        'args': [vw_id],
    }
    task = tasks.Task(
        'delete-virtualwire-%s' % vw_id,
        vw_id,
        self._retry_task,
        status_callback=self._retry_task,
        userdata=userdata)
    self.task_manager.add(task)
def create_bridge(self, device_name, bridge):
    """Create *bridge* on *device_name*; log and re-raise API failures."""
    try:
        self.vcns.create_bridge(device_name, bridge)
    except exceptions.VcnsApiException:
        with excutils.save_and_reraise_exception():
            LOG.exception("Failed to create bridge in the %s",
                          device_name)
def delete_bridge(self, device_name):
    """Delete the bridge on *device_name*; API failures are only logged."""
    try:
        self.vcns.delete_bridge(device_name)
    except exceptions.VcnsApiException:
        # Deliberately swallowed: deletion failure is logged, not raised.
        LOG.exception("Failed to delete bridge in the %s",
                      device_name)
def update_edge_ha(self, edge_id):
    """Enable high availability on the given edge."""
    self.vcns.enable_ha(edge_id, {
        'featureType': "highavailability_4.0",
        'enabled': True})
def update_edge_syslog(self, edge_id, syslog_config, router_id):
    """Configure remote syslog on an edge from *syslog_config*.

    Invalid configs (missing server_ip, bad protocol, unsupported log
    level) are logged as warnings and the call returns without touching
    the backend.  *router_id* is only used for log messages.
    """
    if 'server_ip' not in syslog_config:
        LOG.warning("Server IP missing in syslog config for %s",
                    router_id)
        return

    protocol = syslog_config.get('protocol', 'tcp')
    if protocol not in ('tcp', 'udp'):
        LOG.warning("Invalid protocol in syslog config for %s",
                    router_id)
        return

    loglevel = syslog_config.get('log_level')
    if loglevel and loglevel not in edge_utils.SUPPORTED_EDGE_LOG_LEVELS:
        LOG.warning("Invalid loglevel in syslog config for %s",
                    router_id)
        return

    addresses = [syslog_config['server_ip']]
    # edge allows up to 2 syslog servers
    if 'server2_ip' in syslog_config:
        addresses.append(syslog_config['server2_ip'])

    self.vcns.update_edge_syslog(edge_id, {
        'featureType': 'syslog',
        'protocol': protocol,
        'serverAddresses': {'ipAddress': addresses,
                            'type': 'IpAddressesDto'}})

    # update log level for routing in separate API call
    if loglevel:
        edge_utils.update_edge_loglevel(self.vcns, edge_id,
                                        'routing', loglevel)
| 1.507813 | 2 |
devconf/ast/mixins/node.py | everclear72216/ucapi | 0 | 12773647 | <filename>devconf/ast/mixins/node.py<gh_stars>0
class Node(object):
    """A generic AST node: ordered children plus source-position metadata."""

    def __init__(self):
        super().__init__()
        self.__filename = ''       # source file this node originated from
        self.__children = []       # ordered child Node instances
        self.__line_number = 0     # line in the source file
        self.__column_number = 0   # column in the source file

    def get_children(self) -> list:
        """Return the (mutable) list of child nodes."""
        return self.__children

    def add_child(self, child: 'Node') -> None:
        """Append *child*; only Node instances are accepted."""
        assert isinstance(child, Node)
        self.__children.append(child)

    def get_line_number(self) -> int:
        return self.__line_number

    def set_line_number(self, n: int) -> None:
        # Coerce so callers may pass numeric strings.
        self.__line_number = int(n)

    def get_column_number(self) -> int:
        return self.__column_number

    def set_column_number(self, n: int) -> None:
        self.__column_number = int(n)

    def get_file_name(self) -> str:
        return self.__filename

    def set_file_name(self, f: str) -> None:
        self.__filename = str(f)

    def is_leaf(self) -> bool:
        """Return True when this node has no children."""
        return not self.__children
| 2.75 | 3 |
name/__main__.py | nettle/python-template | 0 | 12773648 | <reponame>nettle/python-template<filename>name/__main__.py
"""
Name main
"""
import launcher
if __name__ == "__main__":
launcher.NameLauncher().run()
| 1.796875 | 2 |
lib.py | LeeBergstrand/HMMER-DB | 1 | 12773649 | #!/usr/bin/env python
"""
Created by: <NAME>
Description: Functions for HMMER-DB.
"""
# Imports & Setup:
import csv
import sys
from Bio import SeqIO
import subprocess
import re
# Regex's
# Raw strings are used so the backslashes reach the regex engine intact:
# "\(" and "\s" are not valid *string* escapes and raise
# DeprecationWarning (an error in future Pythons) in plain literals.
LocusRegex = re.compile(r"\(Locus:\s\S*\)")
LocationRegex = re.compile(r"\(Location:\s\[(\S*)\:(\S*)\]\((\S)\)\)")
# ----------------------------------------------------------------------------------------
def extract_sequence_records(organism_file_path, file_type):
    """
    Read in a sequence file as a list of sequence record objects using Biopython.

    :param organism_file_path: The path to the input file.
    :param file_type: Biopython parser format name (e.g. "fasta", "genbank").
    :return: List of Biopython sequence record objects.
    """
    try:
        print(">> Opening FASTA file: " + organism_file_path)
        # The context manager guarantees the handle is closed even when
        # parsing fails (the original leaked it on a parse error).  Mode
        # "r" replaces the deprecated "rU", which was removed in Python
        # 3.11; universal newlines are the default in text mode.
        with open(organism_file_path, "r") as handle:
            try:
                records = list(SeqIO.parse(handle, file_type))
            except ValueError as error:
                print("Error has occurred while parsing " + organism_file_path + "!")
                print(str(error))
                sys.exit(1)
    except IOError:
        print("Failed to open " + organism_file_path)
        sys.exit(1)
    return records
# -----------------------------------------------------------------------------------------------------------
def check_extensions(organism_file_path, csv_file_path, hmm_file_paths, sql_file_paths):
    """
    Warn (via stdout) about any input file whose extension looks wrong.

    :param organism_file_path: Path to the organism database file (.faa).
    :param csv_file_path: Path to the organism information file (.csv).
    :param hmm_file_paths: Paths to the HMM model files (.hmm).
    :param sql_file_paths: Path to the sqlite3 database file (.sqlite).
    """
    print(">> Performing file extension checks...")
    # Single-file checks, in the original warning order.
    for path, suffix, kind in ((organism_file_path, ".faa", "fasta"),
                               (csv_file_path, ".csv", "csv")):
        if not path.endswith(suffix):
            print("[Warning] " + path + " may not be a " + kind + " file!")
    for hmm_path in hmm_file_paths:
        if not hmm_path.endswith(".hmm"):
            print("[Warning] " + hmm_path + " may not be a HMM file!")
    if not sql_file_paths.endswith(".sqlite"):
        print("[Warning] " + sql_file_paths + " may not be a sqlite file!")
# ----------------------------------------------------------------------------------------
def generate_fasta_string(sec_record_list):
    """
    Concatenate the FASTA representation of every record into one string.

    :param sec_record_list: List of Biopython sequence record objects.
    :return: String containing FASTA formatted strings.
    """
    return ''.join(record.format("fasta") for record in sec_record_list)
# ----------------------------------------------------------------------------------------
def generate_fasta_dict(sec_record_list):
    """
    Map each record's id to its FASTA-formatted string.

    :param sec_record_list: List of Biopython sequence record objects.
    :return: Dictionary of FASTA strings keyed by sequence ID.
    """
    return {record.id: record.format("fasta") for record in sec_record_list}
# ----------------------------------------------------------------------------------------
def hmm_search(fasta_string, hmmer_model_path, processes):
    """
    Runs HMMER with settings specific for extracting subject sequences.

    :param fasta_string: String containing protein sequences in FASTA format.
    :param hmmer_model_path: Path to the HMM model to be used as a query.
    :param processes: Number of worker CPUs passed to hmmsearch via --cpu.
    :return: String containing hmmsearch output.
    """
    # "-" tells hmmsearch to read the sequence database from stdin.
    process = subprocess.Popen(["hmmsearch", "--acc", "--cpu", str(processes), hmmer_model_path, "-"],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=1)
    # This returns a list with both stderr and stdout. Only return stdout. Fail if error.
    # NOTE(review): stderr is NOT piped above, so communicate() always
    # returns None for ``error`` and the failure branch below can never
    # fire -- confirm whether stderr=subprocess.PIPE was intended.
    # NOTE(review): on Python 3, communicate() requires bytes (or text
    # mode) -- passing a str here suggests Python 2 usage; verify.
    stdout, error = process.communicate(fasta_string)
    if error:
        print(str(error))
        sys.exit(1)
    else:
        return stdout
# ------------------------------------------------------------------------------------------------------------
def get_hit_protein_data(hmm_hit_table, annotation_fasta_dict, organism_accession):
    """
    Creates a list of lists which contain protein information.

    Each output row is [protein_accession, organism_accession, locus,
    start, end, strand, protein_fasta].  Locus and location are parsed
    from the FASTA header with the module-level LocusRegex/LocationRegex,
    so headers must carry "(Locus: ...)" and "(Location: [a:b](s))"
    markers; a header without a Location marker aborts the program.

    :param hmm_hit_table: Table of HMM hit objects.
    :param annotation_fasta_dict: Dictionary containing FASTA sequences keyed by their IDs
    :param organism_accession: The accession of the organism.
    :return: A list of lists of hit protein properties.
    """
    hit_proteins = []
    for hit in hmm_hit_table:
        protein_accession = hit.target_protein
        protein_fasta = annotation_fasta_dict[protein_accession]
        # "(Locus: XYZ)" -> keep the token after the space, strip ")".
        locus = str(LocusRegex.search(protein_fasta).group(0))
        locus = locus.split()[1].rstrip(")")
        location_data = LocationRegex.search(protein_fasta)
        try:
            start = int(location_data.group(1))
            end = int(location_data.group(2))
            strand = location_data.group(3)
            protein_data = [protein_accession, organism_accession, locus, start, end, strand, protein_fasta]
            hit_proteins.append(protein_data)
        except AttributeError as error:
            # location_data is None when the header lacks a Location tag;
            # dump diagnostics and abort.
            print(hit)
            print(protein_fasta)
            print(location_data)
            print("This is the organism: ", organism_accession)
            print("The AttributeError was ", str(error))
            sys.exit(1)
    return hit_proteins
# -----------------------------------------------------------------------------------------------------------
def extract_csv_dict(input_csv_path):
    """
    Opens OrganismDB CSV file for reading and stores it as a dictionary.

    :param input_csv_path: Path to the input OrganismDB CSV file.
    :return: Dictionary with each row in the CSV keyed by the organism
        accession (column one) with any ".N" version suffix stripped.
    """
    organism_data_csv = {}
    try:
        print(">> Opening organism CSV file: " + input_csv_path)
        # "with" guarantees the file is closed even if csv parsing
        # raises; the original left the handle open on such an error.
        with open(input_csv_path, "r") as read_file:
            for row in csv.reader(read_file):
                # Row[0] is the organism accession.
                organism_data_csv[row[0].split('.')[0]] = row
    except IOError:
        print("Failed to open " + input_csv_path)
        sys.exit(1)
    return organism_data_csv
# -----------------------------------------------------------------------------------------------------------
def insert_organism_info(db_cursor, organism_info):
    """
    Insert (or replace) a single organism row in the Organisms table.

    :param db_cursor: sqlite3 cursor (or compatible) used for the insert.
    :param organism_info: 6-item sequence matching the column list below.
    """
    sql = '''INSERT OR REPLACE INTO Organisms
             (
                 Organism_Accession,
                 Accession_Type,
                 Organism_Description,
                 Source,
                 Organism_Phylogeny,
                 Sequence_Length
             )
             VALUES
             (?,?,?,?,?,?)'''
    db_cursor.execute(sql, organism_info)
# -----------------------------------------------------------------------------------------------------------
# 8:
def insert_proteins(db_cursor, hit_proteins):
    """
    Insert (or replace) each hit protein row into the Proteins table.

    :param db_cursor: sqlite3 cursor (or compatible) used for the inserts.
    :param hit_proteins: iterable of 7-item rows matching the columns below.
    """
    sql = '''INSERT OR REPLACE INTO Proteins
             (
                 Protein_Accession,
                 Organism_Accession,
                 Locus,
                 Start,
                 "End",
                 Strand,
                 FASTA_Sequence
             )
             VALUES
             (?,?,?,?,?,?,?)'''
    for row in hit_proteins:
        db_cursor.execute(sql, row)
# -----------------------------------------------------------------------------------------------------------
def insert_hits(cursor, hmm_hit_list):
    """
    Insert (or replace) each HMM hit, keyed by its MD5 hash.

    :param cursor: sqlite3 cursor (or compatible) used for the inserts.
    :param hmm_hit_list: iterable of hit objects exposing the attributes
        referenced below plus a ``get_md5()`` method.
    """
    sql = '''INSERT OR REPLACE INTO HMM_Hits
             (
                 Hit_HASH,
                 Protein_Accession,
                 HMM_Model,
                 HMM_Score,
                 HMM_E_Value,
                 Ali_From,
                 Ali_To,
                 HMM_From,
                 HMM_To,
                 HMM_Coverage
             )
             VALUES
             (?,?,?,?,?,?,?,?,?,?)'''
    for hit in hmm_hit_list:
        cursor.execute(sql, [hit.get_md5(), hit.target_protein,
                             hit.hmm_name, hit.score, hit.e_value,
                             hit.ali_from, hit.ali_to,
                             hit.hmm_from, hit.hmm_to, hit.hmm_coverage])
| 2.765625 | 3 |
tests/request_parsers/test_api_gateway_request_parser.py | gregchagnon/orm.cloud | 0 | 12773650 | <filename>tests/request_parsers/test_api_gateway_request_parser.py
import unittest
from orm_cloud.request_parsers.api_gateway_request_parser import ApiGatewayRequestParser
json = """
{
"body": "eyJ0ZXN0IjoiYm9keSJ9",
"resource": "/{proxy+}",
"path": "/thing",
"httpMethod": "GET",
"isBase64Encoded": true,
"queryStringParameters": {
"filter": "first_name: Greg",
"sort_by": "-update_timestamp_gmt",
"limit": "20",
"offset": "0"
},
"pathParameters": {
"proxy": "/persons"
},
"stageVariables": {
"baz": "qux"
},
"headers": {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, sdch",
"Accept-Language": "en-US,en;q=0.8",
"Cache-Control": "max-age=0",
"CloudFront-Forwarded-Proto": "https",
"CloudFront-Is-Desktop-Viewer": "true",
"CloudFront-Is-Mobile-Viewer": "false",
"CloudFront-Is-SmartTV-Viewer": "false",
"CloudFront-Is-Tablet-Viewer": "false",
"CloudFront-Viewer-Country": "US",
"Host": "1234567890.execute-api.us-east-1.amazonaws.com",
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Custom User Agent String",
"Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)",
"X-Amz-Cf-Id": "cDehVQoZnx43VYQb9j2-nvCh-9z396Uhbp027Y2JvkCPNLmGJHqlaA==",
"X-Forwarded-For": "127.0.0.1, 127.0.0.2",
"X-Forwarded-Port": "443",
"X-Forwarded-Proto": "https"
},
"requestContext": {
"accountId": "123456789012",
"resourceId": "123456",
"stage": "prod",
"requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef",
"requestTime": "09/Apr/2015:12:34:56 +0000",
"requestTimeEpoch": 1428582896000,
"identity": {
"cognitoIdentityPoolId": null,
"accountId": null,
"cognitoIdentityId": null,
"caller": null,
"accessKey": null,
"sourceIp": "127.0.0.1",
"cognitoAuthenticationType": null,
"cognitoAuthenticationProvider": null,
"userArn": null,
"userAgent": "Custom User Agent String",
"user": null
},
"path": "/prod/thing",
"resourcePath": "/{proxy+}",
"httpMethod": "POST",
"apiId": "1234567890",
"protocol": "HTTP/1.1"
}
}"""
class TestParsing(unittest.TestCase):
    """Unit tests for ApiGatewayRequestParser.parse()."""

    def test_required_params(self):
        """The parsed event must surface path, action, query params and headers."""
        parser = ApiGatewayRequestParser()
        response = parser.parse(json)
        self.assertEqual(response['path'], '/thing')
        self.assertEqual(response['action'], 'GET')
        self.assertTrue('filter' in response['query_string_params'])
        self.assertTrue('headers' in response)


if __name__ == '__main__':
    unittest.main()
| 2.21875 | 2 |