|
from math import cos, pi, sqrt
INITIAL_X = 0 # meters
INITIAL_Y = 5 # meters
INITIAL_THETA = 0 # degrees
LANDMARK_1_LOCATION = [6, 4]
LANDMARK_2_LOCATION = [-7, 8]
LANDMARK_3_LOCATION = [6, -4]
LANDMARKS = [LANDMARK_1_LOCATION, LANDMARK_2_LOCATION, LANDMARK_3_LOCATION]
STD_DEV_LOCATION_RANGE = 0.1
STD_DEV_LOCATION_BEARING = 0.05
SAMPLE_PERIOD = 0.1 # seconds
TOTAL_TIME = 40 # seconds
ALPHA1 = 0.1
ALPHA2 = 0.01
ALPHA3 = 0.01
ALPHA4 = 0.1
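# A minimal sketch (not part of the original snippet) of how these constants
# might feed a range-bearing measurement model: one noisy observation of a
# landmark seen from the initial pose. The sensor model, and the assumption
# that the bearing noise is in radians, are illustrative only.
from math import atan2, radians
from random import gauss
def simulate_range_bearing(x, y, theta_deg, landmark):
    # true range and bearing from the robot pose to the landmark
    dx, dy = landmark[0] - x, landmark[1] - y
    true_range = sqrt(dx ** 2 + dy ** 2)
    true_bearing = atan2(dy, dx) - radians(theta_deg)
    # corrupt both with zero-mean Gaussian noise using the constants above
    return (true_range + gauss(0, STD_DEV_LOCATION_RANGE),
            true_bearing + gauss(0, STD_DEV_LOCATION_BEARING))
# e.g. simulate_range_bearing(INITIAL_X, INITIAL_Y, INITIAL_THETA, LANDMARK_1_LOCATION)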
|
from requests_html import HTML
from spells.forum import Spell
from unittest.mock import patch
from unittest.mock import MagicMock
from utils.utils import load_html
from utils.dummy import DummyResponse
class DummyItem:
def __init__(self, html):
self._html = html
@property
def html(self):
return self._html
class DummyLink:
def __init__(self, links):
self._links = links
@property
def absolute_links(self):
return self._links
class TestForum:
    def _create_spell(self, options=None):
        base = {
            'entry': 'http://localhost/index'
        }
        return Spell({**base, **(options or {})})
def test_name(self):
name = Spell.name()
assert type(name) == str
assert len(name) > 0
def test_intro(self):
name = Spell.intro()
assert type(name) == str
assert len(name) > 0
@patch('spells.forum.HTMLSession')
def test_get_items_from_page(self, HTMLSession):
html = HTML(html=load_html('basepage'))
HTMLSession.return_value.get.return_value = DummyResponse(html)
spell = self._create_spell({
'itemListSelector': '#unselect'
})
result = spell._get_items_from_page('test_url')
for item in result:
assert item
@patch('spells.forum.HTMLSession')
def test_get_items_from_page_failed_response(self, HTMLSession):
HTMLSession.return_value.get.return_value = DummyResponse('', 404)
spell = self._create_spell()
result = spell._get_items_from_page('test_url')
assert len(result) == 0
@patch('spells.forum.HTMLSession')
def test_process_item(self, HTMLSession):
html = HTML(html=load_html('basepage'))
HTMLSession.return_value.get.return_value = DummyResponse(html)
spell = self._create_spell({
'titleSelector': '.title',
'dateSelector': '.date',
'contentSelector': '.description'
})
item = DummyLink(['test_link'])
result = spell.process_item(item)
assert result['title'] == 'title'
assert result['pubDate'] == 'date'
assert result['description'] == '<div class="description">description</div>'
assert result['link'] == 'test_link'
assert result['addition'] is None
def test_process_item_duplicated_link(self):
spell = self._create_spell({
})
item = DummyLink(['test_link', 'test_link2'])
result = spell.process_item(item)
assert result == {}
@patch('spells.forum.HTMLSession')
def test_process_item_wrong_selector(self, HTMLSession):
html = HTML(html=load_html('basepage'))
HTMLSession.return_value.get.return_value = DummyResponse(html)
spell = self._create_spell({
'titleSelector': '.wrongtitle',
'dateSelector': '.wrongdate',
'contentSelector': '.wrongdescription'
})
item = DummyLink(['test_link'])
result = spell.process_item(item)
assert result == {}
@patch('spells.forum.HTMLSession')
def test_go(self, HTMLSession):
html = HTML(html=load_html('basepage'))
HTMLSession.return_value.get.return_value = DummyResponse(html)
spell = self._create_spell({
'itemListSelector': '#unselect',
'titleSelector': '.title',
'dateSelector': '.date',
'contentSelector': '.description'
})
result = spell.go()
count = 0
for item in result:
count += 1
assert item
assert count == 1
@patch('spells.forum.HTMLSession')
def test_go_multi_page(self, HTMLSession):
html = HTML(html=load_html('basepage'))
HTMLSession.return_value.get.return_value = DummyResponse(html)
spell = self._create_spell({
'page_param': 'page',
'pages': '5',
'itemListSelector': '#unselect',
'titleSelector': '.title',
'dateSelector': '.date',
'contentSelector': '.description'
})
result = spell.go()
count = 0
for item in result:
count += 1
assert item
assert count == 5
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetCatalogResult',
'AwaitableGetCatalogResult',
'get_catalog',
]
@pulumi.output_type
class GetCatalogResult:
"""
A collection of values returned by getCatalog.
"""
def __init__(__self__, attached_catalog_private_endpoints=None, catalog_id=None, compartment_id=None, defined_tags=None, display_name=None, freeform_tags=None, id=None, lifecycle_details=None, number_of_objects=None, service_api_url=None, service_console_url=None, state=None, time_created=None, time_updated=None):
if attached_catalog_private_endpoints and not isinstance(attached_catalog_private_endpoints, list):
raise TypeError("Expected argument 'attached_catalog_private_endpoints' to be a list")
pulumi.set(__self__, "attached_catalog_private_endpoints", attached_catalog_private_endpoints)
if catalog_id and not isinstance(catalog_id, str):
raise TypeError("Expected argument 'catalog_id' to be a str")
pulumi.set(__self__, "catalog_id", catalog_id)
if compartment_id and not isinstance(compartment_id, str):
raise TypeError("Expected argument 'compartment_id' to be a str")
pulumi.set(__self__, "compartment_id", compartment_id)
if defined_tags and not isinstance(defined_tags, dict):
raise TypeError("Expected argument 'defined_tags' to be a dict")
pulumi.set(__self__, "defined_tags", defined_tags)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if freeform_tags and not isinstance(freeform_tags, dict):
raise TypeError("Expected argument 'freeform_tags' to be a dict")
pulumi.set(__self__, "freeform_tags", freeform_tags)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if lifecycle_details and not isinstance(lifecycle_details, str):
raise TypeError("Expected argument 'lifecycle_details' to be a str")
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
if number_of_objects and not isinstance(number_of_objects, int):
raise TypeError("Expected argument 'number_of_objects' to be a int")
pulumi.set(__self__, "number_of_objects", number_of_objects)
if service_api_url and not isinstance(service_api_url, str):
raise TypeError("Expected argument 'service_api_url' to be a str")
pulumi.set(__self__, "service_api_url", service_api_url)
if service_console_url and not isinstance(service_console_url, str):
raise TypeError("Expected argument 'service_console_url' to be a str")
pulumi.set(__self__, "service_console_url", service_console_url)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
if time_updated and not isinstance(time_updated, str):
raise TypeError("Expected argument 'time_updated' to be a str")
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="attachedCatalogPrivateEndpoints")
def attached_catalog_private_endpoints(self) -> Sequence[str]:
"""
The list of private reverse connection endpoints attached to the catalog
"""
return pulumi.get(self, "attached_catalog_private_endpoints")
@property
@pulumi.getter(name="catalogId")
def catalog_id(self) -> str:
return pulumi.get(self, "catalog_id")
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
Compartment identifier.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{"foo-namespace.bar-key": "value"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
Data catalog identifier, which can be renamed.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
Simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
def id(self) -> str:
"""
Unique identifier that is immutable on creation.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> str:
"""
        A message describing the current state in more detail. For example, it can be used to provide actionable information for a resource in 'Failed' state.
"""
return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter(name="numberOfObjects")
def number_of_objects(self) -> int:
"""
The number of data objects added to the data catalog. Please see the data catalog documentation for further information on how this is calculated.
"""
return pulumi.get(self, "number_of_objects")
@property
@pulumi.getter(name="serviceApiUrl")
def service_api_url(self) -> str:
"""
The REST front endpoint URL to the data catalog instance.
"""
return pulumi.get(self, "service_api_url")
@property
@pulumi.getter(name="serviceConsoleUrl")
def service_console_url(self) -> str:
"""
The console front endpoint URL to the data catalog instance.
"""
return pulumi.get(self, "service_console_url")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the data catalog resource.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
The time the data catalog was created. An [RFC3339](https://tools.ietf.org/html/rfc3339) formatted datetime string.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> str:
"""
The time the data catalog was updated. An [RFC3339](https://tools.ietf.org/html/rfc3339) formatted datetime string.
"""
return pulumi.get(self, "time_updated")
class AwaitableGetCatalogResult(GetCatalogResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCatalogResult(
attached_catalog_private_endpoints=self.attached_catalog_private_endpoints,
catalog_id=self.catalog_id,
compartment_id=self.compartment_id,
defined_tags=self.defined_tags,
display_name=self.display_name,
freeform_tags=self.freeform_tags,
id=self.id,
lifecycle_details=self.lifecycle_details,
number_of_objects=self.number_of_objects,
service_api_url=self.service_api_url,
service_console_url=self.service_console_url,
state=self.state,
time_created=self.time_created,
time_updated=self.time_updated)
def get_catalog(catalog_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCatalogResult:
"""
This data source provides details about a specific Catalog resource in Oracle Cloud Infrastructure Data Catalog service.
Gets a data catalog by identifier.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_catalog = oci.datacatalog.get_catalog(catalog_id=oci_datacatalog_catalog["test_catalog"]["id"])
```
:param str catalog_id: Unique catalog identifier.
"""
__args__ = dict()
__args__['catalogId'] = catalog_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:datacatalog/getCatalog:getCatalog', __args__, opts=opts, typ=GetCatalogResult).value
return AwaitableGetCatalogResult(
attached_catalog_private_endpoints=__ret__.attached_catalog_private_endpoints,
catalog_id=__ret__.catalog_id,
compartment_id=__ret__.compartment_id,
defined_tags=__ret__.defined_tags,
display_name=__ret__.display_name,
freeform_tags=__ret__.freeform_tags,
id=__ret__.id,
lifecycle_details=__ret__.lifecycle_details,
number_of_objects=__ret__.number_of_objects,
service_api_url=__ret__.service_api_url,
service_console_url=__ret__.service_console_url,
state=__ret__.state,
time_created=__ret__.time_created,
time_updated=__ret__.time_updated)
|
import sys
import os, os.path
import subprocess
import numpy
if __name__ == "__main__":
formula = sys.argv[1]
kind = sys.argv[2]
program = "bsub"
params = ["-n", "4", "-W", "04:00", "-R", "\"rusage[mem=4096]\""]
run_string = "\"mpirun -n 4 gpaw-python run_doping.py {0} {1} {2:.2f}\""
for fermi_shift in numpy.linspace(-1.0, 1.0, 41):
call_cmd = [program] + params + [run_string.format(formula, kind, fermi_shift)]
print(" ".join(call_cmd))
choice = input("Should we proceed? [y/n]")
if choice in ("y", "Y"):
for fermi_shift in numpy.linspace(-1.0, 1.0, 41):
call_cmd = [program] + params + [run_string.format(formula, kind, fermi_shift)]
proc = subprocess.run(" ".join(call_cmd),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
print(proc.stdout.decode("utf8"))
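# For reference (illustrative argument values), the first command printed for
# formula="MoS2", kind="bulk" would be:
#   bsub -n 4 -W 04:00 -R "rusage[mem=4096]" "mpirun -n 4 gpaw-python run_doping.py MoS2 bulk -1.00"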
|
from __future__ import division
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.naive_bayes import MultinomialNB
pd.options.mode.chained_assignment = None
from sklearn.metrics import confusion_matrix
from sklearn.cross_validation import cross_val_score, train_test_split
print "Step1...reading data..."
df = pd.read_csv("../app/static/merged_data.csv")
print "Step1...done reading data..."
print "Step2...throwing out floats..."
essay_df = df[['_projectid', 'RESP', 'essay']]
essay_df['new_essay'] = essay_df['essay'].map(lambda x: type(x))
essay_df = essay_df[essay_df.new_essay == str]
print "Step2...done throwing out floats"
print "percent remaining", len(essay_df)/len(df)
print "Step3...decoding data..."
essay_df.new_essay = essay_df['essay'].map(lambda x: x.decode('utf-8'))
print "Step3...done decoding"
print "Step4...transforming data..."
documents = essay_df.new_essay.tolist()
classes = essay_df.RESP.tolist()
print "Step4...done transforming data"
print "Step5...vectorizing data..."
vectorizer = CountVectorizer(stop_words='english', ngram_range=(1,2))
doc_vectors = vectorizer.fit_transform(documents)
print "Step5...done vectorizing data...fitting model..." \
""
model = MultinomialNB().fit(doc_vectors, classes)
print "done fitting model"
precision = np.mean(cross_val_score(model, doc_vectors, classes, scoring='precision'))
cm = confusion_matrix(classes, model.predict(doc_vectors))
print "Precision", precision
print "Percentage off", cm[0][1]/(cm[0][0]+cm[0][1])
print cm
|
from typing import Iterable, Iterator, Sequence, List, Mapping, Dict
def f1() -> Iterable[int]:
pass
def f2() -> Iterator[int]:
pass
def f3() -> Sequence[int]:
pass
def f4() -> List[int]:
pass
def f5() -> Mapping[str, int]:
pass
def f6() -> Dict[str, int]:
pass
for x in f1():
pass
for x in f2():
pass
for x in f3():
pass
for x in f4():
pass
for x in f5():
pass
for x in f6():
pass
|
import pandas as pd
from .medscore import UserMacros
from .models import Food
from .views import FOOD_BEV, NUTR
CAL_FROM_G_PRO = 4
CAL_FROM_G_CHO = 4
CAL_FROM_G_FAT = 9
def getFoodMacros(foods):
## Get the foods that the user ate for the day
serve_field = Food._meta.get_field('servings')
name_field = Food._meta.get_field('food_text')
macros = UserMacros()
for food in foods:
servings = serve_field.value_from_object(food)
food_name = name_field.value_from_object(food)
db_food = FOOD_BEV.loc[FOOD_BEV['main_food_description'] == food_name]
food_code = db_food['food_code'].iloc[0]
food_item = NUTR.loc[NUTR['food_code'] == food_code]
macros.calories += float(food_item['energy_kcal']) * float(servings)
        macros.vitD += float(food_item['vitamin_d_d2_+_d3_mcg']) * float(servings)
        macros.vitE += float(food_item['vitamin_e_alpha-tocopherol_mg']) * float(servings)
        macros.vitA += float(food_item['vitamin_a,_rae_mcg_rae']) * float(servings)
        macros.vitK += float(food_item['vitamin_k_phylloquinone_mcg']) * float(servings)
        macros.vitB12 += float(food_item['vitamin_b-12_mcg']) * float(servings)
        macros.folate += float(food_item['folate,_total_mcg']) * float(servings)
        # convert grams of nutrient consumed to calories obtained from that nutrient
        macros.energyCHO += float(food_item['carbohydrate_g']) * CAL_FROM_G_CHO * float(servings)
        macros.energyPro += float(food_item['protein_g']) * CAL_FROM_G_PRO * float(servings)
        macros.energyFat += float(food_item['total_fat_g']) * CAL_FROM_G_FAT * float(servings)
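    # Express each energy contribution as a percentage of total calories,
    # e.g. 50 g of carbohydrate -> 200 kcal, which is 40% of a 500 kcal total.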
macros.energyCHO = (macros.energyCHO / macros.calories) * 100
macros.energyPro = (macros.energyPro / macros.calories) * 100
macros.energyFat = (macros.energyFat / macros.calories) * 100
return macros
|
from dark_chess_app import login
from flask_login import UserMixin
# Note that this is a temporary solution to storing user sessions.
# It has the distinct disadvantage that it cannot be shared by
# multiple processes/servers. Sooner rather than later this should
# probably be refactored to make use of redis or something.
users = {}
@login.user_loader
def load_user(token):
return users[token] if token in users else None
class User(UserMixin):
def __init__(self, token, user_data):
# update the session manager
self.token = token
users[token] = self
# Cache user data
self.id = user_data['id']
self.username = user_data['username']
self.email_confirmed = user_data['email_confirmed']
self.registration_timestamp = user_data['registration_date']['timestamp']
self.friends = user_data['friends']
def get_id(self):
return self.token
def as_dict(self):
return {
'id' : self.id,
'username' : self.username,
}
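# A rough sketch (assumption, not part of this module) of the Redis-backed
# variant suggested in the note above; the key prefix and JSON serialization
# are illustrative choices.
# import json
# import redis
# store = redis.Redis()
# @login.user_loader
# def load_user(token):
#     raw = store.get('user:' + token)
#     return User(token, json.loads(raw)) if raw else None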
|
from .phat import InkyPHAT, InkyPHAT_SSD1608 # noqa: F401
from .what import InkyWHAT # noqa: F401
from . import eeprom
def auto(i2c_bus=None):
_eeprom = eeprom.read_eeprom(i2c_bus=i2c_bus)
if _eeprom is None:
raise RuntimeError("No EEPROM detected! You must manually initialise your Inky board.")
"""
The EEPROM is used to disambiguate the variants of wHAT and pHAT
1 Red pHAT (High-Temp)
2 Yellow wHAT (1_E)
3 Black wHAT (1_E)
4 Black pHAT (Normal)
5 Yellow pHAT (DEP0213YNS75AFICP)
6 Red wHAT (Regular)
7 Red wHAT (High-Temp)
8 Red wHAT (DEPG0420RWS19AF0HP)
10 BW pHAT (ssd1608) (DEPG0213BNS800F13CP)
11 Red pHAT (ssd1608)
12 Yellow pHAT (ssd1608)
"""
if _eeprom.display_variant in (1, 4, 5):
return InkyPHAT(_eeprom.get_color())
if _eeprom.display_variant in (10, 11, 12):
return InkyPHAT_SSD1608(_eeprom.get_color())
return InkyWHAT(_eeprom.get_color())
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from clock.exports.views import ExportMonth, ExportMonthAPI, ExportContractMonthAPI, ExportNuke
urlpatterns = [
# Export URLs
url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]+)/contract/(?P<pk>\d+)/$', ExportMonth.as_view(month_format='%m'),
name="contract"
),
url(r'^api/(?P<year>[0-9]{4})/(?P<month>[0-9]+)/$', ExportMonthAPI.as_view(month_format='%m'),
name="api_all"
),
# ListView for all shifts of a contract in a month
url(r'^api/(?P<year>[0-9]{4})/(?P<month>[0-9]+)/contract/(?P<pk>\d+)/$',
ExportContractMonthAPI.as_view(month_format='%m'),
name='api_contract'),
url(r'^(?P<year>[0-9]{4})/(?P<month>[0-9]+)/contract/(?P<pk>\d+)/hours/(?P<hours>\d+)/nuke/$',
ExportNuke.as_view(month_format='%m'),
name="contract_nuke"
),
]
|
import spotipy
from pprint import pprint
def main():
spotify = spotipy.Spotify(auth_manager=spotipy.SpotifyOAuth())
me = spotify.me()
pprint(me)
if __name__ == "__main__":
main()
|
from datasets import load_dataset, load_metric
import numpy as np
dataset = load_dataset("glue", "sst2")
datasetTrain = np.asarray(dataset["train"])
print(type(datasetTrain))
# print(datasetTrain)
import data_utils
import torch
from torch.utils.data import Dataset
print(type(Dataset))
print(Dataset)
class SST2Dataset(Dataset):
"""
    A torch.utils.data.Dataset wrapper for the SST-2 dataset.
"""
    def __init__(self, dataframe, tokenizer):
        """
        Args:
          dataframe: A Pandas dataframe containing the data.
          tokenizer: A transformers.PreTrainedTokenizerFast object that is used to
            tokenize the data.
        """
        # encode_data() returns a tuple of (input_ids, attention_masks)
        self.encoded_data = data_utils.encode_data(dataframe, tokenizer)
        self.label_list = data_utils.extract_labels(dataframe)
    def __len__(self):
        return len(self.label_list)
    def __getitem__(self, i):
        """
        Returns:
          example: A dictionary containing the input_ids, attention_mask, and
            label for the i-th example, with the values being numeric tensors
            and the keys being 'input_ids', 'attention_mask', and 'labels'.
        """
        return {
            "input_ids": self.encoded_data[0][i],
            "attention_mask": self.encoded_data[1][i],
            "labels": self.label_list[i],
        }
|
import numpy as np
from void_mesh import *
from functions import *
d1 = 1
d2 = 1
p = 6
m = 6
R = 0.2
element_type = 'D2QU4N'
defValue = 0.1 #Deformation Value
NL, EL = void_mesh(d1, d2, p, m, R, element_type)
BC_flag = 'extension'
ENL, DOFs, DOCs = assign_BCs(NL, BC_flag, defValue)
K = assemble_stiffness(ENL, EL, NL)
Fp = assemble_forces(ENL, NL)
Up = assemble_displacements(ENL, NL)
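# Partition the global stiffness matrix by free (unknown displacement, DOFs)
# and prescribed (unknown reaction force, DOCs) degrees of freedom:
#
#   [K_UU  K_UP] [Uu]   [Fp]
#   [K_PU  K_PP] [Up] = [Fu]
#
# so Uu = K_UU^-1 (Fp - K_UP @ Up) and Fu = K_PU @ Uu + K_PP @ Up, which is
# exactly what the lines below compute.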
K_UU = K[0 : DOFs, 0 : DOFs]
K_UP = K[0 : DOFs, DOFs : DOCs + DOFs]
K_PU = K[DOFs : DOCs + DOFs, 0 : DOFs]
K_PP = K[DOFs : DOCs + DOFs, DOFs : DOCs + DOFs]
F = Fp - (K_UP @ Up)
Uu = np.linalg.solve(K_UU, F)
Fu = (K_PU @ Uu) + (K_PP @ Up)
ENL = update_nodes(ENL, Uu, Fu, NL)
print(K[1:6, 1:6])
|
from mltoolkit.mldp.steps.preprocessors import BasePreProcessor
from mltoolkit.mlutils.helpers.paths_and_files import get_file_paths
from numpy import random
class GroupFileShuffler(BasePreProcessor):
"""Reads the paths of group CSV files, shuffles them, and passes along."""
def __init__(self, **kwargs):
super(GroupFileShuffler, self).__init__(**kwargs)
def __call__(self, data_path, **kwargs):
file_paths = []
if not isinstance(data_path, list):
data_path = [data_path]
for _data_path in data_path:
file_paths += get_file_paths(_data_path)
random.shuffle(file_paths)
new_data_source = kwargs
new_data_source['data_path'] = file_paths
return new_data_source
|
#!/usr/bin/env python3
import aiohttp
import logging
from urllib.parse import quote
class Session:
def __init__(self, url_builder, headers, user = None, password = None, verify_ssl_certs = True):
self.logger = logging.getLogger(__name__)
self.url_builder = url_builder
self.headers = headers
self.basic_auth_credentials = None if (user is None or password is None) else aiohttp.BasicAuth(login = user, password = password)
self.verify_ssl_certs = verify_ssl_certs
async def __aenter__(self):
tcp_connector = None if self.verify_ssl_certs else aiohttp.TCPConnector(verify_ssl = False)
self.session = aiohttp.ClientSession(auth = self.basic_auth_credentials, headers = self.headers, connector = tcp_connector)
return self
async def __aexit__(self, exc_type, exc, tb):
await self.session.close()
async def get_resource_at_once(self, resource):
relative_url = self.url_builder.relative_url_from_resource(resource)
absolute_url = self.url_builder.absolute_url_from_relative(relative_url)
quoted_url = quote(absolute_url, safe = "%/:=&?~#+!$,;@()*[]")
self.logger.debug('Getting resource at URL: {:s}'.format(absolute_url))
async with self.session.get(quoted_url) as response:
return await response.content.read()
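# A usage sketch (the url_builder object, headers, and resource path are
# whatever the caller supplies; the values below are illustrative):
# async def fetch(url_builder):
#     async with Session(url_builder, headers={'Accept': 'application/json'}) as session:
#         return await session.get_resource_at_once('some/resource')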
|
"""server"""
# -*- coding:utf-8 -*-
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-05 15:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import summer.validators
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, unique=True)),
('country', models.CharField(blank=True, max_length=100)),
('iata_code', models.CharField(max_length=3, unique=True)),
('timezone', models.CharField(max_length=50, validators=(summer.validators.timezone_exists_validator,))),
('has_dst', models.BooleanField(editable=False)),
('offset_to_utc', models.IntegerField(editable=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name_plural': 'cities',
'ordering': ('offset_to_utc', 'name'),
},
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cities', models.ManyToManyField(to='summer.City')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
import json
import yaml
from .batch_cli_utils import get_batch_if_exists
def init_parser(parser):
parser.add_argument('batch_id', type=int, help="ID number of the desired batch")
parser.add_argument('-o', type=str, default='yaml', help="Specify output format",
choices=["yaml", "json"])
def main(args, pass_through_args, client):
maybe_batch = get_batch_if_exists(client, args.batch_id)
if maybe_batch is None:
print(f"Batch with id {args.batch_id} not found")
exit(1)
batch = maybe_batch
formatter = None
if args.o == "json":
formatter = lambda s: json.dumps(s, indent=2)
elif args.o == "yaml":
formatter = yaml.dump
print(formatter(batch.status()))
|
""" Library for predicates during queries. """
import abc
import db.rtype as rtype
# General superclass for all predicates
class Predicate(rtype.RType):
@abc.abstractmethod
def before_query(self):
"""
Function to run at the beginning of a query chain.
Useful for aggregations that have things to reset, e.g. counters.
"""
pass
def default(self):
""" Predicates by default return False. """
return False
@abc.abstractmethod
def on_record(self, record):
"""
Function to run on the given record.
Args:
record (rrecord.Record): the record to process according to the
predicate
Returns a truthy value if the record is to be included (satisfies the
predicate), or a falsy value if not.
"""
pass
# Concrete instances of predicates
class Where(Predicate):
def __init__(self, func):
"""
Uses func to evaluate each record to determine satisfaction.
In short, acts like the filter class.
Useful for examples like Where(lambda record: record['key'] % 2) for
filtering results.
Args:
func (callable): the predicate function.
"""
self._func = func
def before_query(self): pass
def on_record(self, record):
return self._func(record)
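# A small usage sketch (the records below are illustrative; on_record is called
# directly rather than through a query chain):
# even_key = Where(lambda record: record['key'] % 2 == 0)
# records = [{'key': 1}, {'key': 2}, {'key': 4}]
# kept = [r for r in records if even_key.on_record(r)]  # -> the records with key 2 and 4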
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import logging
import multiprocessing
import os
import unittest
import warnings
import numpy as np
from monty.serialization import loadfn
from pymatgen import SETTINGS
from pymatgen.analysis.pourbaix_diagram import (
IonEntry,
MultiEntry,
PourbaixDiagram,
PourbaixEntry,
PourbaixPlotter,
)
from pymatgen.core.ion import Ion
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.util.testing import PymatgenTest
logger = logging.getLogger(__name__)
class PourbaixEntryTest(unittest.TestCase):
    """
    Test all functions using a fictitious entry
    """
    _multiprocess_shared_ = True
def setUp(self):
# comp = Composition("Mn2O3")
self.solentry = ComputedEntry("Mn2O3", 49)
ion = Ion.from_formula("MnO4-")
self.ionentry = IonEntry(ion, 25)
self.PxIon = PourbaixEntry(self.ionentry)
self.PxSol = PourbaixEntry(self.solentry)
self.PxIon.concentration = 1e-4
def test_pourbaix_entry(self):
self.assertEqual(self.PxIon.entry.energy, 25, "Wrong Energy!")
self.assertEqual(self.PxIon.entry.name, "MnO4[-]", "Wrong Entry!")
self.assertEqual(self.PxSol.entry.energy, 49, "Wrong Energy!")
self.assertEqual(self.PxSol.entry.name, "Mn2O3", "Wrong Entry!")
# self.assertEqual(self.PxIon.energy, 25, "Wrong Energy!")
# self.assertEqual(self.PxSol.energy, 49, "Wrong Energy!")
self.assertEqual(self.PxIon.concentration, 1e-4, "Wrong concentration!")
def test_calc_coeff_terms(self):
self.assertEqual(self.PxIon.npH, -8, "Wrong npH!")
self.assertEqual(self.PxIon.nPhi, -7, "Wrong nPhi!")
self.assertEqual(self.PxIon.nH2O, 4, "Wrong nH2O!")
self.assertEqual(self.PxSol.npH, -6, "Wrong npH!")
self.assertEqual(self.PxSol.nPhi, -6, "Wrong nPhi!")
self.assertEqual(self.PxSol.nH2O, 3, "Wrong nH2O!")
def test_to_from_dict(self):
d = self.PxIon.as_dict()
ion_entry = self.PxIon.from_dict(d)
self.assertEqual(ion_entry.entry.name, "MnO4[-]", "Wrong Entry!")
d = self.PxSol.as_dict()
sol_entry = self.PxSol.from_dict(d)
self.assertEqual(sol_entry.name, "Mn2O3(s)", "Wrong Entry!")
self.assertEqual(
sol_entry.energy,
self.PxSol.energy,
"as_dict and from_dict energies unequal",
)
def test_energy_functions(self):
# TODO: test these for values
self.PxSol.energy_at_conditions(10, 0)
self.PxSol.energy_at_conditions(np.array([1, 2, 3]), 0)
self.PxSol.energy_at_conditions(10, np.array([1, 2, 3]))
self.PxSol.energy_at_conditions(np.array([1, 2, 3]), np.array([1, 2, 3]))
def test_multi_entry(self):
# TODO: More robust multientry test
m_entry = MultiEntry([self.PxSol, self.PxIon])
for attr in ["energy", "composition", "nPhi"]:
self.assertEqual(
getattr(m_entry, attr),
getattr(self.PxSol, attr) + getattr(self.PxIon, attr),
)
# As dict, from dict
m_entry_dict = m_entry.as_dict()
m_entry_new = MultiEntry.from_dict(m_entry_dict)
self.assertEqual(m_entry_new.energy, m_entry.energy)
def test_get_elt_fraction(self):
entry = ComputedEntry("Mn2Fe3O3", 49)
pbentry = PourbaixEntry(entry)
self.assertAlmostEqual(pbentry.get_element_fraction("Fe"), 0.6)
self.assertAlmostEqual(pbentry.get_element_fraction("Mn"), 0.4)
class PourbaixDiagramTest(unittest.TestCase):
_multiprocess_shared_ = True
@classmethod
def setUpClass(cls):
cls.test_data = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "pourbaix_test_data.json"))
cls.pbx = PourbaixDiagram(cls.test_data["Zn"], filter_solids=True)
cls.pbx_nofilter = PourbaixDiagram(cls.test_data["Zn"], filter_solids=False)
def test_pourbaix_diagram(self):
self.assertEqual(
set([e.name for e in self.pbx.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"},
"List of stable entries does not match",
)
self.assertEqual(
set([e.name for e in self.pbx_nofilter.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)", "ZnO2(s)", "ZnH(s)"},
"List of stable entries for unfiltered pbx does not match",
)
pbx_lowconc = PourbaixDiagram(
self.test_data["Zn"], conc_dict={"Zn": 1e-8}, filter_solids=True
)
self.assertEqual(
set([e.name for e in pbx_lowconc.stable_entries]),
{"Zn(HO)2(aq)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"},
)
def test_properties(self):
self.assertEqual(len(self.pbx.unstable_entries), 2)
def test_multicomponent(self):
# Assure no ions get filtered at high concentration
ag_n = [e for e in self.test_data["Ag-Te-N"] if "Te" not in e.composition]
highconc = PourbaixDiagram(
ag_n, filter_solids=True, conc_dict={"Ag": 1e-5, "N": 1}
)
entry_sets = [set(e.entry_id) for e in highconc.stable_entries]
self.assertIn({"mp-124", "ion-17"}, entry_sets)
# Binary system
pd_binary = PourbaixDiagram(
self.test_data["Ag-Te"],
filter_solids=True,
comp_dict={"Ag": 0.5, "Te": 0.5},
conc_dict={"Ag": 1e-8, "Te": 1e-8},
)
self.assertEqual(len(pd_binary.stable_entries), 30)
test_entry = pd_binary.find_stable_entry(8, 2)
self.assertTrue("mp-499" in test_entry.entry_id)
# Find a specific multientry to test
self.assertEqual(pd_binary.get_decomposition_energy(test_entry, 8, 2), 0)
pd_ternary = PourbaixDiagram(self.test_data["Ag-Te-N"], filter_solids=True)
self.assertEqual(len(pd_ternary.stable_entries), 49)
# Fetch a solid entry and a ground state entry mixture
ag_te_n = self.test_data["Ag-Te-N"][-1]
ground_state_ag_with_ions = MultiEntry(
[self.test_data["Ag-Te-N"][i] for i in [4, 18, 30]],
weights=[1 / 3, 1 / 3, 1 / 3],
)
self.assertAlmostEqual(
pd_ternary.get_decomposition_energy(ag_te_n, 2, -1), 2.767822855765
)
self.assertAlmostEqual(
pd_ternary.get_decomposition_energy(ag_te_n, 10, -2), 3.756840056890625
)
self.assertAlmostEqual(
pd_ternary.get_decomposition_energy(ground_state_ag_with_ions, 2, -1), 0
)
# Test invocation of pourbaix diagram from ternary data
new_ternary = PourbaixDiagram(pd_ternary.all_entries)
self.assertEqual(len(new_ternary.stable_entries), 49)
self.assertAlmostEqual(
new_ternary.get_decomposition_energy(ag_te_n, 2, -1), 2.767822855765
)
self.assertAlmostEqual(
new_ternary.get_decomposition_energy(ag_te_n, 10, -2), 3.756840056890625
)
self.assertAlmostEqual(
new_ternary.get_decomposition_energy(ground_state_ag_with_ions, 2, -1), 0
)
def test_get_pourbaix_domains(self):
domains = PourbaixDiagram.get_pourbaix_domains(self.test_data["Zn"])
self.assertEqual(len(domains[0]), 7)
def test_get_decomposition(self):
# Test a stable entry to ensure that it's zero in the stable region
entry = self.test_data["Zn"][12] # Should correspond to mp-2133
self.assertAlmostEqual(
self.pbx.get_decomposition_energy(entry, 10, 1),
0.0,
5,
"Decomposition energy of ZnO is not 0.",
)
# Test an unstable entry to ensure that it's never zero
entry = self.test_data["Zn"][11]
ph, v = np.meshgrid(np.linspace(0, 14), np.linspace(-2, 4))
result = self.pbx_nofilter.get_decomposition_energy(entry, ph, v)
self.assertTrue(
(result >= 0).all(), "Unstable energy has hull energy of 0 or less"
)
# Test an unstable hydride to ensure HER correction works
self.assertAlmostEqual(
self.pbx.get_decomposition_energy(entry, -3, -2), 3.6979147983333
)
# Test a list of pHs
self.pbx.get_decomposition_energy(entry, np.linspace(0, 2, 5), 2)
# Test a list of Vs
self.pbx.get_decomposition_energy(entry, 4, np.linspace(-3, 3, 10))
# Test a set of matching arrays
ph, v = np.meshgrid(np.linspace(0, 14), np.linspace(-3, 3))
self.pbx.get_decomposition_energy(entry, ph, v)
def test_get_stable_entry(self):
entry = self.pbx.get_stable_entry(0, 0)
self.assertEqual(entry.entry_id, "ion-0")
def test_multielement_parallel(self):
# Simple test to ensure that multiprocessing is working
test_entries = self.test_data["Ag-Te-N"]
nproc = multiprocessing.cpu_count()
pbx = PourbaixDiagram(test_entries, filter_solids=True, nproc=nproc)
self.assertEqual(len(pbx.stable_entries), 49)
def test_solid_filter(self):
entries = self.test_data["Zn"]
pbx = PourbaixDiagram(entries, filter_solids=False)
oxidized_phase = pbx.find_stable_entry(10, 2)
self.assertEqual(oxidized_phase.name, "ZnO2(s)")
entries = self.test_data["Zn"]
pbx = PourbaixDiagram(entries, filter_solids=True)
oxidized_phase = pbx.find_stable_entry(10, 2)
self.assertEqual(oxidized_phase.name, "ZnO(s)")
def test_serialization(self):
d = self.pbx.as_dict()
new = PourbaixDiagram.from_dict(d)
self.assertEqual(
set([e.name for e in new.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"},
"List of stable entries does not match",
)
# Test with unprocessed entries included, this should result in the
# previously filtered entries being included
d = self.pbx.as_dict(include_unprocessed_entries=True)
new = PourbaixDiagram.from_dict(d)
self.assertEqual(
set([e.name for e in new.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)", "ZnO2(s)", "ZnH(s)"},
"List of stable entries for unfiltered pbx does not match",
)
pd_binary = PourbaixDiagram(
self.test_data["Ag-Te"],
filter_solids=True,
comp_dict={"Ag": 0.5, "Te": 0.5},
conc_dict={"Ag": 1e-8, "Te": 1e-8},
)
new_binary = PourbaixDiagram.from_dict(pd_binary.as_dict())
self.assertEqual(len(pd_binary.stable_entries), len(new_binary.stable_entries))
# The two tests below rely on the MP Rest interface.
@unittest.skipIf(
not SETTINGS.get("PMG_MAPI_KEY"), "PMG_MAPI_KEY environment variable not set."
)
def test_heavy(self):
from pymatgen import MPRester
mpr = MPRester()
entries = mpr.get_pourbaix_entries(["Li", "Mg", "Sn", "Pd"])
pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)
entries = mpr.get_pourbaix_entries(["Ba", "Ca", "V", "Cu", "F"])
pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)
entries = mpr.get_pourbaix_entries(["Ba", "Ca", "V", "Cu", "F", "Fe"])
pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)
entries = mpr.get_pourbaix_entries(["Na", "Ca", "Nd", "Y", "Ho", "F"])
pbx = PourbaixDiagram(entries, nproc=4, filter_solids=False)
@unittest.skipIf(
not SETTINGS.get("PMG_MAPI_KEY"), "PMG_MAPI_KEY environment variable not set."
)
def test_mpr_pipeline(self):
from pymatgen import MPRester
mpr = MPRester()
data = mpr.get_pourbaix_entries(["Zn"])
pbx = PourbaixDiagram(data, filter_solids=True, conc_dict={"Zn": 1e-8})
pbx.find_stable_entry(10, 0)
data = mpr.get_pourbaix_entries(["Ag", "Te"])
pbx = PourbaixDiagram(
data, filter_solids=True, conc_dict={"Ag": 1e-8, "Te": 1e-8}
)
self.assertEqual(len(pbx.stable_entries), 30)
test_entry = pbx.find_stable_entry(8, 2)
self.assertAlmostEqual(test_entry.energy, 2.3894017960000009, 1)
# Test custom ions
entries = mpr.get_pourbaix_entries(["Sn", "C", "Na"])
ion = IonEntry(Ion.from_formula("NaO28H80Sn12C24+"), -161.676)
custom_ion_entry = PourbaixEntry(ion, entry_id="my_ion")
pbx = PourbaixDiagram(
entries + [custom_ion_entry],
filter_solids=True,
comp_dict={"Na": 1, "Sn": 12, "C": 24},
)
self.assertAlmostEqual(
pbx.get_decomposition_energy(custom_ion_entry, 5, 2), 2.1209002582, 1
)
# Test against ion sets with multiple equivalent ions (Bi-V regression)
entries = mpr.get_pourbaix_entries(["Bi", "V"])
pbx = PourbaixDiagram(
entries, filter_solids=True, conc_dict={"Bi": 1e-8, "V": 1e-8}
)
self.assertTrue(
all(
[
"Bi" in entry.composition and "V" in entry.composition
for entry in pbx.all_entries
]
)
)
class PourbaixPlotterTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
self.test_data = loadfn(os.path.join(PymatgenTest.TEST_FILES_DIR, "pourbaix_test_data.json"))
self.pd = PourbaixDiagram(self.test_data["Zn"])
self.plotter = PourbaixPlotter(self.pd)
def tearDown(self):
warnings.simplefilter("default")
def test_plot_pourbaix(self):
plotter = PourbaixPlotter(self.pd)
# Default limits
plotter.get_pourbaix_plot()
# Non-standard limits
plotter.get_pourbaix_plot(limits=[[-5, 4], [-2, 2]])
def test_plot_entry_stability(self):
entry = self.pd.all_entries[0]
self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-3, 3]])
# binary system
pd_binary = PourbaixDiagram(
self.test_data["Ag-Te"], comp_dict={"Ag": 0.5, "Te": 0.5}
)
binary_plotter = PourbaixPlotter(pd_binary)
plt = binary_plotter.plot_entry_stability(self.test_data["Ag-Te"][53])
plt.close()
if __name__ == "__main__":
unittest.main()
|
########################################################################################################
## pyFAST - Fingerprint and Similarity Thresholding in python
##
## Karianne Bergen
## 11/14/2016
##
## (see Yoon et al. 2015, Sci. Adv. for algorithm details)
##
########################################################################################################
##
## Feature Extraction (Fingerprinting)
##
########################################################################################################
import numpy as np
import pywt as wt
from sklearn.preprocessing import normalize
from scipy.signal import spectrogram
from scipy.misc import imresize
def init_feature_extractor(params, ntimes):
feats = FeatureExtractor(sampling_rate=params['fingerprint']['sampling_rate'],
window_length=params['fingerprint']['spec_length'],
window_lag=params['fingerprint']['spec_lag'],
fingerprint_length=params['fingerprint']['fp_length'],
fingerprint_lag=params['fingerprint']['fp_lag'],
min_freq=params['fingerprint']["min_freq"],
max_freq=params['fingerprint']["max_freq"],
nfreq=params['fingerprint']['nfreq'],
ntimes=ntimes)
return feats
class FeatureExtractor(object):
def __init__(self, sampling_rate, window_length, window_lag, fingerprint_length, fingerprint_lag,
min_freq = 0, max_freq = None, nfreq = 32, ntimes = 64):
self.sampling_rate = sampling_rate #/ sampling rate
self.window_len = window_length #/ length of window (seconds) used in spectrogram
self.window_lag = window_lag #/ window lag (seconds) used in spectrogram
self.fp_len = fingerprint_length #/ width of fingerprint (samples)
self.fp_lag = fingerprint_lag #/ lag between fingerprints (samples)
self.max_freq = self._initialize_frequencies(max_freq) #/ minimum and maximum frequencies for bandpass filter
self.min_freq = min_freq
self.new_d1 = int(nfreq) #/ number of frequency / time bins in fingerprints (must be power of 2) - TODO: error checking
self.new_d2 = int(ntimes)
self.d1 = None #/ dimension of spectral images prior to resizing
self.d2 = None
self.haar_means = None
self.haar_stddevs = None
self.haar_medians = None
self.haar_absdevs = None
def _initialize_frequencies(self, max_freq): #/ initializes data structure
if max_freq is None:
max_freq = self.sampling_rate/2.0
return max_freq
def update(self, field, value):
if hasattr(self, field):
setattr(self, field, value)
else:
            print('WARNING: object has no attribute: ' + field)
            print('object has the following attributes: ' + ', '.join(self.__dict__.keys()))
return
def get_params(self):
mdict = dict()
for k in self.__dict__.keys():
if k not in ['haar_means','haar_stddevs','haar_absdevs','haar_medians']:
mdict[k] = self.__dict__[k]
return mdict
    #/ returns indices for overlapping windows
def get_window_params(self, N, L, dL):
idx0 = np.asarray(range(0, N+1, dL))
idx2 = np.asarray(range(L,N+1,dL))
nWindows = len(idx2)
idx1 = idx0[0:nWindows]
return nWindows, idx1, idx2
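    #/ e.g. N=10, L=4, dL=2 gives nWindows=4 with idx1=[0, 2, 4, 6] and
    #/ idx2=[4, 6, 8, 10], i.e. windows [0:4], [2:6], [4:8], [6:10]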
########################################################################
## FOR COMPUTING FINGERPRINTS ##
########################################################################
    #/ computes spectrogram from continuous timeseries data
def data_to_spectrogram(self, x_data, window_type = 'hanning'):
f, t, Sxx = spectrogram(x_data, fs=self.sampling_rate,
window=window_type, nperseg=int(self.sampling_rate*self.window_len),
noverlap = int(self.sampling_rate*(self.window_len - self.window_lag)))
# Truncate spectrogram, keep only passband frequencies
if self.min_freq > 0:
fidx_keep = (f >= self.min_freq)
Sxx = Sxx[fidx_keep, :]
f = f[fidx_keep]
if self.max_freq < f[-1]:
fidx_keep = (f <= self.max_freq)
Sxx = Sxx[fidx_keep, :]
f = f[fidx_keep]
self.frequencies = f
self.times = t
return f, t, Sxx
#/ breaks spectrogram into overlapping spectral images
def spectrogram_to_spectral_images(self, Sxx):
nFreq, nTimes = np.shape(Sxx)
nWindows, idx1, idx2 = self.get_window_params(nTimes, self.fp_len, self.fp_lag)
spectral_images = np.zeros([nWindows, nFreq, self.fp_len])
for i in range(nWindows):
spectral_images[i,:,:] = Sxx[:,idx1[i]:idx2[i]]
self.nwindows = nWindows
nWindows, self.d1, self.d2 = np.shape(spectral_images)
#self.new_d1, self.new_d2 = np.exp2(np.floor(np.log2([self.d1, self.d2])))
return spectral_images, nWindows, idx1, idx2
#/ resizes each spectral image to specified dimensions
def _resize_spectral_images(self, spectral_images, new_d1, new_d2):
new_spectral_images = np.zeros([self.nwindows,new_d1,new_d2])
for i in range(self.nwindows):
new_spectral_images[i,:,:] = imresize(spectral_images[i,:,:], (new_d1, new_d2), interp='bilinear', mode='F')
return new_spectral_images
#/ reshapes output from PyWavelets 2d wavelet transform into image
def _unwrap_wavelet_coeffs(self,coeffs):
L = len(coeffs)
cA = coeffs[0]
for i in range(1,L):
(cH, cV, cD) = coeffs[i]
cA = np.concatenate((np.concatenate((cA, cV),axis= 1),np.concatenate((cH, cD),axis = 1)),axis=0)
return cA
#/ computes wavelet transform for each spectral image
def spectral_images_to_wavelet(self, spectral_images, wavelet = wt.Wavelet('db1')):
if (int(self.new_d1)!=self.d1) or (int(self.new_d2)!=self.d2):
spectral_images = self._resize_spectral_images(spectral_images, self.new_d1, self.new_d2)
haar_images = np.zeros([self.nwindows,self.new_d1,self.new_d2])
for i in range(self.nwindows):
coeffs = wt.wavedec2(spectral_images[i,:,:], wavelet)
haar_images[i,:,:] = self._unwrap_wavelet_coeffs(coeffs)
return haar_images
    #/ computes (normalized) haar_images from continuous timeseries data
def data_to_haar_images(self, x_data):
f, t, Sxx = self.data_to_spectrogram(x_data)
spectral_images, nWindows, idx1, idx2 = self.spectrogram_to_spectral_images(Sxx)
haar_images = self.spectral_images_to_wavelet(spectral_images)
haar_images = normalize(self._images_to_vectors(haar_images), axis=1)
return haar_images, nWindows, idx1, idx2, Sxx, t
#/ converts set of images to array of vectors
def _images_to_vectors(self,images):
N,d1,d2 = np.shape(images)
vectors = np.zeros([N,d1*d2])
for i in range(N):
vectors[i,:] = np.reshape(images[i,:,:], (1,d1*d2))
return vectors
#/ converts set of vectors into set of images (of dimension d1 x d2)
def _vectors_to_images(self, vectors, d1, d2):
N,D = np.shape(vectors)
if D != d1*d2:
print('warning: invalid dimensions')
return vectors
else:
images = np.zeros([N,d1,d2])
for i in range(N):
images[i,:,:] = np.reshape(vectors[i,:], (d1,d2))
return images
    def compute_haar_stats(self, haar_images, type=None):
        if type == 'MAD':
shape = haar_images.shape
medians = []
for i in range(shape[1]):
medians.append(np.median(haar_images[:, i]))
self.haar_medians = np.array(medians)
mad = []
for i in range(shape[1]):
tmp = abs(haar_images[:, i] - medians[i])
mad.append(np.median(tmp))
self.haar_absdevs = np.array(mad)
return self.haar_medians, self.haar_absdevs
        if type == 'Zscore':
self.haar_means = np.mean(haar_images,axis=0)
self.haar_stddevs = np.std(haar_images,axis=0)
return self.haar_means, self.haar_stddevs
    def standardize_haar(self, haar_images, type='MAD'):
        if type == 'Zscore':
haar_images = (haar_images - self.haar_means)/self.haar_stddevs
return haar_images
        elif type == 'MAD':
haar_images = (haar_images - self.haar_medians)/self.haar_absdevs
return haar_images
else:
print('Warning: invalid type - select type MAD or Zscore')
return None
def binarize_vectors_topK_sign(self, coeff_vectors, K):
self.K = K
N,M = np.shape(coeff_vectors)
binary_vectors = np.zeros((N,2*M), dtype=bool)
for i in range(N):
idx = np.argsort(abs(coeff_vectors[i,:]))[-K:]
binary_vectors[i,idx] = coeff_vectors[i,idx] > 0
binary_vectors[i,idx+M] = coeff_vectors[i,idx] < 0
return binary_vectors
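    #/ e.g. for a single vector [0.1, -0.9, 0.5] with K=2 the two largest-magnitude
    #/ coefficients sit at indices 1 and 2, so the length-2*M output marks index 2
    #/ in the positive half and index 1+M=4 in the negative half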
def vectors_to_topK_sign(self, coeff_vectors, K):
self.K = K
N,M = np.shape(coeff_vectors)
sign_vectors = np.zeros([N,M])
for i in range(N):
idx = np.argsort(abs(coeff_vectors[i,:]))[-K:]
sign_vectors[i,idx] = np.sign(coeff_vectors[i,idx])
return sign_vectors
def sign_to_binary(self, vector):
L = len(vector)
new_vec = np.zeros((L,2), dtype=bool)
new_vec[:,0] = vector > 0
new_vec[:,1] = vector < 0
return np.reshape(new_vec, (1,2*L))
def binarize_vectors_topK(self, coeff_vectors, K):
self.K = K
N,M = np.shape(coeff_vectors)
sign_vectors = np.zeros((N,M),dtype=bool)
for i in range(N):
idx = np.argsort(coeff_vectors[i,:])[-K:]
sign_vectors[i,idx] = 1
return sign_vectors
def jaccard_sim(self, vec1, vec2):
return sum(vec1 & vec2)/ (1.0*sum(vec1 | vec2))
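    #/ e.g. for boolean arrays [1, 0, 1] and [1, 1, 0] the intersection has 1
    #/ element and the union has 3, so jaccard_sim returns 1/3
# A minimal end-to-end usage sketch (synthetic data and assumed parameter
# values; not part of the original module):
# import numpy as np
# feats = FeatureExtractor(sampling_rate=100, window_length=10.0, window_lag=1.0,
#                          fingerprint_length=64, fingerprint_lag=32)
# x = np.random.randn(100 * 600)  # ten minutes of synthetic samples at 100 Hz
# haar_images, nWindows, idx1, idx2, Sxx, t = feats.data_to_haar_images(x)
# fingerprints = feats.binarize_vectors_topK_sign(haar_images, K=200)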
|
import logging
import random
from .Genericgeneration import Generation
class Classify(Generation):
""" Classification class for classifying and categorizing number guess.
Attributes:
n(integer) - number of attempts
"""
def __init__(self, n):
super().__init__(n)
def guess_number(self):
"""Function to read in number entered by user in n attempts.
Compare the user entered number with the number to be guessed.
If any of guessed numbers entered by the user is greater than,
lesser than or equal to the number generated by randint function then
classify accordingly and break from loop using the loop control
statement(break)
Args:
guess(integer): the user entered number
Returns:
string: characteristics of guessed number compared to generated
number by randint function
"""
number = random.randint(1, 20)
attempts = 0
while attempts < self.n:
# Enter a number between 1 to 20
guess = int(input("Enter numbers between 1 and 20: "))
if guess == number:
                logging.info('Congratulations, YOU WON!!!')
logging.info('You may start over!!!')
break
elif guess > number:
logging.info(f"Your guess is too high, Guess a number lower than: {guess}")
else:
logging.info(f"Your guess is too low, Guess a number higher than: {guess}")
attempts += 1
if attempts == self.n:
logging.info(f"Sorry.. YOU LOSE!!! The number is: {number} ")
logging.info("You may start over!!!! ")
|
curso = 'Programacao em Python Essencial'
def funcao2():
return curso
|
from datetime import datetime
from flask import g, has_request_context, request, abort
from gatekeeping.db import get_db
from gatekeeping.api.audit import create_audit_log
from gatekeeping.api.user import get_users_with_password_reset_tokens
from gatekeeping.keys import mail
from gatekeeping.api.submission import (get_submissions_effective_date_today, get_submission_changes)
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import smtplib
import schedule
import time
import pandas as pd
ALLOWED_EXTENSIONS = set(['txt', 'csv', 'tsv'])
LOGIN_REQUIRED_VIEWS = ['home', 'submission', 'position', 'help', 'faq']
ADMIN_REQUIRED_VIEWS = ['admin']
def allowed_file(filename):
"""Checks file type and name"""
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def update_position():
""" update positions for approved submissions """
db = get_db()
submissions = get_submissions_effective_date_today()
if submissions:
for submission in submissions:
submission_changes = get_submission_changes(submission['id'])
for change in submission_changes:
                # The column name cannot be bound as a SQL parameter, so it is
                # interpolated directly; change['field'] comes from our own submission data.
                db.execute(
                    'UPDATE position SET {} = ? WHERE position_id = ?'.format(change['field']),
                    (change['value'], submission['position_id'])
                )
db.commit()
# create audit trail in db
create_audit_log('127.0.0.1', 'Server', '127.0.0.1', action='Successfully updated position {} on submission effective date'.format(submission['position_id']), table='position', function='UPDATE')
else:
# create audit trail in db
create_audit_log('127.0.0.1', 'Server', '127.0.0.1', action='No positions to update today', table='', function='')
def expire_password_reset_tokens():
""" expire all password tokens """
db = get_db()
users = get_users_with_password_reset_tokens()
if users:
for user in users:
if user['password_reset_token']:
                db.execute(
                    'UPDATE user SET password_reset_token = ? WHERE id = ?',
                    ('', user['id'])
                )
# create audit trail in db
create_audit_log('127.0.0.1', 'Server', '127.0.0.1', action='Successfully expired password token for user {}'.format(user['email']), table='user', function='UPDATE')
db.commit()
else:
# create audit trail in db
create_audit_log('127.0.0.1', 'Server', '127.0.0.1', action='No password tokens to expire today', table='', function='')
def run_scheduler():
while True:
schedule.run_pending()
time.sleep(1)
def send_mail(send_from, send_to, subject, text, files=None,
              server="127.0.0.1"):
    """ Sends an email to stakeholders using microsoft mail """
    assert isinstance(send_to, list)
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = COMMASPACE.join(send_to)
msg['Date'] = formatdate(localtime=True)
msg['Subject'] = subject
msg.attach(MIMEText(text))
for f in files or []:
with open(f, "rb") as fil:
part = MIMEApplication(
fil.read(),
Name=basename(f)
)
# After the file is closed
part['Content-Disposition'] = 'attachment; filename="%s"' % basename(f)
msg.attach(part)
smtp = smtplib.SMTP(server, 587)
smtp.starttls()
smtp.login(mail[0], mail[1])
smtp.sendmail(send_from, send_to, msg.as_string())
smtp.close()
def is_upload_valid(df):
    """Checks the contents of the upload: every required column must be non-null in every row"""
    required_columns = ['Status', 'Recruitment Status', 'Number', 'Pillar',
                        'Company', 'Department', 'Function', 'Title', 'FTE']
    for index, row in df.iterrows():
        for column in required_columns:
            if pd.isna(row[column]):
                print(column, 'is null at index', index)
                return False
    return True
|
print('----------------')
print('ATM')
print('----------------')
cd = x = 1
sc = 0
print('\n\033[4;33mAvailable notes: [R$50,00] [R$20,00] [R$10,00] [R$1,00]\033[m')
v = int(input('\nEnter the whole amount you want to withdraw: R$'))
while True:
    cd = v // x % 10
    if cd == 0:
        break
    else:
        if x == 1:
            print(cd, 'note(s) of R$1,00')
        elif x == 10:
            if cd % 2 == 0:
                print(int(cd / 2), 'note(s) of R$20,00')
            elif (cd - 1) % 2 == 0 and cd % 5 != 0:
                print('1 note(s) of R$10,00')
                print(int((cd - 1) / 2), 'note(s) of R$20,00')
            elif cd % 5 == 0:
                sc = cd / 5
            else:
                print(cd, 'note(s) of R$10,00')
        else:
            sc += (cd / 5) * (x / 10)
    x = x * 10
if sc != 0:
    print(int(sc), 'note(s) of R$50,00')
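# Worked trace of the digit-by-digit breakdown above: withdrawing R$137 prints
# 7 note(s) of R$1,00 (units digit), then 1 note of R$10,00 and 1 note of
# R$20,00 (tens digit 3), and finally 2 note(s) of R$50,00 accumulated in sc
# from the hundreds digit (7 + 10 + 20 + 100 = 137).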
|
import streamlit as st
import os
from werkzeug.utils import secure_filename
import logging
from pathlib import Path
import ocrmypdf
import zipfile
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
ALLOWED_EXTENSIONS = set(['pdf'])
CWD = Path(os.getcwd())
UPLOAD_DIRECTORY = CWD / "results_OCR"
if not os.path.exists(UPLOAD_DIRECTORY):
os.makedirs(UPLOAD_DIRECTORY)
st.title('OCR my PDF')
uploaded_files = st.file_uploader('Upload your file(s)', accept_multiple_files=True)
#if uploaded_files is not None:
list_pdf = []
list_txt = []
for uploaded_file in uploaded_files:
filename = uploaded_file.name
if filename[-3:].lower() in ALLOWED_EXTENSIONS:
filename = secure_filename(filename) #make sure we have a proper filename
logger.info(f'**found {filename}')
full_filename = UPLOAD_DIRECTORY / filename
ocrmypdf.ocr(uploaded_file, full_filename.with_suffix(".pdf"), sidecar=full_filename.with_suffix('.txt'), l="fra")
#uploaded_file.seek(0)
list_pdf.append(full_filename.with_suffix(".pdf"))
list_txt.append(full_filename.with_suffix(".txt"))
if len(list_txt)==1:
pdf_to_download = full_filename.with_suffix(".pdf")
pdf_name = filename
txt_to_download = full_filename.with_suffix(".txt")
txt_name = '{}.txt'.format(filename[:-4])
if len(list_txt) > 1:
pdf_to_download = UPLOAD_DIRECTORY / "folder_pdf.zip"
pdf_name = "folder_pdf.zip"
txt_to_download = UPLOAD_DIRECTORY / "folder_txt.zip"
txt_name = "folder_txt.zip"
with zipfile.ZipFile(UPLOAD_DIRECTORY / "folder_txt.zip", 'w') as zipMe:
for file in list_txt:
zipMe.write(file, os.path.basename(file), compress_type=zipfile.ZIP_DEFLATED)
with zipfile.ZipFile(UPLOAD_DIRECTORY / "folder_pdf.zip", 'w') as zipMe:
for file in list_pdf:
zipMe.write(file, os.path.basename(file), compress_type=zipfile.ZIP_DEFLATED)
if len(list_txt) > 0:
with open(txt_to_download, 'rb') as f:
st.download_button('Download Txt', f, file_name=txt_name) # Defaults to 'application/octet-stream'
with open(pdf_to_download, 'rb') as f:
st.download_button('Download PDF', f, file_name=pdf_name)
files = [os.path.join(UPLOAD_DIRECTORY, x) for x in os.listdir(UPLOAD_DIRECTORY)]
for f in files:
print(f)
os.remove(f)
|
import unittest
from katas.kyu_6.which_are_in import in_array
class InArrayTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(in_array(
['live', 'arp', 'strong'],
['lively', 'alive', 'harp', 'sharp', 'armstrong']),
['arp', 'live', 'strong'])
|
import os
import tempfile
import pyblish.api
from pype.vendor import clique
import pype.api
class ExtractReviewSP(pyblish.api.InstancePlugin):
"""Extracting Review mov file for Ftrack
Compulsory attribute of representation is tags list with "review",
otherwise the representation is ignored.
    All new representations are created and encoded by ffmpeg following presets
    found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`.
    To change the file-extension filter values, use the preset attribute `ext_filter`.
"""
label = "Extract Review SP"
order = pyblish.api.ExtractorOrder + 0.02
families = ["review"]
hosts = ["standalonepublisher"]
def process(self, instance):
# adding plugin attributes from presets
presets = instance.context.data["presets"]
try:
publish_presets = presets["plugins"]["standalonepublisher"]["publish"]
plugin_attrs = publish_presets[self.__class__.__name__]
except KeyError:
raise KeyError("Preset for plugin \"{}\" are not set".format(
self.__class__.__name__
))
output_profiles = plugin_attrs.get("outputs", {})
fps = instance.data.get("fps")
start_frame = instance.data.get("frameStart")
self.log.debug("Families In: `{}`".format(instance.data["families"]))
# get specific profile if was defined
specific_profiles = instance.data.get("repreProfiles")
new_repres = []
# filter out mov and img sequences
for repre in instance.data["representations"]:
tags = repre.get("tags", [])
if "review" not in tags:
continue
staging_dir = repre["stagingDir"]
for name in specific_profiles:
profile = output_profiles.get(name)
if not profile:
self.log.warning(
"Profile \"{}\" was not found in presets".format(name)
)
continue
self.log.debug("Processing profile: {}".format(name))
ext = profile.get("ext", None)
if not ext:
ext = "mov"
self.log.debug((
"`ext` attribute not in output profile \"{}\"."
" Setting to default ext: `mov`"
).format(name))
if isinstance(repre["files"], list):
collections, remainder = clique.assemble(repre["files"])
full_input_path = os.path.join(
staging_dir,
collections[0].format("{head}{padding}{tail}")
)
filename = collections[0].format('{head}')
if filename.endswith("."):
filename = filename[:-1]
else:
full_input_path = os.path.join(staging_dir, repre["files"])
filename = repre["files"].split(".")[0]
# prepare output file
repr_file = filename + "_{0}.{1}".format(name, ext)
                out_staging_dir = tempfile.mkdtemp(prefix="extract_review_")
                full_output_path = os.path.join(out_staging_dir, repr_file)
self.log.info("input {}".format(full_input_path))
self.log.info("output {}".format(full_output_path))
repre_new = repre.copy()
new_tags = [x for x in tags if x != "delete"]
p_tags = profile.get("tags", [])
self.log.info("p_tags: `{}`".format(p_tags))
for _tag in p_tags:
if _tag not in new_tags:
new_tags.append(_tag)
self.log.info("new_tags: `{}`".format(new_tags))
input_args = []
# overrides output file
input_args.append("-y")
# preset's input data
input_args.extend(profile.get("input", []))
# necessary input data
# adds start arg only if image sequence
if isinstance(repre["files"], list):
input_args.extend([
"-start_number {}".format(start_frame),
"-framerate {}".format(fps)
])
input_args.append("-i {}".format(full_input_path))
output_args = []
# preset's output data
output_args.extend(profile.get("output", []))
if isinstance(repre["files"], list):
# set length of video by len of inserted files
video_len = len(repre["files"])
else:
video_len = repre["frameEnd"] - repre["frameStart"] + 1
output_args.append(
"-frames {}".format(video_len)
)
# letter_box
lb_string = (
"-filter:v "
"drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,"
"drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:"
"round((ih-(iw*(1/{0})))/2):t=fill:c=black"
)
letter_box = profile.get("letter_box", None)
if letter_box:
output_args.append(lb_string.format(letter_box))
# output filename
output_args.append(full_output_path)
ffmpeg_path = os.getenv("FFMPEG_PATH", "")
if ffmpeg_path:
ffmpeg_path += "/ffmpeg"
else:
ffmpeg_path = "ffmpeg"
mov_args = [
ffmpeg_path,
" ".join(input_args),
" ".join(output_args)
]
subprcs_cmd = " ".join(mov_args)
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
# create representation data
repre_new.update({
"name": name,
"ext": ext,
"files": repr_file,
"stagingDir": out_stagigng_dir,
"tags": new_tags,
"outputName": name,
"startFrameReview": 1,
"endFrameReview": video_len
})
# cleanup thumbnail from new repre
if repre_new.get("thumbnail"):
repre_new.pop("thumbnail")
if "thumbnail" in repre_new["tags"]:
repre_new["tags"].remove("thumbnail")
# adding representation
self.log.debug("Adding: {}".format(repre_new))
# cleanup repre from preview
if "preview" in repre:
repre.pop("preview")
if "preview" in repre["tags"]:
repre["tags"].remove("preview")
new_repres.append(repre_new)
for repre in instance.data["representations"]:
if "delete" in repre.get("tags", []):
instance.data["representations"].remove(repre)
for repre in new_repres:
self.log.debug("Adding repre: \"{}\"".format(
repre
))
instance.data["representations"].append(repre)
|
# Advent of Code 2020
# Day 19
# Author: irobin591
import os
import doctest
import re
with open(os.path.join(os.path.dirname(__file__), "input.txt"), 'r') as input_file:
input_data = input_file.read().strip().split('\n')
# Prep Input
# input_data = list(map(int, input_data.strip().split('\n')))
re_rule_int = re.compile(r'^(\d+): ([\d |]+)$')
re_rule_char = re.compile(r'^(\d+): "([a-z])"$')
re_rule_input = re.compile(r'^([ab]+)$')
def check_rule(string, rule_to_check, ruleset):
# end of string and still a rule to check: no success (and no str to return)
if len(string) == 0:
return
# if rule is a char rule, compare the first char and return the rest as remaining
if type(rule_to_check) == str:
if rule_to_check == string[0]:
yield string[1:]
return
# for each or (x | x) rule:
for rules in rule_to_check:
# start with input string as valid remaining string
remaining_strings = [string]
valid = True
# check all remaining strings for each sequential rule
for rule in rules:
remaining_strings_next = []
for remaining_string in remaining_strings:
results = check_rule(remaining_string, ruleset[rule], ruleset)
tmp = list(results)
remaining_strings_next.extend(tmp)
remaining_strings = remaining_strings_next
# if a remaining string is available: yield all
if len(remaining_strings):
yield from remaining_strings
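# Usage sketch for check_rule (hypothetical ruleset, illustration only):
#   ruleset = {0: [[1, 2]], 1: "a", 2: [[1, 1], [3]], 3: "b"}
#   list(check_rule("ab", ruleset[0], ruleset))   # -> [""]  (full match)
#   list(check_rule("aab", ruleset[0], ruleset))  # -> []    (no match)
# An empty string among the yielded remainders means the whole input matched.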
def part1(input_data):
"""
>>> part1(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
2
"""
rules = {}
count_valid_inputs = 0
for line in input_data:
# Regex for combined rule
r_rule_int = re_rule_int.match(line)
if r_rule_int:
# Add rule int:
rule_id, rule_set = r_rule_int.groups()
rules[int(rule_id)] = [list(map(int, x.split(' '))) for x in rule_set.split(' | ')]
continue
# Regex for final rules (resulting in a character)
r_rule_char = re_rule_char.match(line)
if r_rule_char:
# Add rule int:
rule_id, rule_char = r_rule_char.groups()
rules[int(rule_id)] = str(rule_char)
continue
# Regex for test input (rules have to be progressed first)
r_rule_input = re_rule_input.match(line)
if r_rule_input:
remaining_strings = list(check_rule(r_rule_input.group(1), rules[0], rules))
if "" in remaining_strings:
count_valid_inputs += 1
continue
return count_valid_inputs
def part2(input_data):
"""
>>> part2(open(os.path.join(os.path.dirname(__file__), "test_part2.txt"), 'r').read().strip().split('\\n'))
12
"""
rules = {}
count_valid_inputs = 0
for line in input_data:
# Regex for combined rule
r_rule_int = re_rule_int.match(line)
if r_rule_int:
# Add rule int:
rule_id, rule_set = r_rule_int.groups()
# OVERRIDE RULES 8 AND 11
if rule_id == '8':
rule_set = "42 | 42 8"
if rule_id == '11':
rule_set = "42 31 | 42 11 31"
rules[int(rule_id)] = [list(map(int, x.split(' '))) for x in rule_set.split(' | ')]
continue
# Regex for final rules (resulting in a character)
r_rule_char = re_rule_char.match(line)
if r_rule_char:
# Add rule int:
rule_id, rule_char = r_rule_char.groups()
rules[int(rule_id)] = str(rule_char)
continue
# Regex for test input (rules have to be progressed first)
r_rule_input = re_rule_input.match(line)
if r_rule_input:
# Output all possible remaining strings
remaining_strings = list(check_rule(r_rule_input.group(1), rules[0], rules))
# Check if "" is a remaining string
# (this means that everything has been successfully parsed
# and nothing remained afterwards)
if "" in remaining_strings:
count_valid_inputs += 1
continue
return count_valid_inputs
if __name__ == "__main__":
doctest.testmod()
print("Part One: {}".format(part1(input_data)))
print("Part Two: {}".format(part2(input_data)))
|
'''A Pure Python DB-API 2.0 compliant interface to ThinkSQL.
Copyright © 2000-2012 Greg Gaughan
http://www.thinksql.co.uk
'''
from string import split,replace
import struct
import cStringIO
import warnings
import socket
import datetime
try:
from fixedpoint import FixedPoint
have_fixedpoint = True
except ImportError:
have_fixedpoint = False
apilevel='2.0'
threadsafety=1
paramstyle='qmark'
__copyright__ = "(c)Copyright 2000-2012 Greg Gaughan"
__author__ = "Greg Gaughan"
def connect(dsn=None, user=None, password=None, host=None,
database=None, port=None):
'''Create and open a connection.
connect(dsn=None, user='DEFAULT', password='', host='localhost',
database='', port=9075)
Returns a connection object.
'''
#Defaults
_d = {'host':'localhost','port':9075, 'database':'',
'user':'DEFAULT', 'password':''}
#todo Parse any DSN
#Now use any keywords
if (user is not None): _d['user'] = user
if (password is not None): _d['password'] = password
if (host is not None): _d['host'] = host
try:
params = split(host,':')
_d['host'] = params[0]
_d['port'] = params[1]
except:
pass
if (database is not None): _d['database']=database
if (port is not None):_d['port'] = port
return connection(_d['host'], _d['port'], _d['database'],
_d['user'], _d['password'])
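#Example usage (sketch only; the table name below is hypothetical and not
#part of this module):
#  con = connect(host='localhost', user='DEFAULT', password='')
#  cur = con.cursor()
#  cur.execute('SELECT * FROM mytable WHERE id=?', (1,))
#  print cur.fetchall()
#  cur.close()
#  con.close()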
#Connection class -------------------------------------------------------------
class connection:
def __init__(self, host=None, port=None, catalog=None,
user=None, password=None):
self.state=_stateClosed
self.marshalBuffer=marshalBuffer(host, port)
self._Server=''
self._Catalog=catalog
self._Username=user
self._Password=password
self._Autocommit=False #Python has it right, unlike ODBC, JDBC etc!
self.resultErrCode=None
self.resultErrText=''
if self.marshalBuffer.sendHandshake()!=_ok: #send raw handshake
raise HandshakeError, str(_seHandshakeFailed)+' '+_seHandshakeFailedText
#Now the server knows we're using CLI protocol we can use the marshal buffer
self.marshalBuffer.putSQLUSMALLINT(_clientCLIversion) #special version marker for initial protocol handshake
if _clientCLIversion>=93:
self.marshalBuffer.putSQLUSMALLINT(_CLI_PYTHON_DBAPI)
if self.marshalBuffer.send()!=_ok:
raise HandshakeError, str(_seHandshakeFailed)+' '+_seHandshakeFailedText
#Wait for handshake response
if self.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self.marshalBuffer.getFunction()
if functionId!=_SQL_API_handshake:
raise HandshakeError, str(_seHandshakeFailed)+' '+_seHandshakeFailedText
self.serverCLIversion=self.marshalBuffer.getSQLUSMALLINT() #note server's protocol
if self.serverCLIversion>=93:
self.serverTransactionKey=self.marshalBuffer.getSQLPOINTER() #note server's transaction key
#Now SQLconnect
self.marshalBuffer.clearToSend()
self.marshalBuffer.putFunction(_SQL_API_SQLCONNECT)
self.marshalBuffer.putSQLHDBC(0) #(int)(self)
if self._Catalog=='':
self.marshalBuffer.putpUCHAR_SWORD(self._Server)
else:
self.marshalBuffer.putpUCHAR_SWORD(self._Server+'.'+self._Catalog)
self.marshalBuffer.putpUCHAR_SWORD(self._Username)
self.marshalBuffer.putpUCHAR_SWORD(self._Password) #todo encrypt
if self.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for read to return the response
if self.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLCONNECT:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self.marshalBuffer.getSQLINTEGER()
self.resultErrText=self.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
#Ok, we're connected
self.state=_stateOpen
def cursor(self):
'''Create a new cursor on the connection.
'''
if self.state!=_stateOpen:
raise InterfaceError, str(_seConnectionNotOpen)+' '+_seConnectionNotOpenText
return cursor(self)
def close(self):
'''Close the connection.
'''
#Negotiate disconnection
if self.state!=_stateOpen:
raise InterfaceError, str(_seConnectionNotOpen)+' '+_seConnectionNotOpenText
self.marshalBuffer.clearToSend()
self.marshalBuffer.putFunction(_SQL_API_SQLDISCONNECT)
self.marshalBuffer.putSQLHDBC(0) #(int)(self)
if self.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for read to return the response
if self.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLDISCONNECT:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self.marshalBuffer.getSQLINTEGER()
self.resultErrText=self.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
#Try to disconnect from the server
self.marshalBuffer.close() #close this connection
self.state=_stateClosed
def commit(self):
'''Commit the current transaction.
'''
#Send SQLendTran to server
self.marshalBuffer.clearToSend()
self.marshalBuffer.putFunction(_SQL_API_SQLENDTRAN)
self.marshalBuffer.putSQLHDBC(0) #todo pass TranId?
self.marshalBuffer.putSQLSMALLINT(_SQL_COMMIT)
if self.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for read to return the response
if self.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLENDTRAN:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self.marshalBuffer.getSQLINTEGER()
self.resultErrText=self.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
def rollback(self):
'''Rollback the current transaction.
'''
#Send SQLendTran to server
self.marshalBuffer.clearToSend()
self.marshalBuffer.putFunction(_SQL_API_SQLENDTRAN)
self.marshalBuffer.putSQLHDBC(0) #todo pass TranId?
self.marshalBuffer.putSQLSMALLINT(_SQL_ROLLBACK)
if self.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for read to return the response
if self.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLENDTRAN:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self.marshalBuffer.getSQLINTEGER()
self.resultErrText=self.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
def __del__(self):
if self.state==_stateOpen:
self.close()
#Cursor class -----------------------------------------------------------------
class cursor(object):
def __init__(self, conn):
'''Open a new cursor (statement) on the specified connection.
'''
self._Con=conn
self.colCount=None
self.col=[]
self.paramCount=None
self.param=[]
self.serverStatementHandle=-1 #=> not connected
self.resultSet=False
self.prepared=False
self._affectedRowCount=None
self.lastSQL=None
self.lastArraySize=1 #server default
self._description=None
self.resultErrCode=None
self.resultErrText=''
#We notify the server of this new command(=stmt)
self._Con.marshalBuffer.clearToSend()
self._Con.marshalBuffer.putFunction(_SQL_API_SQLALLOCHANDLE)
self._Con.marshalBuffer.putSQLSMALLINT(_SQL_HANDLE_STMT)
self._Con.marshalBuffer.putSQLHDBC(0) #todo _Con
if self._Con.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for response
if self._Con.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self._Con.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLALLOCHANDLE:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
serverS=self._Con.marshalBuffer.getSQLHSTMT() #server will return 0 if failed
self.resultCode=self._Con.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self._Con.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self._Con.marshalBuffer.getSQLINTEGER()
self.resultErrText=self._Con.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
self.serverStatementHandle=serverS #we will pass this reference to server in future calls
def close(self):
'''Close the cursor.
'''
if self.serverStatementHandle<=0:
pass
else:
self._resultSetClose() #close the server cursor, if any
#Free the server handle
self._Con.marshalBuffer.clearToSend()
self._Con.marshalBuffer.putFunction(_SQL_API_SQLFREEHANDLE)
self._Con.marshalBuffer.putSQLSMALLINT(_SQL_HANDLE_STMT)
self._Con.marshalBuffer.putSQLHSTMT(self.serverStatementHandle) #pass server statement ref
if self._Con.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for response
if self._Con.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self._Con.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLFREEHANDLE:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self._Con.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self._Con.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self._Con.marshalBuffer.getSQLINTEGER()
self.resultErrText=self._Con.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
self.serverStatementHandle=-1 #not connected
def _resultSetClose(self):
if self.resultSet:
self._Con.marshalBuffer.clearToSend()
self._Con.marshalBuffer.putFunction(_SQL_API_SQLCLOSECURSOR)
self._Con.marshalBuffer.putSQLHSTMT(self.serverStatementHandle) #pass server statement ref
if self.prepared:
self._Con.marshalBuffer.putSQLSMALLINT(0) #keep server plan
else:
self._Con.marshalBuffer.putSQLSMALLINT(1) #remove server plan
if self._Con.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for response
if self._Con.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self._Con.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLCLOSECURSOR:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self._Con.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self._Con.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self._Con.marshalBuffer.getSQLINTEGER()
self.resultErrText=self._Con.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
def _doPrepare(self,sql):
self.lastSQL=sql
#call server prepare
self._Con.marshalBuffer.clearToSend()
self._Con.marshalBuffer.putFunction(_SQL_API_SQLPREPARE)
self._Con.marshalBuffer.putSQLHSTMT(self.serverStatementHandle)
self._Con.marshalBuffer.putpUCHAR_SDWORD(sql)
if self._Con.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for response
if self._Con.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self._Con.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLPREPARE:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self._Con.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self._Con.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self._Con.marshalBuffer.getSQLINTEGER()
self.resultErrText=self._Con.marshalBuffer.getpUCHAR_SWORD()
#raise later...
resSet=self._Con.marshalBuffer.getSQLUSMALLINT()
#Remember this for future state changes
if resSet==_SQL_TRUE:
self.resultSet=True
else:
self.resultSet=False
if self.resultCode==_SQL_ERROR:
raise ProgrammingError, str(self.resultErrCode)+' '+self.resultErrText
if self.resultSet:
#Now get the cursor column count & definitions
self.col=[]
self.colCount=self._Con.marshalBuffer.getSQLINTEGER()
for i in range(self.colCount):
rn=self._Con.marshalBuffer.getSQLSMALLINT()
self.newcol=_columnSQL()
self.newcol.iFldNum=rn
self.newcol.colName=self._Con.marshalBuffer.getpSQLCHAR_SWORD()[:-1] #remove trailing \0
self.newcol.iFldType=self._Con.marshalBuffer.getSQLSMALLINT()
if self._Con.serverCLIversion>=93:
self.newcol.iUnits1=self._Con.marshalBuffer.getSQLINTEGER()
else:
self.newcol.iUnits1=self._Con.marshalBuffer.getSQLSMALLINT()
self.newcol.iUnits2=self._Con.marshalBuffer.getSQLSMALLINT()
self.newcol.iNullOffset=self._Con.marshalBuffer.getSQLSMALLINT()
self.col.append(self.newcol)
#Now set the description
self._description=[]
for newcol in self.col:
if newcol.iFldType==_SQL_NUMERIC or newcol.iFldType==_SQL_DECIMAL:
dsize=newcol.iUnits1+newcol.iUnits2+1
isize=newcol.iUnits1+newcol.iUnits2
elif newcol.iFldType==_SQL_SMALLINT:
dsize=5
isize=2
elif newcol.iFldType==_SQL_INTEGER:
dsize=10
isize=4
elif newcol.iFldType==_SQL_REAL:
dsize=7
isize=4
elif newcol.iFldType==_SQL_FLOAT or newcol.iFldType==_SQL_DOUBLE:
dsize=15
isize=8
elif newcol.iFldType==_SQL_TYPE_DATE:
dsize=10
isize=4
elif newcol.iFldType==_SQL_TYPE_TIME:
dsize=8
if newcol.iUnits2>0:
dsize=9+newcol.iUnits2
isize=8
elif newcol.iFldType==_SQL_TYPE_TIMESTAMP:
dsize=19
if newcol.iUnits2>0:
dsize=20+newcol.iUnits2
isize=12
else:
dsize=newcol.iUnits1
isize=newcol.iUnits1
self._description.append((newcol.colName, newcol.iFldType,
dsize, isize, newcol.iUnits1, newcol.iUnits2, None))
else:
#no result set
self._description=None
self.colCount=0
#Now get the param count & definitions
self.paramCount=self._Con.marshalBuffer.getSQLINTEGER()
for i in range(self.paramCount):
rn=self._Con.marshalBuffer.getSQLSMALLINT()
self.newparam=_paramSQL()
self.newparam.iParamNum=rn
self.newparam.colName=self._Con.marshalBuffer.getpSQLCHAR_SWORD()
self.newparam.iDataType=self._Con.marshalBuffer.getSQLSMALLINT()
self.newparam.iArgType=_ptInput #default
if self._Con.serverCLIversion>=93:
self.newparam.iUnits1=self._Con.marshalBuffer.getSQLINTEGER()
else:
self.newparam.iUnits1=self._Con.marshalBuffer.getSQLSMALLINT()
self.newparam.iUnits2=self._Con.marshalBuffer.getSQLSMALLINT()
x=self._Con.marshalBuffer.getSQLSMALLINT()
self.param.append(self.newparam)
#Now auto-bind all the columns
for i in range(self.colCount):
self._Con.marshalBuffer.clearToSend()
self._Con.marshalBuffer.putFunction(_SQL_API_SQLSETDESCFIELD)
self._Con.marshalBuffer.putSQLHSTMT(self.serverStatementHandle) #pass server statement ref
self._Con.marshalBuffer.putSQLSMALLINT(_SQL_ATTR_APP_ROW_DESC)
self._Con.marshalBuffer.putSQLSMALLINT(i+1) #=colRef(-1) on the server
self._Con.marshalBuffer.putSQLSMALLINT(_SQL_DESC_DATA_POINTER)
self._Con.marshalBuffer.putSQLPOINTER(1) #= 0=unbound, else bound
self._Con.marshalBuffer.putSQLINTEGER(0)
if self._Con.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for response
if self._Con.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self._Con.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLSETDESCFIELD:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self._Con.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self._Con.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self._Con.marshalBuffer.getSQLINTEGER()
self.resultErrText=self._Con.marshalBuffer.getpUCHAR_SWORD()
#raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
def _doExecute(self, parameters):
self._Con.marshalBuffer.clearToSend()
self._Con.marshalBuffer.putFunction(_SQL_API_SQLEXECUTE)
self._Con.marshalBuffer.putSQLHSTMT(self.serverStatementHandle) #pass server statement ref
#Write row count
self._Con.marshalBuffer.putSQLUINTEGER(1)
for row in range(1,2):
#Now send the param count & data for this row
if parameters is None:
self._Con.marshalBuffer.putSQLINTEGER(0)
else:
self._Con.marshalBuffer.putSQLINTEGER(len(parameters))
for i in range(len(parameters)):
self._Con.marshalBuffer.putSQLSMALLINT((i+1))
#Put the data
#Put the null flag
tempNull=_SQL_FALSE #default
if parameters[i] is None:
tempNull=_SQL_TRUE
self._Con.marshalBuffer.putSQLSMALLINT(tempNull)
#Note: we only send length+data if not null
if tempNull==_SQL_FALSE:
if isinstance(parameters[i],BinaryString):
self._Con.marshalBuffer.putpDataSDWORD(str(parameters[i])) #omit \0 if BLOB
else :
self._Con.marshalBuffer.putpDataSDWORD(str(parameters[i])+chr(0))
if self._Con.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for response
if self._Con.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self._Con.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLEXECUTE:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self._Con.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self._Con.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self._Con.marshalBuffer.getSQLINTEGER()
self.resultErrText=self._Con.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
#Get the row count - only valid for insert/update/delete
self._affectedRowCount=self._Con.marshalBuffer.getSQLINTEGER()
if self._Con.serverCLIversion>=92:
#Now get any late (post-prepare) resultSet definition, i.e. for stored procedure return cursors
#False here doesn't mean we have no result set, it means we should use the details from SQLprepare
lateResSet=self._Con.marshalBuffer.getSQLUSMALLINT()
#Remember this for future state changes
if lateResSet==_SQL_TRUE:
self.resultSet=True
#else leave resultSet as was
if lateResSet==_SQL_TRUE:
#Now get the cursor column count & definitions
self.col=[]
self.colCount=self._Con.marshalBuffer.getSQLINTEGER()
for i in range(self.colCount):
rn=self._Con.marshalBuffer.getSQLSMALLINT()
self.newcol=_columnSQL()
self.newcol.iFldNum=rn
self.newcol.colName=self._Con.marshalBuffer.getpSQLCHAR_SWORD()[:-1] #remove trailing \0
self.newcol.iFldType=self._Con.marshalBuffer.getSQLSMALLINT()
if self._Con.serverCLIversion>=93:
self.newcol.iUnits1=self._Con.marshalBuffer.getSQLINTEGER()
else:
self.newcol.iUnits1=self._Con.marshalBuffer.getSQLSMALLINT()
self.newcol.iUnits2=self._Con.marshalBuffer.getSQLSMALLINT()
self.newcol.iNullOffset=self._Con.marshalBuffer.getSQLSMALLINT()
self.col.append(self.newcol)
#Now set the description
self._description=[]
                for newcol in self.col:
if newcol.iFldType==_SQL_NUMERIC or newcol.iFldType==_SQL_DECIMAL:
dsize=newcol.iUnits1+newcol.iUnits2+1
isize=newcol.iUnits1+newcol.iUnits2
elif newcol.iFldType==_SQL_SMALLINT:
dsize=5
isize=2
elif newcol.iFldType==_SQL_INTEGER:
dsize=10
isize=4
elif newcol.iFldType==_SQL_REAL:
dsize=7
isize=4
elif newcol.iFldType==_SQL_FLOAT or newcol.iFldType==_SQL_DOUBLE:
dsize=15
isize=8
elif newcol.iFldType==_SQL_TYPE_DATE:
dsize=10
isize=4
elif newcol.iFldType==_SQL_TYPE_TIME:
dsize=8
if newcol.iUnits2>0:
dsize=9+newcol.iUnits2
isize=8
elif newcol.iFldType==_SQL_TYPE_TIMESTAMP:
dsize=19
if newcol.iUnits2>0:
dsize=20+newcol.iUnits2
isize=12
else:
dsize=newcol.iUnits1
isize=newcol.iUnits1
self._description.append((newcol.colName, newcol.iFldType,
dsize, isize, newcol.iUnits1, newcol.iUnits2, None))
#else no late result set: leave as was (i.e. don't zeroise self.description)
#else young server cannot handle this
if self.resultCode==_SQL_SUCCESS or self.resultCode==_SQL_SUCCESS_WITH_INFO:
#we SQLendTran now if in autocommit mode & if not select/result-set
if not self.resultSet and self._Con._Autocommit:
self._Con.commit()
elif self.resultCode==_SQL_NEED_DATA:
if self.prepared:
rn=self._Con.marshalBuffer.getSQLSMALLINT() #this is the parameter id that's missing
raise InterfaceError, str(_seMissingParameter)+' '+_seMissingParameterText
def execute(self, operation=None, parameters=None):
'''Execute the specified SQL operation, possibly opening a result set.
'''
self._doPrepare(operation)
self._doExecute(parameters)
return None
def executemany(self, operation=None, seq_of_parameters=None):
'''Repeatedly execute the specified SQL operation, once for each set of
parameters in the sequence.
This is more efficient than repeatedly calling the execute method.
'''
self._doPrepare(operation)
self.prepared=True
for parameters in seq_of_parameters:
if self.resultSet: #close any existing query result set on this cursor before we re-execute
self._resultSetClose()
last=self._doExecute(parameters)
return last
def callproc(self, procname, parameters=None):
'''Call the specified stored procedure with the parameters.
May return a result set accessible using the fetchXXX methods.
'''
return self.execute('CALL '+procname+replace(replace(str(parameters),'[','('),']',')'))
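    #e.g. callproc('myproc', [1, 'x']) executes: CALL myproc(1, 'x')
    #('myproc' is illustrative; the parameter list is rendered via str(), so
    # quoting follows Python's repr of each value)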
def _setArraySize(self, arraySize=1):
'''Tell the server how many rows to return per fetch.
'''
self._Con.marshalBuffer.clearToSend()
self._Con.marshalBuffer.putFunction(_SQL_API_SQLSETDESCFIELD)
self._Con.marshalBuffer.putSQLHSTMT(self.serverStatementHandle)
self._Con.marshalBuffer.putSQLSMALLINT(_SQL_ATTR_APP_ROW_DESC)
self._Con.marshalBuffer.putSQLSMALLINT(0)
self._Con.marshalBuffer.putSQLSMALLINT(_SQL_DESC_ARRAY_SIZE)
self._Con.marshalBuffer.putSQLUINTEGER(arraySize)
self._Con.marshalBuffer.putSQLINTEGER(0) #bufferLength = n/a here
if self._Con.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for response
if self._Con.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self._Con.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLSETDESCFIELD:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.resultCode=self._Con.marshalBuffer.getRETCODE()
#if error, then get error details: local-number, default-text
self.errCount=self._Con.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self._Con.marshalBuffer.getSQLINTEGER()
self.resultErrText=self._Con.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
#record the latest value
self.lastArraySize=arraySize
def _fetch(self, arraySize=None):
if self.resultSet:
res=[]
if arraySize!=None and arraySize!=self.lastArraySize:
self._setArraySize(arraySize)
#call server fetchScroll
self._Con.marshalBuffer.clearToSend()
self._Con.marshalBuffer.putFunction(_SQL_API_SQLFETCHSCROLL)
self._Con.marshalBuffer.putSQLHSTMT(self.serverStatementHandle)
self._Con.marshalBuffer.putSQLSMALLINT(_SQL_FETCH_NEXT)
self._Con.marshalBuffer.putSQLINTEGER(0)
if self._Con.marshalBuffer.send()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#Wait for response
if self._Con.marshalBuffer.read()!=_ok:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
functionId=self._Con.marshalBuffer.getFunction()
if functionId!=_SQL_API_SQLFETCHSCROLL:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
#resultCode comes later: first we retrieve any result data
#Read row count
rowCount=self._Con.marshalBuffer.getSQLUINTEGER()
for row in range(rowCount):
rowres=[]
#Now get the col count & data for this row
self.colCount=self._Con.marshalBuffer.getSQLINTEGER()
for i in range(self.colCount):
rn=self._Con.marshalBuffer.getSQLSMALLINT()
if rn<=self.colCount:
#Get the null flag
tempNull=self._Con.marshalBuffer.getSQLSMALLINT()
if tempNull==_SQL_TRUE:
rowres.append(None)
else:
#Note: we only get length+data if not null
self.col[i].data=self._Con.marshalBuffer.getpDataSDWORD()
#Convert the raw data to Python data
if self.col[i].iFldType==_SQL_CHAR or self.col[i].iFldType==_SQL_VARCHAR:
rowres.append(self.col[i].data)
elif self.col[i].iFldType==_SQL_NUMERIC or self.col[i].iFldType==_SQL_DECIMAL:
if have_fixedpoint:
fp=str(struct.unpack('<q',self.col[i].data)[0])
fp=fp[:len(fp)-self.col[i].iUnits2]+'.'+fp[len(fp)-self.col[i].iUnits2:]
rowres.append(FixedPoint(fp,self.col[i].iUnits2))
else:
rowres.append((struct.unpack('<q',self.col[i].data)[0]) / float(10**self.col[i].iUnits2)) #i.e. shift scale decimal places to the right
elif self.col[i].iFldType==_SQL_INTEGER:
rowres.append(struct.unpack('<i',self.col[i].data)[0])
elif self.col[i].iFldType==_SQL_SMALLINT:
rowres.append(struct.unpack('<h',self.col[i].data)[0])
elif self.col[i].iFldType==_SQL_FLOAT or self.col[i].iFldType==_SQL_REAL or self.col[i].iFldType==_SQL_DOUBLE:
rowres.append(struct.unpack('<d',self.col[i].data)[0])
elif self.col[i].iFldType==_SQL_TYPE_DATE:
p=struct.unpack(_sqlDate,self.col[i].data)
rowres.append(datetime.date(p[0],p[1],p[2]))
elif self.col[i].iFldType==_SQL_TYPE_TIME:
p=struct.unpack(_sqlTime,self.col[i].data)
rowres.append(datetime.time(p[0],p[1],int(p[2] / float(10**_TIME_MAX_SCALE)))) #todo Adjust the scale? p[3]
elif self.col[i].iFldType==_SQL_TYPE_TIMESTAMP:
p=struct.unpack(_sqlTimestamp,self.col[i].data)
rowres.append(datetime.datetime(p[0],p[1],p[2],p[3],p[4],int(p[5] / float(10**_TIME_MAX_SCALE)))) #todo Adjust the scale? p[6]
elif self.col[i].iFldType==_SQL_LONGVARCHAR or self.col[i].iFldType==_SQL_LONGVARBINARY:
rowres.append(self.col[i].data)
#todo SQL_INTERVAL etc.
else:
rowres.append('?') #todo use raw data or None instead?
else:
raise OperationalError, str(_seInvalidColumnIndex)+' '+_seInvalidColumnIndexText
#get row status
sqlRowStatus=self._Con.marshalBuffer.getSQLUSMALLINT()
if sqlRowStatus==_SQL_ROW_NOROW:
break #-> no more data
if arraySize>1:
res.append(tuple(rowres))
else:
res=tuple(rowres)
self.resultCode=self._Con.marshalBuffer.getRETCODE()
if self.resultCode==_SQL_NO_DATA and arraySize==1:
res=None #-> no more data
#if error, then get error details: local-number, default-text
self.errCount=self._Con.marshalBuffer.getSQLINTEGER() #error count
if self.resultCode==_SQL_ERROR:
for err in range(self.errCount):
self.resultErrCode=self._Con.marshalBuffer.getSQLINTEGER()
self.resultErrText=self._Con.marshalBuffer.getpUCHAR_SWORD()
raise DatabaseError, str(self.resultErrCode)+' '+self.resultErrText
return res
else:
raise InterfaceError, str(_seResultSetNotOpen)+' '+_seResultSetNotOpenText
def fetchone(self):
'''Fetch the next row from the cursor's result set.
'''
return self._fetch(1)
def fetchmany(self, size=None):
'''Fetch a number of rows from the cursor's result set.
'''
if size is None:
size=self.lastArraySize
return self._fetch(size)
def fetchall(self):
'''Fetch all the remaining rows from the cursor's result set.
'''
res=[]
while 1:
r=self.fetchone()
if r is None:
break
res.append(r)
return res
def _getaffectedRowCount(self):
return self._affectedRowCount
rowcount=property(_getaffectedRowCount, doc='Number of affected row(s)')
def _getdescription(self):
return self._description
def _getconnection(self):
warnings.warn('DB-API extension cursor.connection used')
return self._Con
description=property(_getdescription, doc='Column description(s)')
def _getarraysize(self):
return self.lastArraySize
arraysize=property(_getarraysize, _setArraySize, doc='Number of rows to fetch with fetchmany()')
def __iter__(self):
return self
def next(self):
x=self.fetchone()
if x is None:
raise StopIteration
return x
def __del__(self):
self.close()
#Column/Parameter classes -----------------------------------------------------
class _columnSQL:
def __init__(self):
self.iFldNum=None
self.iFldType=None
self.iUnits1=None
self.iUnits2=None
self.iNullOffset=None
self.colName=None
self.data=None
class _paramSQL:
def __init__(self):
self.iParamNum=None
self.colName=None
self.iDataType=None
self.iArgType=None
self.iUnits1=None
self.iUnits2=None
self.buffer=None
self.bufferLen=None
self.isNull=None
_clientCLIversion =100 #client parameter passing version
_CLI_ODBC=1
_CLI_JDBC=2
_CLI_DBEXPRESS=3
_CLI_ADO_NET=4
_CLI_PYTHON_DBAPI=5
_DriverName='ThinkSQL'
_DriverVersion='1.03.01'
__version__ = 1, 03, 01
_DriverMajorVersion=1
_DriverMinorVersion=03
_ok=0
_fail=-1
_failString=''
_stateClosed=0
_stateOpen=1
_sizeof_short=2
_sizeof_int=4
_sizeof_long=8
_sizeof_float=4
_sizeof_double=8
_sizeof_byte=8 #in bits
_sizeof_date=4
_sizeof_dateY=2
_sizeof_dateM=1
_sizeof_dateD=1
_sizeof_time=7
_sizeof_timeH=1
_sizeof_timeM=1
_sizeof_timeS=4
_sizeof_timeSc=1
_TIME_MAX_SCALE=6
_MAX_COL_PER_TABLE=300
_MAX_PARAM_PER_QUERY=300
_SQL_FALSE =0
_SQL_TRUE =1
#parameter types
_ptInput = 0
_ptOutput = 1
_EscapeChar='\\'
_SQL_ERROR=-1
_SQL_ERROR2=_SQL_ERROR
_SQL_SUCCESS=0
_SQL_SUCCESS_WITH_INFO=1
_SQL_STILL_EXECUTING=2
_SQL_NEED_DATA=99
_SQL_NO_DATA=100
_SQL_CHAR =1
_SQL_NUMERIC =2
_SQL_DECIMAL =3
_SQL_INTEGER =4
_SQL_SMALLINT =5
_SQL_FLOAT =6
_SQL_REAL =7
_SQL_DOUBLE =8
_SQL_DATETIME =9
_SQL_INTERVAL =10
_SQL_VARCHAR =12
_SQL_TYPE_DATE =91
_SQL_TYPE_TIME =92
_SQL_TYPE_TIMESTAMP =93
_SQL_LONGVARCHAR =-1
#SQL_BINARY =-2
#SQL_VARBINARY =-3
_SQL_LONGVARBINARY =-4
#future use: SQL_BIGINT =-5
_SQL_API_SQLCONNECT =7
_SQL_API_SQLDISCONNECT =9
_SQL_API_SQLEXECUTE =12
_SQL_API_SQLPREPARE =19
_SQL_API_SQLGETDATA =43
_SQL_API_SQLGETINFO =45
_SQL_API_SQLALLOCHANDLE =1001
_SQL_API_SQLCLOSECURSOR =1003
_SQL_API_SQLENDTRAN =1005
_SQL_API_SQLFREEHANDLE =1006
_SQL_API_SQLSETDESCFIELD =1017
_SQL_API_SQLFETCHSCROLL =1021
_SQL_ATTR_APP_ROW_DESC =10010
_SQL_DESC_ARRAY_SIZE =20
_SQL_DESC_DATA_POINTER =1010
_SQL_ROW_SUCCESS =0
_SQL_ROW_NOROW =3
_SQL_API_handshake =9999
_SQL_HANDLE_STMT =3
_SQL_ROLLBACK =1
_SQL_COMMIT =0
_SQL_FETCH_NEXT =1
_SQL_DBMS_NAME =17
_SQL_DBMS_VERSION =18
#Errors:
_seNotImplementedYet=500
_seNotImplementedYetText='Not implemented yet'
_seHandshakeFailed=1500
_seHandshakeFailedText='Handshake failed'
_seConnectionFailed=1502
_seConnectionFailedText='Communication link failure'
_seInvalidColumnIndex=1600
_seInvalidColumnIndexText='Invalid column index'
_seInvalidConversion=1602
_seInvalidConversionText='Invalid data conversion'
_seInvalidParameterIndex=1604
_seInvalidParameterIndexText='Invalid parameter index'
_seConnectionNotOpen=1700
_seConnectionNotOpenText='Connection not open'
_seResultSetNotOpen=1702
_seResultSetNotOpenText='No result set'
_seMissingParameter=1704
_seMissingParameterText='Not enough parameters passed'
_ss08001='08001'
_ss08S01='08S01'
_ss42000='42000'
_ssHY000='HY000'
_ssHY010='HY010'
_ssHYC00='HYC00' #optional feature not implemented yet
_ssNA='NA'
_sqlDate='<hbb' #year:smallint; month:shortint; day:shortint
_sqlTimezone='<bbbx' #sign:shortint (-1=negative, +1=positive, 0=no timezone); hour:shortint; minute:shortint
_sqlTime='<bbxxibxxx' # hour:shortint; minute:shortint; second:integer; (stored normalised as SSFFFFFF where number of Fs=TIME_MAX_SCALE) scale:shortint (used when formatting to dictate how many fractional places to display)
_sqlTimestamp='<hbb bbxxibxxx'
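#Example of the date wire format defined above (illustrative values):
#  struct.unpack(_sqlDate, struct.pack(_sqlDate, 2012, 6, 30)) == (2012, 6, 30)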
_TIME_MAX_SCALE=6
#Python specifics
#Exception classes ------------------------------------------------------------
class Error(StandardError):
'''Top-level DB API exception.'''
class Warning(StandardError):
'''Top-level DB API warning.'''
class InterfaceError(Error):
'''Interface error.'''
class DatabaseError(Error):
'''Database error.'''
class DataError(DatabaseError):
'''Data error.'''
class OperationalError(DatabaseError):
'''Operational error.'''
class IntegrityError(DatabaseError):
'''Integrity error.'''
class InternalError(DatabaseError):
'''Internal error.'''
class ProgrammingError(DatabaseError):
'''Programming error.'''
class NotSupportedError(DatabaseError):
'''Not supported error.'''
#ThinkSQL specific errors
class HandshakeError(OperationalError):
'''Handshake error.'''
class ConnectionError(OperationalError):
'''Connection error.'''
class DBAPITypeObject:
def __init__(self, name, *values):
self.name = name
self.values = values
def __repr__(self):
return self.name
def __cmp__(self, other):
if other in self.values:
return 0
elif other < self.values:
return 1
else:
return -1
#Type mappings
BINARY = DBAPITypeObject('BINARY', _SQL_LONGVARBINARY)
DATETIME = DBAPITypeObject('DATETIME', _SQL_DATETIME, _SQL_INTERVAL,
_SQL_TYPE_DATE, _SQL_TYPE_TIME, _SQL_TYPE_TIMESTAMP
)
NUMBER = DBAPITypeObject('NUMBER', _SQL_NUMERIC, _SQL_DECIMAL, _SQL_INTEGER, _SQL_SMALLINT,
_SQL_FLOAT, _SQL_REAL, _SQL_DOUBLE)
STRING = DBAPITypeObject('STRING', _SQL_CHAR, _SQL_VARCHAR, _SQL_LONGVARCHAR)
from time import localtime
def Date(year, month, day):
return '%04d/%02d/%02d' % (year, month, day)
def Time(hour, minute, second):
return '%02d:%02d:%02d' % (hour, minute, second)
def Timestamp(year, month, day, hour, minute, second):
return Date(year, month, day)+' '+Time(hour, minute, second)
def DateFromTicks(ticks):
t=localtime(ticks)
return Date(t[0],t[1],t[2])
def TimeFromTicks(ticks):
t=localtime(ticks)
return Time(t[3],t[4],t[5])
def TimestampFromTicks(ticks):
t=localtime(ticks)
return Timestamp(t[0],t[1],t[2],t[3],t[4],t[5])
class BinaryString:
def __init__(self,s):
self.value=s
def __str__(self):
return self.value
def Binary(string):
return BinaryString(string) #todo encode/make binary
#MarshalBuffer class ----------------------------------------------------------
class marshalBuffer:
marshalBufSize=16384
connectionTimeout=30
def __init__(self, host, port):
socket.setdefaulttimeout(self.__class__.connectionTimeout)
self.clientSocket=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.buffer=cStringIO.StringIO()
try:
self.clientSocket.connect((host, port))
except:
raise ConnectionError, str(_seConnectionFailed)+' '+_seConnectionFailedText
self.clearToSend() #initially clear buffer
self.bufferLen=0
self.bufferPtr=0
def __del__(self):
self.close()
def clearToSend(self):
#Clear before send
self.bufferLen=0
self.bufferPtr=0
self.buffer.truncate(0)
self.buffer.seek(0)
return _ok
def clearToReceive(self):
#Clear before receive
self.bufferPtr=0
self.bufferLen=0
self.buffer.truncate(0)
self.buffer.seek(0)
return _ok
def sendHandshake(self):
#Send raw handshake
try:
s=struct.pack('<h',_SQL_API_handshake)
self.clientSocket.send(s)
return _ok
except:
return _fail
def send(self):
'''Send a response and then clear the buffer.
'''
try:
i=(self.bufferLen+_sizeof_int)
#todo assert i=buffer size
s=struct.pack('<i', i)
self.clientSocket.send(s)
self.buffer.flush()
sent=0
while sent<(i-4):
sent=sent+self.clientSocket.send(self.buffer.getvalue()[sent:])
except:
return _fail
try:
self.clearToSend() #clear send buffer, once sent ok
return _ok
except:
return _fail
def read(self):
'''Wait for a response (clear buffer before receiving).
'''
self.clearToReceive()
#try:
s=self.clientSocket.recv(_sizeof_int)
i=struct.unpack('<i', s)
dataCount=i[0]
dataCount=(dataCount-_sizeof_int) #inclusive
if self.bufferLen+dataCount>self.__class__.marshalBufSize:
return _fail #overflow
#Read the block data into the marshal buffer
while self.buffer.tell()<dataCount:
self.buffer.write(self.clientSocket.recv(dataCount-self.buffer.tell()))
self.bufferLen=(self.bufferLen+dataCount)
self.buffer.seek(0) #reset the get pointer
return _ok
#except:
# return _fail
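    #Wire framing (as implemented by send/read above): each message is a
    #little-endian 4-byte total length, inclusive of the length field itself,
    #followed by the marshalled payload. For example, a single SQLUSMALLINT
    #value 7 goes over the wire as struct.pack('<i', 6) + struct.pack('<H', 7).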
def putSQLUSMALLINT(self, usi):
if self.bufferLen+_sizeof_short>self.__class__.marshalBufSize:
if self.send()!=_ok:
return _fail #buffer overflow
if (self.bufferLen+_sizeof_short>self.__class__.marshalBufSize):
return _fail
s=struct.pack('<H', usi)
self.buffer.write(s)
self.bufferLen+=_sizeof_short
return _ok
def getSQLUSMALLINT(self):
if self.bufferPtr+_sizeof_short>self.bufferLen:
if self.bufferPtr==self.bufferLen:
self.read()
else:
return _fail
s=self.buffer.read(_sizeof_short)
self.bufferPtr+=_sizeof_short
return struct.unpack('<H', s)[0]
def getSQLSMALLINT(self):
if self.bufferPtr+_sizeof_short>self.bufferLen:
if self.bufferPtr==self.bufferLen:
self.read()
else:
return _fail
s=self.buffer.read(_sizeof_short)
self.bufferPtr+=_sizeof_short
return struct.unpack('<h', s)[0]
def putSQLINTEGER(self, i):
if self.bufferLen+_sizeof_int>self.__class__.marshalBufSize:
if self.send()!=_ok:
return _fail #buffer overflow
if (self.bufferLen+_sizeof_int>self.__class__.marshalBufSize):
return _fail
s=struct.pack('<i', i)
self.buffer.write(s)
self.bufferLen+=_sizeof_int
return _ok
def getSQLINTEGER(self):
if self.bufferPtr+_sizeof_int>self.bufferLen:
if self.bufferPtr==self.bufferLen:
self.read()
else:
return _fail
s=self.buffer.read(_sizeof_int)
self.bufferPtr+=_sizeof_int
return struct.unpack('<i', s)[0]
def putpUCHAR_SWORD(self,ss):
if self.bufferLen+_sizeof_short+len(ss)>self.__class__.marshalBufSize:
if self.send()!=_ok:
return _fail #buffer overflow
if (self.bufferLen+_sizeof_short+len(ss)>self.__class__.marshalBufSize):
return _fail
s=struct.pack('<h', len(ss))
self.buffer.write(s)
self.bufferLen+=_sizeof_short
self.buffer.write(ss)
self.bufferLen+=len(ss)
return _ok
def getpUCHAR_SWORD(self):
if self.bufferPtr+_sizeof_short>self.bufferLen:
if self.bufferPtr==self.bufferLen:
self.read()
else:
return _fail
s=self.buffer.read(_sizeof_short)
self.bufferPtr+=_sizeof_short
si=struct.unpack('<H', s)[0]
self.bufferPtr+=si
return self.buffer.read(si)
def putpUCHAR_SDWORD(self, ss):
if self.bufferLen+_sizeof_int+len(ss)>self.__class__.marshalBufSize:
if self.send()!=_ok:
return _fail #buffer overflow
if (self.bufferLen+_sizeof_int+len(ss)>self.__class__.marshalBufSize):
return _fail
s=struct.pack('<i', len(ss))
self.buffer.write(s)
self.bufferLen+=_sizeof_int
self.buffer.write(ss)
self.bufferLen+=len(ss)
return _ok
def putpDataSDWORD(self, ss):
if self.bufferLen>0 and self.bufferLen+_sizeof_int+len(ss)>self.__class__.marshalBufSize:
if self.send()!=_ok:
return _fail #buffer overflow
ui=len(ss)
s=struct.pack('<i', ui)
self.buffer.write(s)
self.bufferLen+=_sizeof_int
if self.bufferLen+_sizeof_int+ui>self.__class__.marshalBufSize: #can only happen when sending a large object - we send in multiple segments
offset=0
while offset<ui:
nextSegment=self.__class__.marshalBufSize-self.bufferLen-_sizeof_int #max. size of next segment that can fit in remaining buffer
if nextSegment>(ui-offset):
nextSegment=ui-offset #final segment
self.buffer.write(ss[offset:(offset+nextSegment)])
self.bufferLen+=nextSegment
#todo could/should avoid final Send... i.e. if offset+nextSegment>=sdw
if self.send()!=_ok:
return _fail #buffer overflow
offset=offset+nextSegment
#todo assert offset=sdw
return _ok
else: #fits in a single buffer
self.buffer.write(ss)
self.bufferLen+=ui
return _ok
def getpDataSDWORD(self):
if self.bufferPtr+_sizeof_int>self.bufferLen:
if self.bufferPtr==self.bufferLen:
self.read()
else:
return _fail
s=self.buffer.read(_sizeof_int)
self.bufferPtr+=_sizeof_int
si=struct.unpack('<i', s)[0] #todo <I?
if self.bufferPtr+si>self.bufferLen:
if self.bufferPtr==self.bufferLen:
self.read()
#can only happen when reading a large object - we read in multiple segments (or else we must be asking for the wrong data type)
res=cStringIO.StringIO()
offset=0
while offset<si:
nextSegment=self.bufferLen-self.bufferPtr #size of next segment in remaining buffer
res.write(self.buffer.read(nextSegment))
self.bufferPtr=self.bufferPtr+nextSegment
#todo could/should avoid final Read... i.e. if offset+nextSegment>=sdw
self.read() #todo check result! abort, else we've got the next buffer full
offset=offset+nextSegment
#todo assert offset=sdw
res.seek(0)
return res.read(si)
else: #fits in a single buffer
self.bufferPtr=self.bufferPtr+si
return self.buffer.read(si)
def putSQLSMALLINT(self, si):
return self.putSQLUSMALLINT(si)
def getSQLPOINTER(self):
return self.getSQLINTEGER()
def putSQLPOINTER(self, si):
return self.putSQLINTEGER(si)
def putSQLUINTEGER(self, si):
return self.putSQLINTEGER(si)
def getSQLUINTEGER(self):
return self.getSQLINTEGER()
def getpSQLCHAR_SWORD(self):
return self.getpUCHAR_SWORD()
def putFunction(self, functionId):
return self.putSQLUSMALLINT(functionId)
def getFunction(self):
return self.getSQLUSMALLINT()
def putSQLHDBC(self, connectionHandle):
return self.putSQLINTEGER(connectionHandle)
def putSQLHSTMT(self, stmtHandle):
return self.putSQLINTEGER(stmtHandle)
def getSQLHSTMT(self):
return self.getSQLINTEGER()
def getRETCODE(self):
return self.getSQLSMALLINT()
def close(self):
#if not self.buffer.closed:
# self.buffer.close()
self.clientSocket.close()
|
#!/usr/bin/env python3
# MEMO
#date -r 1584214882
#date -j -f "%Y %j %H %M %S" "2020 12 15 00 00 00" "+%s"
import copy
import datetime
from decimal import Decimal
import json
import os
import subprocess
import sys
import time
import traceback
os.system('clear')
first_time = True
#
# delete_order(key, val):
#   key: open order key (txid)
#   val: open order value (order info dict), optional, used for logging
#
# delete_orders(order_type, price):
#   order_type: buy/sell (used when price is None)
#   price: if given, cancel open orders at this exact price regardless of type
#
def delete_order(key, val = None):
try:
cmd = subprocess.Popen(["clikraken", "--raw", "x", key], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
print(out_json['result'])
if (val != None):
print("DELETED ORDER: %s %s %s %s" % (key, val['descr']['type'], val['descr']['price'], Decimal(val['vol']) - Decimal(val['vol_exec'])))
else:
print("DELETED ORDER: %s %s %s %s" % (key, "Unknown", "Unknown", "Unknown"))
return None
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
def delete_orders(order_type, price = None):
try:
cmd = subprocess.Popen(["clikraken", "--raw", "ol"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
ol_k = list(out_json['result']['open'].keys())
ol_v = list(out_json['result']['open'].values())
j = 0
print("DELETE ORDERS:")
for i in ol_v:
if (price == None):
if (i['descr']['type'] == order_type):
delete_order(ol_k[j], i)
else:
if (Decimal(i['descr']['price']) == Decimal(price)):
delete_order(ol_k[j], i)
j += 1
return None
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
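# e.g. delete_orders("buy") cancels every open buy order, while
#      delete_orders(None, "9000") cancels any open order priced at exactly
#      9000 (price value here is illustrative).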
#
# order_type: buy/sell
# start_price: price of the first order
# step_price: price increment between successive orders
# order_count: number of orders to place
# vol: volume per order
# lev: leverage string (e.g. "2:1"); None or "1:1" adds no leverage flag
# dry_run: if True, the -v flag is appended to the clikraken call
#
def add_orders(order_type, start_price, step_price, order_count, vol, lev, dry_run = False):
try:
print("PLACE ORDER:")
price = Decimal(start_price)
for i in range(1, order_count + 1):
args = ["clikraken", "--raw", "p", "-t", "limit", order_type, str(vol), str(price)]
if (lev != None and lev != "1:1"):
args.append("-l")
args.append(lev)
if (dry_run == True):
args.append("-v")
cmd = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
try:
print(out_json['result'])
except:
print(out_json)
price += Decimal(step_price)
return None
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
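# Example (values are illustrative only; dry_run=True appends clikraken's -v flag):
#   add_orders("buy", "9000", "50", 5, "0.002", None, dry_run=True)
#   would build limit buy orders at 9000, 9050, 9100, 9150 and 9200.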
def show_balance():
try:
cmd = subprocess.Popen(["clikraken", "--raw", "bal"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
#print(out_json)
bal_xbt = Decimal(out_json['result']['XXBT'])
bal_usd = Decimal(out_json['result']['ZUSD'])
print("\033[36m{:<20s}{:>15s}\033[00m".format("ACCOUNT BALANCE:", "VOL"))
print("\033[96m{:<20s}{:>15.8f}\033[00m".format("BTC", bal_xbt))
print("\033[96m{:<20s}{:>15.8f}\033[00m".format("USD", bal_usd))
#print()
return None
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
def show_trade_balance(tot_fee = 0):
try:
tot_fee = Decimal(tot_fee)
cmd = subprocess.Popen(["clikraken", "--raw", "tbal"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
#print(out_json)
trade_balance = Decimal(out_json['result']['tb'])
margin_used = Decimal(out_json['result']['m'])
pos_cost = Decimal(out_json['result']['c'])
pos_pnl = Decimal(out_json['result']['n'])
try:
margin_level = out_json['result']['ml']
except:
margin_level = "N/A"
print("\033[36mOPEN TRADE BALANCE:\033[00m")
print("\033[36m{:<20s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}\033[00m".format("TOTAL ASSET (USD)", "", "", "TOTAL COST", "TOTAL MARGIN", "MARGIN LEVEL", "PNL W FEE"))
print("\033[96m{:<20.8f}{:>15s}{:>15s}{:>15.8f}{:>15.8f}{:>15s}{:>15.2f}\033[00m".format(trade_balance, "", "", pos_cost, margin_used, margin_level, pos_pnl - tot_fee))
#print()
return None
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
def get_ticker():
try:
cmd = subprocess.Popen(["clikraken", "--raw", "t"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
#print(out_json)
ticker = dict()
ticker["price"] = out_json['result']['XXBTZUSD']['c'][0]
ticker["vol"] = out_json['result']['XXBTZUSD']['c'][1]
ticker["ave"] = out_json['result']['XXBTZUSD']['p'][1]
ticker["ask"] = out_json['result']['XXBTZUSD']['a'][0]
ticker["bid"] = out_json['result']['XXBTZUSD']['b'][0]
ticker["high"] = out_json['result']['XXBTZUSD']['h'][1]
ticker["low"] = out_json['result']['XXBTZUSD']['l'][1]
#print(ticker)
return ticker
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
def show_ticker(ticker):
try:
print("\033[96m{:<20s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}\033[00m".format("TICKER:", "PRICE", "ASK", "BID", "WEIGHTED AVE", "HIGH", "LOW"))
print("\033[96m{:<20s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}\033[00m".format("", ticker['price'], ticker['ask'], ticker['bid'], ticker['ave'], ticker['high'], ticker['low']))
return None
except:
print("\033[91m\033[91mUnexpected Error!!\033[00m\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
def show_ticker_and_depth():
# depth
try:
cmd = subprocess.Popen(["clikraken", "--raw", "d", "-c", "100"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
#print(out_json)
asks = out_json['result']['XXBTZUSD']['asks']
asks_ave = Decimal(0)
asks_vol = Decimal(0)
for i in asks:
asks_ave += Decimal(i[0]) * Decimal(i[1])
asks_vol += Decimal(i[1])
asks_ave /= asks_vol
bids = out_json['result']['XXBTZUSD']['bids']
bids_ave = Decimal(0)
bids_vol = Decimal(0)
for i in bids:
bids_ave += Decimal(i[0]) * Decimal(i[1])
bids_vol += Decimal(i[1])
bids_ave /= bids_vol
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
# ticker
try:
cmd = subprocess.Popen(["clikraken", "--raw", "t"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
#print(out_json)
ticker_p = Decimal(out_json['result']['XXBTZUSD']['c'][0])
ticker_v = Decimal(out_json['result']['XXBTZUSD']['c'][1])
print("TICKER AND DEPTH")
print("{:<20s}{:>15.0f}{:>15.0f}".format("ASKS/WALL:", asks_ave, asks_vol))
print("{:<20s}{:>15.0f}{:>10.0f}{:>10.0f}".format("TICKER/SPREADS:", ticker_p, asks_ave-ticker_p, ticker_p-bids_ave))
print("{:<20s}{:>15.0f}{:>15.0f}".format("BIDS/WALL:", bids_ave, bids_vol))
#print()
return None
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
#
# open positions
#
def same_pos(pos_v, pos2_v):
try:
if (pos_v == None and pos2_v != None):
return False
if (pos_v != None and pos2_v == None):
return False
if (pos_v == None and pos2_v == None):
return True
pos_vol, pos_type = get_pos_vol(pos_v)
pos2_vol, pos2_type = get_pos_vol(pos2_v)
if (pos_type == pos2_type and pos_vol == pos2_vol):
return True
else:
print("Warn!! 2 pos NOT the same: " + str(pos_type) + ":" + str(pos_vol) + " v.s. " + str(pos2_type) + ":" + str(pos2_vol))
return False
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
def get_pos():
try:
cmd = subprocess.Popen(["clikraken", "--raw", "pos"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
#print(out_json)
#pos_k = list(out_json['result'].keys())
pos_v = list(out_json['result'].values())
return pos_v
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
def get_pos_vol(pos_v):
try:
pos_vol = Decimal(0)
pos_type = None
for i in pos_v:
if (Decimal(i['margin']) > 0):
pos_vol += Decimal(i['vol']) - Decimal(i['vol_closed'])
if (pos_type == None):
pos_type = i['type']
elif (pos_type != i['type']):
print("ERROR!! Position type inconsistency detected!! Abort!!")
sys.exit()
else:
print("\033[91mINFO!! Non-margin position detected. Continue.\033[00m")
print(i)
return pos_vol, pos_type
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError, TypeError
# itemized version below is commented out; the grouped version further down is used instead
#def show_pos(pos_v):
# try:
# print("OPEN POSITIONS:")
# print("{:<25s}{:>5s}{:>15s}{:>15s}{:>15s}".format("ORDERID", "TYPE", "COST", "VOL", "PNL"))
# for i in pos_v:
# print("{:<25s}{:>5s}{:>15.8f}{:>15.8f}{:>15.2f}".format(i['ordertxid'], i['type'], Decimal(i['cost']), Decimal(i['vol']) - Decimal(i['vol_closed']), Decimal(i['net'])))
# # beep sound
# #print("\a")
# return None
# except:
# print("\033[91mUnexpected Error!!\033[00m")
# print('-'*60)
# traceback.print_exc(file=sys.stdout)
# print('-'*60)
# return TypeError
# show cost/vol grouped (aggregated) by order id
def show_pos(pos_v):
try:
pos_copy = copy.deepcopy(pos_v)
dist = {}
for v in pos_copy:
#print("DIST {}".format(dist))
if (v['ordertxid'] in dist):
#print("FOUND IN DIST {}".format(v['ordertxid']))
dist_v = dist[v['ordertxid']]
dist_v['cost'] = str(Decimal(dist_v['cost']) + Decimal(v['cost']))
dist_v['vol'] = str(Decimal(dist_v['vol']) + Decimal(v['vol']))
dist_v['vol_closed'] = str(Decimal(dist_v['vol_closed']) + Decimal(v['vol_closed']))
dist_v['fee'] = str(Decimal(dist_v['fee']) + Decimal(v['fee']))
dist_v['value'] = str(Decimal(dist_v['value']) + Decimal(v['value']))
dist_v['margin'] = str(Decimal(dist_v['margin']) + Decimal(v['margin']))
dist_v['net'] = str(Decimal(dist_v['net']) + Decimal(v['net']))
dist[v['ordertxid']] = dist_v
else:
#print("NOT FOUND IN DIST {}".format(v['ordertxid']))
dist[v['ordertxid']] = v
tot = None
print("\033[36mGROUPED OPEN POSITIONS:\033[00m")
print("\033[36m{:<20s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}\033[00m".format("ORDERID", "TYPE", "AVE PRICE", "TOTAL COST", "TOTAL MARGIN", "TOTAL VOL", "PNL W FEE"))
for v in dist.values():
print("\033[96m{:<20s}{:>15s}{:>15.8f}{:>15.8f}{:>15.8f}{:>15.8f}{:>15.2f}\033[00m".format(v['ordertxid'], v['type'], Decimal(v['cost']) / (Decimal(v['vol']) - Decimal(v['vol_closed'])), Decimal(v['cost']), Decimal(v['margin']), Decimal(v['vol']) - Decimal(v['vol_closed']), Decimal(v['net']) - Decimal(v['fee'])))
            # beep sound
            # print("\a")
if (tot is None):
tot = v
else:
tot['cost'] = str(Decimal(tot['cost']) + Decimal(v['cost']))
tot['vol'] = str(Decimal(tot['vol']) + Decimal(v['vol']))
tot['vol_closed'] = str(Decimal(tot['vol_closed']) + Decimal(v['vol_closed']))
tot['fee'] = str(Decimal(tot['fee']) + Decimal(v['fee']))
tot['value'] = str(Decimal(tot['value']) + Decimal(v['value']))
tot['margin'] = str(Decimal(tot['margin']) + Decimal(v['margin']))
tot['net'] = str(Decimal(tot['net']) + Decimal(v['net']))
# if (tot is not None):
# print("\033[36mSUM:\033[00m")
# print("\033[36m{:<20s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}{:>15s}\033[00m".format("ORDERID", "TYPE", "AVE PRICE", "TOTAL COST", "TOTAL MARGIN", "TOTAL VOL", "PNL W/O FEE"))
# print("\033[96m{:<20s}{:>15s}{:>15.8f}{:>15.8f}{:>15.8f}{:>15.8f}{:>15.2f}\033[00m".format("", "", Decimal(tot['cost']) / (Decimal(tot['vol']) - Decimal(tot['vol_closed'])), Decimal(tot['cost']), Decimal(tot['margin']), Decimal(tot['vol']) - Decimal(tot['vol_closed']), Decimal(tot['net'])))
#print()
        if tot is None:
            tot_fee = Decimal(0)
        else:
            # return the fee as Decimal so callers can subtract it from Decimal PNL values
            tot_fee = Decimal(tot['fee'])
        return tot_fee
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
#
# open orders
#
def get_open_orders():
try:
cmd = subprocess.Popen(["clikraken", "--raw", "ol"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
cmd.wait()
out, err = cmd.communicate()
out_json = json.loads(out)
#print(out_json)
ol_k = list(out_json['result']['open'].keys())
ol_v = list(out_json['result']['open'].values())
return ol_k, ol_v
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError, TypeError
def get_next_buy(ol_k, ol_v):
return get_next_open(ol_k, ol_v, "buy")
def get_next_sell(ol_k, ol_v):
return get_next_open(ol_k, ol_v, "sell")
def get_next_open(ol_k, ol_v, order_type):
try:
price = None
order = None
index = 0
j = 0
for i in ol_v:
if (i['descr']['type'] == "buy" and i['descr']['type'] == order_type):
if (i['descr']['leverage'] == 'none'):
print("\033[91mINFO!! Non-margin buy order detected. Continue.\033[00m")
else:
if (price == None or price < Decimal(i['descr']['price'])):
price = Decimal(i['descr']['price'])
order = i
index = j
elif (i['descr']['type'] == "sell" and i['descr']['type'] == order_type):
if (i['descr']['leverage'] == 'none'):
print("\033[91mINFO!! Non-margin sell order detected. Continue.\033[00m")
else:
if (price == None or price > Decimal(i['descr']['price'])):
price = Decimal(i['descr']['price'])
order = i
index = j
j += 1
if (order == None):
return None, None
else:
return ol_k[index], order
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError, TypeError
def show_next_buy(order_k, order_v):
show_next_open(order_k, order_v)
def show_next_sell(order_k, order_v):
show_next_open(order_k, order_v)
def show_next_open(order_k, order_v):
try:
if (order_v != None):
if (order_v['descr']['type'] == "sell"):
print("\033[35m", end="")
else:
print("\033[32m", end="")
print("{:<20s}{:>15s}{:>15s}{:>15s}".format("NEXT ORDER " + order_v['descr']['type'].upper() + ":", "PRICE" ,"VOL", "LEV"))
print("{:<20s}{:>15s}{:>15.8f}{:>15s}\033[30m".format(order_k, order_v['descr']['price'], Decimal(order_v['vol']) - Decimal(order_v['vol_exec']), order_v['descr']['leverage']))
return None
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
def get_total_buy(open_orders):
return get_total_open(open_orders, "buy")
def get_total_sell(open_orders):
return get_total_open(open_orders, "sell")
def get_total_open(open_orders, order_type):
try:
vol = Decimal(0)
for i in open_orders:
if (i['descr']['type'] == order_type):
if (i['descr']['leverage'] == 'none'):
print("\033[91mINFO!! Non-margin %s order detected. Continue.\033[00m" % order_type)
else:
vol = vol + Decimal(i['vol']) - Decimal(i['vol_exec'])
return vol
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
def show_total_buy(vol):
show_total_open(vol, "buy")
def show_total_sell(vol):
show_total_open(vol, "sell")
def show_total_open(vol, order_type):
try:
print("\033[33m{:<20s}{:>15s}{:>15s}\033[00m".format("TOTAL " + order_type.upper() + " ORDER:", "", "VOL"))
if (vol != None):
print("\033[33m{:<20s}{:>15s}{:>15.8f}\033[00m".format("", "", vol))
return None
except:
print("\033[91mUnexpected Error!!\033[00m")
print('-'*60)
traceback.print_exc(file=sys.stdout)
print('-'*60)
return TypeError
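# Hedged usage sketch (not part of the original script): roughly how the helpers
# above fit together, assuming clikraken is installed and configured with valid
# Kraken API keys. Kept as comments so importing this module stays side-effect free.
#
#     ticker = get_ticker()
#     show_ticker(ticker)
#     show_ticker_and_depth()
#     tot_fee = show_pos(get_pos())
#     ol_k, ol_v = get_open_orders()
#     show_total_buy(get_total_buy(ol_v))
#     show_total_sell(get_total_sell(ol_v))
#     buy_k, buy_v = get_next_buy(ol_k, ol_v)
#     show_next_buy(buy_k, buy_v)
#     sell_k, sell_v = get_next_sell(ol_k, ol_v)
#     show_next_sell(sell_k, sell_v)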
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from contextlib import redirect_stdout # since 3.4
from contextlib import contextmanager
from io import StringIO
import sys
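# Three equivalent ways to capture printed output: f1 uses contextlib.redirect_stdout
# (Python 3.4+), f2 swaps sys.stdout by hand, and f3 builds the same swap as a
# custom context manager with @contextmanager.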
def f1():
s = StringIO()
with redirect_stdout(s):
print(42)
print('='*20)
print(s.getvalue(), end='')
print('='*20)
def f2():
s = StringIO()
stdout = sys.stdout
sys.stdout = s
print(42)
sys.stdout = stdout
print('='*20)
print(s.getvalue(), end='')
print('='*20)
def f3():
@contextmanager
def redirect(stream):
stdout = sys.stdout
sys.stdout = stream
try:
yield
finally:
sys.stdout = stdout
s = StringIO()
with redirect(s):
print(42)
print('='*20)
print(s.getvalue(), end='')
print('='*20)
if __name__ == '__main__':
f1()
f2()
f3()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import unicode_literals
import logging
import asyncio
import time
import functools
from typing import TYPE_CHECKING, Any, Dict, List, Callable, Optional, Union, cast
from uamqp import (
authentication,
constants,
errors,
compat,
Message,
AMQPClientAsync,
)
from .._client_base import ClientBase, _generate_sas_token, _parse_conn_str
from .._utils import utc_from_timestamp
from ..exceptions import ClientClosedError
from .._constants import JWT_TOKEN_SCOPE, MGMT_OPERATION, MGMT_PARTITION_OPERATION
from ._connection_manager_async import get_connection_manager
from ._error_async import _handle_exception
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
try:
from typing_extensions import Protocol
except ImportError:
Protocol = object # type: ignore
_LOGGER = logging.getLogger(__name__)
class EventHubSharedKeyCredential(object):
"""The shared access key credential used for authentication.
:param str policy: The name of the shared access policy.
:param str key: The shared access key.
"""
def __init__(self, policy: str, key: str):
self.policy = policy
self.key = key
self.token_type = b"servicebus.windows.net:sastoken"
async def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument
if not scopes:
raise ValueError("No token scope provided.")
return _generate_sas_token(scopes[0], self.policy, self.key)
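# Illustrative sketch (not part of the SDK source): the credential above simply wraps
# _generate_sas_token, so internal callers such as _create_auth_async use it roughly
# like this; the policy name, key, namespace and entity below are placeholders.
#
#     credential = EventHubSharedKeyCredential("RootManageSharedAccessKey", "<key>")
#     token = await credential.get_token("sb://<namespace>.servicebus.windows.net/<eventhub>")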
class ClientBaseAsync(ClientBase):
def __init__(
self,
fully_qualified_namespace: str,
eventhub_name: str,
credential: "TokenCredential",
**kwargs: Any
) -> None:
self._loop = kwargs.pop("loop", None)
super(ClientBaseAsync, self).__init__(
fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
credential=credential,
**kwargs
)
self._conn_manager_async = get_connection_manager(loop=self._loop, **kwargs)
def __enter__(self):
raise TypeError(
"Asynchronous client must be opened with async context manager."
)
@staticmethod
def _from_connection_string(conn_str: str, **kwargs) -> Dict[str, Any]:
host, policy, key, entity = _parse_conn_str(conn_str, kwargs)
kwargs["fully_qualified_namespace"] = host
kwargs["eventhub_name"] = entity
kwargs["credential"] = EventHubSharedKeyCredential(policy, key)
return kwargs
async def _create_auth_async(self) -> authentication.JWTTokenAsync:
"""
Create an ~uamqp.authentication.SASTokenAuthAsync instance to authenticate
the session.
"""
try:
token_type = self._credential.token_type
except AttributeError:
token_type = b"jwt"
if token_type == b"servicebus.windows.net:sastoken":
auth = authentication.JWTTokenAsync(
self._auth_uri,
self._auth_uri,
functools.partial(self._credential.get_token, self._auth_uri),
token_type=token_type,
timeout=self._config.auth_timeout,
http_proxy=self._config.http_proxy,
transport_type=self._config.transport_type,
)
await auth.update_token()
return auth
return authentication.JWTTokenAsync(
self._auth_uri,
self._auth_uri,
functools.partial(self._credential.get_token, JWT_TOKEN_SCOPE),
token_type=token_type,
timeout=self._config.auth_timeout,
http_proxy=self._config.http_proxy,
transport_type=self._config.transport_type,
)
async def _close_connection_async(self) -> None:
await self._conn_manager_async.reset_connection_if_broken()
async def _backoff_async(
self,
retried_times: int,
last_exception: Exception,
timeout_time: Optional[float] = None,
entity_name: Optional[str] = None,
) -> None:
entity_name = entity_name or self._container_id
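        # Exponential backoff: backoff_factor * 2 ** retried_times
        # (e.g. with a factor of 0.8 this gives roughly 0.8s, 1.6s, 3.2s, ...).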
backoff = self._config.backoff_factor * 2 ** retried_times
if backoff <= self._config.backoff_max and (
timeout_time is None or time.time() + backoff <= timeout_time
): # pylint:disable=no-else-return
await asyncio.sleep(backoff, loop=self._loop)
_LOGGER.info(
"%r has an exception (%r). Retrying...",
format(entity_name),
last_exception,
)
else:
_LOGGER.info(
"%r operation has timed out. Last exception before timeout is (%r)",
entity_name,
last_exception,
)
raise last_exception
async def _management_request_async(self, mgmt_msg: Message, op_type: bytes) -> Any:
retried_times = 0
last_exception = None
while retried_times <= self._config.max_retries:
mgmt_auth = await self._create_auth_async()
mgmt_client = AMQPClientAsync(
self._mgmt_target, auth=mgmt_auth, debug=self._config.network_tracing
)
try:
conn = await self._conn_manager_async.get_connection(
self._address.hostname, mgmt_auth
)
await mgmt_client.open_async(connection=conn)
response = await mgmt_client.mgmt_request_async(
mgmt_msg,
constants.READ_OPERATION,
op_type=op_type,
status_code_field=b"status-code",
description_fields=b"status-description",
)
status_code = response.application_properties[b"status-code"]
if status_code < 400:
return response
raise errors.AuthenticationException(
"Management request error. Status code: {}".format(status_code)
)
except Exception as exception: # pylint:disable=broad-except
last_exception = await _handle_exception(exception, self)
await self._backoff_async(
retried_times=retried_times, last_exception=last_exception
)
retried_times += 1
if retried_times > self._config.max_retries:
_LOGGER.info(
"%r returns an exception %r", self._container_id, last_exception
)
raise last_exception
finally:
await mgmt_client.close_async()
async def _get_eventhub_properties_async(self) -> Dict[str, Any]:
mgmt_msg = Message(application_properties={"name": self.eventhub_name})
response = await self._management_request_async(
mgmt_msg, op_type=MGMT_OPERATION
)
output = {}
eh_info = response.get_data() # type: Dict[bytes, Any]
if eh_info:
output["eventhub_name"] = eh_info[b"name"].decode("utf-8")
output["created_at"] = utc_from_timestamp(
float(eh_info[b"created_at"]) / 1000
)
output["partition_ids"] = [
p.decode("utf-8") for p in eh_info[b"partition_ids"]
]
return output
async def _get_partition_ids_async(self) -> List[str]:
return (await self._get_eventhub_properties_async())["partition_ids"]
async def _get_partition_properties_async(
self, partition_id: str
) -> Dict[str, Any]:
mgmt_msg = Message(
application_properties={
"name": self.eventhub_name,
"partition": partition_id,
}
)
response = await self._management_request_async(
mgmt_msg, op_type=MGMT_PARTITION_OPERATION
)
partition_info = response.get_data() # type: Dict[bytes, Union[bytes, int]]
output = {} # type: Dict[str, Any]
if partition_info:
output["eventhub_name"] = cast(bytes, partition_info[b"name"]).decode(
"utf-8"
)
output["id"] = cast(bytes, partition_info[b"partition"]).decode("utf-8")
output["beginning_sequence_number"] = cast(
int, partition_info[b"begin_sequence_number"]
)
output["last_enqueued_sequence_number"] = cast(
int, partition_info[b"last_enqueued_sequence_number"]
)
output["last_enqueued_offset"] = cast(
bytes, partition_info[b"last_enqueued_offset"]
).decode("utf-8")
output["is_empty"] = partition_info[b"is_partition_empty"]
output["last_enqueued_time_utc"] = utc_from_timestamp(
float(cast(int, partition_info[b"last_enqueued_time_utc"]) / 1000)
)
return output
async def _close_async(self) -> None:
await self._conn_manager_async.close_connection()
if TYPE_CHECKING:
class AbstractConsumerProducer(Protocol):
@property
def _name(self):
# type: () -> str
"""Name of the consumer or producer
"""
@_name.setter
def _name(self, value):
pass
@property
def _client(self):
# type: () -> ClientBaseAsync
"""The instance of EventHubComsumerClient or EventHubProducerClient
"""
@_client.setter
def _client(self, value):
pass
@property
def _handler(self):
# type: () -> AMQPClientAsync
"""The instance of SendClientAsync or ReceiveClientAsync
"""
@property
def _loop(self):
# type: () -> asyncio.AbstractEventLoop
"""The event loop that users pass in to call wrap sync calls to async API.
It's furthur passed to uamqp APIs
"""
@_loop.setter
def _loop(self, value):
pass
@property
def running(self):
# type: () -> bool
"""Whether the consumer or producer is running
"""
@running.setter
def running(self, value):
pass
def _create_handler(self, auth: authentication.JWTTokenAsync) -> None:
pass
_MIXIN_BASE = AbstractConsumerProducer
else:
_MIXIN_BASE = object
class ConsumerProducerMixin(_MIXIN_BASE):
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
def _check_closed(self) -> None:
if self.closed:
raise ClientClosedError(
"{} has been closed. Please create a new one to handle event data.".format(
self._name
)
)
async def _open(self) -> None:
"""
Open the EventHubConsumer using the supplied connection.
"""
# pylint: disable=protected-access,line-too-long
if not self.running:
if self._handler:
await self._handler.close_async()
auth = await self._client._create_auth_async()
self._create_handler(auth)
await self._handler.open_async(
connection=await self._client._conn_manager_async.get_connection(
self._client._address.hostname, auth
)
)
while not await self._handler.client_ready_async():
await asyncio.sleep(0.05, loop=self._loop)
self._max_message_size_on_link = (
self._handler.message_handler._link.peer_max_message_size
or constants.MAX_MESSAGE_LENGTH_BYTES
)
self.running = True
async def _close_handler_async(self) -> None:
if self._handler:
# close the link (shared connection) or connection (not shared)
await self._handler.close_async()
self.running = False
async def _close_connection_async(self) -> None:
await self._close_handler_async()
await self._client._conn_manager_async.reset_connection_if_broken() # pylint:disable=protected-access
    async def _handle_exception(self, exception: Exception) -> Exception:
        if not self.running and isinstance(exception, compat.TimeoutException):
            exception = errors.AuthenticationException("Authorization timeout.")
        return await _handle_exception(exception, self)
async def _do_retryable_operation(
self,
operation: Callable[..., Any],
timeout: Optional[float] = None,
**kwargs: Any
) -> Optional[Any]:
# pylint:disable=protected-access,line-too-long
timeout_time = (time.time() + timeout) if timeout else None
retried_times = 0
last_exception = kwargs.pop("last_exception", None)
operation_need_param = kwargs.pop("operation_need_param", True)
max_retries = self._client._config.max_retries
while retried_times <= max_retries:
try:
if operation_need_param:
return await operation(
timeout_time=timeout_time,
last_exception=last_exception,
**kwargs
)
return await operation()
except Exception as exception: # pylint:disable=broad-except
last_exception = await self._handle_exception(exception)
await self._client._backoff_async(
retried_times=retried_times,
last_exception=last_exception,
timeout_time=timeout_time,
entity_name=self._name,
)
retried_times += 1
if retried_times > max_retries:
_LOGGER.info(
"%r operation has exhausted retry. Last exception: %r.",
self._name,
last_exception,
)
raise last_exception
return None
async def close(self) -> None:
"""
Close down the handler. If the handler has already closed,
this will be a no op.
"""
await self._close_handler_async()
self.closed = True
|
import asyncio
import logging
import pyez
ZMQ_REQ_PORT = 9999
WORKER_PORT = 9998
LIVELINESS = 3000
SERVER_DURATION = 2000
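# Timing constants above are in milliseconds (converted to seconds where used below);
# clients in these tests connect on ZMQ_REQ_PORT while workers register on WORKER_PORT,
# which assumes a local pyez broker bridging the two ports.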
def assert_eq(exp, act):
if exp != act:
raise AssertionError(
"expected {}, actual: {}".format(exp, act))
return
def new_con():
return pyez.WorkerConnection(
con_s=f"tcp://localhost:{WORKER_PORT}",
service_name=b"TEST",
liveliness=LIVELINESS)
async def serve(handler):
async with new_con() as conn:
try:
await asyncio.wait_for(conn.serve(handler),
SERVER_DURATION / 1000.0)
except asyncio.TimeoutError:
return
return
async def test_serve_ok() -> None:
REQ = [b"big", b"test"]
RES = [b"big", b"response"]
async def handler(req: pyez.Frames):
assert_eq(req, REQ)
return [b"OK"] + RES
async def do_req():
async with pyez.ClientConnection(
f"tcp://localhost:{ZMQ_REQ_PORT}") as conn:
res = await conn.req(b"TEST", REQ)
assert_eq(res, ([b"OK"] + RES))
return
server_task = asyncio.create_task(serve(handler))
await do_req()
await server_task
return
async def test_serve_err() -> None:
REQ = [b"big", b"test"]
RES = [b"big", b"response"]
async def handler(req: pyez.Frames):
assert req == REQ
return [b"ERR"] + RES
async def do_req():
async with pyez.ClientConnection(
f"tcp://localhost:{ZMQ_REQ_PORT}") as conn:
res = await conn.req(b"TEST", REQ)
assert_eq(res, ([b"SERVICE_ERR"] + RES))
return
server_task = asyncio.create_task(serve(handler))
await do_req()
await server_task
return
async def test_timeout() -> None:
async def handler(req: pyez.Frames):
await asyncio.sleep(100)
return [b"OK", b"nothing"]
async def do_req():
async with pyez.ClientConnection(
f"tcp://localhost:{ZMQ_REQ_PORT}") as conn:
res = await conn.req(b"TEST", [b"any", b"thing"])
assert_eq(res, ([b"EZ_ERR", b"TIMEOUT"]))
return
server_task = asyncio.create_task(serve(handler))
await do_req()
await server_task
return
async def test_no_service() -> None:
async def do_req():
async with pyez.ClientConnection(
f"tcp://localhost:{ZMQ_REQ_PORT}") as conn:
res = await conn.req(b"TEST", [b"any", b"thing"],
timeout=10000)
assert_eq(res, ([b"EZ_ERR", b"NO_SERVICE"]))
return
await do_req()
return
TESTS = [
test_serve_ok,
test_serve_err,
test_timeout,
test_no_service,
]
async def main():
logging.basicConfig(level=logging.INFO)
for test in TESTS:
await test()
await asyncio.sleep(LIVELINESS / 1000.0)
logging.info(f"""
---------------- test success: {test.__name__}
""")
return
if __name__ == '__main__':
asyncio.run(main())
|
import os
from pathlib import Path
import torch
from torch import optim
import torch.nn as nn
from tqdm import tqdm_notebook as tqdm
from .dataset import generate_batches, CBOWDataset
from .utils import set_seed_everywhere, handle_dirs, compute_accuracy
from .classifier import CBOWClassifier
from .utils import make_train_state
from .radam import RAdam
class Learner(object):
def __init__(self, args, dataset, vectorizer, classifier):
self.args = args
self.dataset = dataset
self.vectorizer = vectorizer
self.classifier = classifier
self.loss_func = nn.CrossEntropyLoss()
# self.optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
self.optimizer = RAdam(classifier.parameters(), lr=args.learning_rate)
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=self.optimizer,
mode='min', factor=0.5,
patience=1)
self.train_state = make_train_state(args)
def _set_splits_progress_bars(self):
epoch_bar = tqdm(desc='training routine',
total=self.args.num_epochs,
position=0)
self.dataset.set_split('train')
train_bar = tqdm(desc='split=train',
total=self.dataset.get_num_batches(self.args.batch_size),
position=1,
leave=True)
self.dataset.set_split('val')
val_bar = tqdm(desc='split=val',
total=self.dataset.get_num_batches(self.args.batch_size),
position=1,
leave=True)
return epoch_bar, train_bar, val_bar
def _add_update_args(self, **kwargs):
# turn into dict
args_dict = vars(self.args)
# changes also values in self.args
for key, value in kwargs.items():
args_dict[key] = value
def train_eval_epoch(self, batch_generator, epoch_index, progress_bar, train_val='train'):
if train_val not in ['train', 'val']:
            raise ValueError("train_val must be 'train' or 'val'")
running_loss = 0.0
running_acc = 0.0
for batch_index, batch_dict in enumerate(batch_generator):
# the training routine is these 5 steps:
# --------------------------------------
# step 1. zero the gradients
self.optimizer.zero_grad()
# step 2. compute the output
y_pred = self.classifier(batch_dict['x_data'])
# step 3. compute the loss
loss = self.loss_func(y_pred, batch_dict['y_target'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
if train_val == 'train':
# step 4. use loss to produce gradients
loss.backward()
# step 5. use optimizer to take gradient step
self.optimizer.step()
# -----------------------------------------
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_target'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
# update bar
progress_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
progress_bar.update()
self.train_state[f'{train_val}_loss'].append(running_loss)
self.train_state[f'{train_val}_acc'].append(running_acc)
def train(self, **kwargs):
# kwargs are meant to be training related arguments that might be changed for training
self._add_update_args(**kwargs)
epoch_bar, train_bar, val_bar = self._set_splits_progress_bars()
try:
for epoch_index in range(self.args.num_epochs):
self.train_state['epoch_index'] = epoch_index
# Iterate over training dataset
# setup: batch generator, set loss and acc to 0, set train mode on
self.dataset.set_split('train')
batch_generator = generate_batches(self.dataset,
batch_size=self.args.batch_size,
device=self.args.device)
self.classifier.train()
self.train_eval_epoch(batch_generator, epoch_index, train_bar)
# Iterate over val dataset
# setup: batch generator, set loss and acc to 0; set eval mode on
self.dataset.set_split('val')
batch_generator = generate_batches(self.dataset,
batch_size=self.args.batch_size,
device=self.args.device)
self.classifier.eval()
self.train_eval_epoch(batch_generator, epoch_index, val_bar, 'val')
self.update_train_state()
self.scheduler.step(self.train_state['val_loss'][-1])
if self.train_state['stop_early']:
break
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
except KeyboardInterrupt:
print("Exiting loop")
def save_model(self):
state = {
'scheduler': self.scheduler,
'state_dict': self.classifier.state_dict(),
'optimizer': self.optimizer.state_dict(),
'train_state': self.train_state,
'args': self.args
}
torch.save(state, self.train_state['model_filename'])
def load_model(self, filename):
learner = torch.load(filename)
self.scheduler = learner['scheduler']
self.classifier.load_state_dict(learner['state_dict'])
self.optimizer.load_state_dict(learner['optimizer'])
self.train_state = learner['train_state']
def update_train_state(self):
"""Handle the training state updates.
Components:
- Early Stopping: Prevent overfitting.
- Model Checkpoint: Model is saved if the model is better
:param args: main arguments
:param model: model to train
:param train_state: a dictionary representing the training state values
:returns:
a new train_state
"""
# Save one model at least
if self.train_state['epoch_index'] == 0:
# torch.save(self.classifier.state_dict(), self.train_state['model_filename'])
self.save_model()
self.train_state['stop_early'] = False
# Save model if performance improved
elif self.train_state['epoch_index'] >= 1:
loss_tm1, loss_t = self.train_state['val_loss'][-2:]
# If loss worsened
if loss_t >= self.train_state['early_stopping_best_val']:
# Update step
self.train_state['early_stopping_step'] += 1
# Loss decreased
else:
                # Save the best model and record the new best validation loss
                # (without updating this value, every epoch below the initial
                # best would trigger a checkpoint)
                if loss_t < self.train_state['early_stopping_best_val']:
                    self.save_model()
                    self.train_state['early_stopping_best_val'] = loss_t
                # Reset early stopping step
                self.train_state['early_stopping_step'] = 0
# Stop early ?
self.train_state['stop_early'] = \
self.train_state['early_stopping_step'] >= self.args.early_stopping_criteria
def validate(self):
self.load_model(self.train_state['model_filename'])
self.classifier = self.classifier.to(self.args.device)
self.classifier.eval()
self.dataset.set_split('test')
batch_generator = generate_batches(self.dataset,
batch_size=self.args.batch_size,
shuffle=False,
device=self.args.device)
running_loss = 0.
running_acc = 0.
self.classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = self.classifier(x_in=batch_dict['x_data'])
# compute the loss
loss = self.loss_func(y_pred, batch_dict['y_target'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_target'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
self.train_state['test_loss'] = running_loss
self.train_state['test_acc'] = running_acc
print(f"Test loss: {round(self.train_state['test_loss'], 3)}")
print(f"Test Accuracy: {round(self.train_state['test_acc'], 3)}")
def pretty_print(self, results):
"""
Pretty print embedding results.
"""
for item in results:
print("...[%.2f] - %s" % (item[1], item[0]))
def get_closest(self, target_word, word_to_idx, embeddings, n=5):
"""
Get the n closest
words to your word.
"""
# Calculate distances to all other words
word_embedding = embeddings[word_to_idx[target_word.lower()]]
distances = []
for word, index in word_to_idx.items():
if word == "<MASK>" or word == target_word:
continue
distances.append((word, torch.dist(word_embedding, embeddings[index])))
        # target_word is already excluded above, so take the first n entries
        # (the original [1:n + 2] slice skipped the nearest neighbour)
        results = sorted(distances, key=lambda x: x[1])[:n]
return results
def get_closest_words(self, word):
embeddings = self.classifier.embedding.weight.data
word_to_idx = self.vectorizer.cbow_vocab._token_to_idx
self.pretty_print(self.get_closest(word, word_to_idx, embeddings, n=5))
@classmethod
def learner_from_args(cls, args):
if args.expand_filepaths_to_save_dir:
args.vectorizer_file = os.path.join(args.save_dir,
args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir,
args.model_state_file)
print("Expanded filepaths: ")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)
# handle dirs
handle_dirs(args.save_dir)
if args.reload_from_files:
# training from a checkpoint
print("Loading dataset and loading vectorizer")
dataset = CBOWDataset.load_dataset_and_load_vectorizer(args.cbow_csv,
args.vectorizer_file)
else:
# create dataset and vectorizer
print("Loading dataset and creating vectorizer")
dataset = CBOWDataset.load_dataset_and_make_vectorizer(args.cbow_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
classifier = CBOWClassifier(vocabulary_size=len(vectorizer.cbow_vocab),
embedding_size=args.embedding_size)
classifier = classifier.to(args.device)
learner = cls(args, dataset, vectorizer, classifier)
if args.reload_from_files:
learner_states = torch.load(Path(args.model_state_file))
learner.optimizer.load_state_dict(learner_states['optimizer'])
learner.classifier.load_state_dict(learner_states['state_dict'])
learner.scheduler = learner_states['scheduler']
learner.train_state = learner_states['train_state']
return learner
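# Illustrative usage sketch (not from the original module): a typical run with an
# argparse.Namespace-style args object carrying the fields referenced above
# (cbow_csv, save_dir, batch_size, num_epochs, learning_rate, device, seed, ...).
#
#     learner = Learner.learner_from_args(args)
#     learner.train()
#     learner.validate()
#     learner.get_closest_words("word")   # "word" is a placeholder vocabulary token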
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: nxos_interface_ospf
version_added: "2.2"
short_description: Manages configuration of an OSPF interface instance.
description:
- Manages configuration of an OSPF interface instance.
author: Gabriele Gerbino (@GGabriele)
extends_documentation_fragment: nxos
notes:
- Default, where supported, restores params default value.
- To remove an existing authentication configuration you should use
C(message_digest_key_id=default) plus all other options matching their
existing values.
- C(state=absent) removes the whole OSPF interface configuration.
options:
interface:
description:
- Name of this cisco_interface resource. Valid value is a string.
required: true
ospf:
description:
- Name of the ospf instance.
required: true
area:
description:
- Ospf area associated with this cisco_interface_ospf instance.
Valid values are a string, formatted as an IP address
(i.e. "0.0.0.0") or as an integer.
required: true
cost:
description:
- The cost associated with this cisco_interface_ospf instance.
required: false
default: null
hello_interval:
description:
- Time between sending successive hello packets.
Valid values are an integer or the keyword 'default'.
required: false
default: null
dead_interval:
description:
- Time interval an ospf neighbor waits for a hello
packet before tearing down adjacencies. Valid values are an
integer or the keyword 'default'.
required: false
default: null
passive_interface:
description:
- Setting to true will prevent this interface from receiving
HELLO packets. Valid values are 'true' and 'false'.
required: false
choices: ['true','false']
default: null
message_digest:
description:
- Enables or disables the usage of message digest authentication.
Valid values are 'true' and 'false'.
required: false
choices: ['true','false']
default: null
message_digest_key_id:
description:
- Md5 authentication key-id associated with the ospf instance.
If this is present, message_digest_encryption_type,
message_digest_algorithm_type and message_digest_password are
mandatory. Valid value is an integer and 'default'.
required: false
default: null
message_digest_algorithm_type:
description:
- Algorithm used for authentication among neighboring routers
        within an area. Valid value is 'md5'.
required: false
choices: ['md5']
default: null
message_digest_encryption_type:
description:
- Specifies the scheme used for encrypting message_digest_password.
Valid values are '3des' or 'cisco_type_7' encryption.
required: false
choices: ['cisco_type_7','3des']
default: null
message_digest_password:
description:
- Specifies the message_digest password. Valid value is a string.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_interface_ospf:
interface: ethernet1/32
ospf: 1
area: 1
    cost: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"area": "1", "interface": "ethernet1/32", "ospf": "1"}
existing:
description: k/v pairs of existing OSPF configuration
returned: verbose mode
type: dict
sample: {"area": "", "cost": "", "dead_interval": "",
"hello_interval": "", "interface": "ethernet1/32",
"message_digest": false, "message_digest_algorithm_type": "",
"message_digest_encryption_type": "",
"message_digest_key_id": "", "message_digest_password": "",
"ospf": "", "passive_interface": false}
end_state:
description: k/v pairs of OSPF configuration after module execution
returned: verbose mode
type: dict
sample: {"area": "0.0.0.1", "cost": "", "dead_interval": "",
"hello_interval": "", "interface": "ethernet1/32",
"message_digest": false, "message_digest_algorithm_type": "",
"message_digest_encryption_type": "", "message_digest_key_id": "",
"message_digest_password": "", "ospf": "1",
"passive_interface": false}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface Ethernet1/32", "ip router ospf 1 area 0.0.0.1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
BOOL_PARAMS = [
'passive_interface',
'message_digest'
]
PARAM_TO_COMMAND_KEYMAP = {
'cost': 'ip ospf cost',
'ospf': 'ip router ospf',
'area': 'ip router ospf',
'hello_interval': 'ip ospf hello-interval',
'dead_interval': 'ip ospf dead-interval',
'passive_interface': 'ip ospf passive-interface',
'message_digest': 'ip ospf authentication message-digest',
'message_digest_key_id': 'ip ospf message-digest-key',
'message_digest_algorithm_type': 'ip ospf message-digest-key options',
'message_digest_encryption_type': 'ip ospf message-digest-key options',
'message_digest_password': 'ip ospf message-digest-key options',
}
PARAM_TO_DEFAULT_KEYMAP = {
}
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_custom_value(arg, config, module):
value = ''
if arg == 'ospf':
REGEX = re.compile(r'(?:ip router ospf\s)(?P<value>.*)$', re.M)
value = ''
if 'ip router ospf' in config:
parsed = REGEX.search(config).group('value').split()
value = parsed[0]
elif arg == 'area':
REGEX = re.compile(r'(?:ip router ospf\s)(?P<value>.*)$', re.M)
value = ''
if 'ip router ospf' in config:
parsed = REGEX.search(config).group('value').split()
value = parsed[2]
elif arg.startswith('message_digest_'):
REGEX = re.compile(r'(?:ip ospf message-digest-key\s)(?P<value>.*)$', re.M)
value = ''
if 'ip ospf message-digest-key' in config:
value_list = REGEX.search(config).group('value').split()
if arg == 'message_digest_key_id':
value = value_list[0]
elif arg == 'message_digest_algorithm_type':
value = value_list[1]
elif arg == 'message_digest_encryption_type':
value = value_list[2]
if value == '3':
value = '3des'
elif value == '7':
value = 'cisco_type_7'
elif arg == 'message_digest_password':
value = value_list[3]
elif arg == 'passive_interface':
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
NO_REGEX = re.compile(r'\s+no\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if NO_REGEX.search(config):
value = False
elif REGEX.search(config):
value = True
except TypeError:
value = False
return value
def get_value(arg, config, module):
custom = [
'ospf',
'area',
'message_digest_key_id',
'message_digest_algorithm_type',
'message_digest_encryption_type',
'message_digest_password',
'passive_interface'
]
if arg in custom:
value = get_custom_value(arg, config, module)
elif arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
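# e.g. get_value('hello_interval', 'ip ospf hello-interval 10', module) would return '10'
# via the generic regex branch above (example values, not real device output).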
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
parents = ['interface {0}'.format(module.params['interface'].capitalize())]
config = netcfg.get_section(parents)
if 'ospf' in config:
for arg in args:
if arg not in ['interface']:
existing[arg] = get_value(arg, config, module)
existing['interface'] = module.params['interface']
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def get_default_commands(existing, proposed, existing_commands, key, module):
commands = list()
existing_value = existing_commands.get(key)
if key.startswith('ip ospf message-digest-key'):
check = False
for param in ['message_digest_encryption_type',
'message_digest_algorithm_type',
'message_digest_password']:
if existing[param] == proposed[param]:
check = True
if check:
if existing['message_digest_encryption_type'] == '3des':
encryption_type = '3'
elif existing['message_digest_encryption_type'] == 'cisco_type_7':
encryption_type = '7'
command = 'no {0} {1} {2} {3} {4}'.format(
key,
existing['message_digest_key_id'],
existing['message_digest_algorithm_type'],
encryption_type,
existing['message_digest_password'])
commands.append(command)
else:
commands.append('no {0} {1}'.format(key, existing_value))
return commands
def get_custom_command(existing_cmd, proposed, key, module):
commands = list()
if key == 'ip router ospf':
command = '{0} {1} area {2}'.format(key, proposed['ospf'],
proposed['area'])
if command not in existing_cmd:
commands.append(command)
elif key.startswith('ip ospf message-digest-key'):
if (proposed['message_digest_key_id'] != 'default' and
'options' not in key):
if proposed['message_digest_encryption_type'] == '3des':
encryption_type = '3'
elif proposed['message_digest_encryption_type'] == 'cisco_type_7':
encryption_type = '7'
command = '{0} {1} {2} {3} {4}'.format(
key,
proposed['message_digest_key_id'],
proposed['message_digest_algorithm_type'],
encryption_type,
proposed['message_digest_password'])
commands.append(command)
return commands
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
commands.extend(get_default_commands(existing, proposed,
existing_commands, key,
module))
else:
if (key == 'ip router ospf' or
key.startswith('ip ospf message-digest-key')):
commands.extend(get_custom_command(commands, proposed,
key, module))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
parents = ['interface {0}'.format(module.params['interface'].capitalize())]
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = ['interface {0}'.format(module.params['interface'].capitalize())]
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in existing_commands.items():
if value:
if key.startswith('ip ospf message-digest-key'):
if 'options' not in key:
if existing['message_digest_encryption_type'] == '3des':
encryption_type = '3'
elif existing['message_digest_encryption_type'] == 'cisco_type_7':
encryption_type = '7'
command = 'no {0} {1} {2} {3} {4}'.format(
key,
existing['message_digest_key_id'],
existing['message_digest_algorithm_type'],
encryption_type,
existing['message_digest_password'])
commands.append(command)
elif key in ['ip ospf authentication message-digest',
'ip ospf passive-interface']:
if value:
commands.append('no {0}'.format(key))
elif key == 'ip router ospf':
command = 'no {0} {1} area {2}'.format(key, proposed['ospf'],
proposed['area'])
if command not in commands:
commands.append(command)
else:
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
candidate.add(commands, parents=parents)
def normalize_area(area, module):
try:
area = int(area)
area = '0.0.0.{0}'.format(area)
except ValueError:
splitted_area = area.split('.')
if len(splitted_area) != 4:
module.fail_json(msg='Incorrect Area ID format', area=area)
return area
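# e.g. normalize_area('1', module) returns '0.0.0.1'; dotted input such as '0.0.0.10'
# fails the int() conversion and is returned unchanged after the length check.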
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
ospf=dict(required=True, type='str'),
area=dict(required=True, type='str'),
cost=dict(required=False, type='str'),
hello_interval=dict(required=False, type='str'),
dead_interval=dict(required=False, type='str'),
passive_interface=dict(required=False, type='bool'),
message_digest=dict(required=False, type='bool'),
message_digest_key_id=dict(required=False, type='str'),
message_digest_algorithm_type=dict(required=False, type='str',
choices=['md5']),
message_digest_encryption_type=dict(required=False, type='str',
choices=['cisco_type_7','3des']),
message_digest_password=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
required_together=[['message_digest_key_id',
'message_digest_algorithm_type',
'message_digest_encryption_type',
'message_digest_password']],
supports_check_mode=True)
for param in ['message_digest_encryption_type',
'message_digest_algorithm_type',
'message_digest_password']:
if module.params[param] == 'default':
module.exit_json(msg='Use message_digest_key_id=default to remove'
' an existing authentication configuration')
state = module.params['state']
args = [
'interface',
'ospf',
'area',
'cost',
'hello_interval',
'dead_interval',
'passive_interface',
'message_digest',
'message_digest_key_id',
'message_digest_algorithm_type',
'message_digest_encryption_type',
'message_digest_password'
]
existing = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'interface':
if str(value).lower() == 'true':
value = True
elif str(value).lower() == 'false':
value = False
elif str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
proposed['area'] = normalize_area(proposed['area'], module)
result = {}
if (state == 'present' or (state == 'absent' and
existing.get('ospf') == proposed['ospf'] and
existing.get('area') == proposed['area'])):
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
except ShellError:
exc = get_exception()
module.fail_json(msg=str(exc))
else:
result['updates'] = []
result['connected'] = module.connected
if module._verbosity > 0:
end_state = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
module.exit_json(**result)
if __name__ == '__main__':
main()
|
bl_info = {
"name": "Add heightmap",
"author": "Spencer Alves",
"version": (1, 0),
"blender": (2, 75, 0),
"location": "File > Import > Import heightmap",
"description": "Generates a mesh from a heightmap image",
"warning": "",
"wiki_url": "",
"category": "Add Mesh",
}
import bpy
from bpy.types import Operator
from bpy.props import FloatVectorProperty, IntProperty, StringProperty
from bpy_extras.object_utils import AddObjectHelper, object_data_add
from mathutils import Vector
from bpy_extras.image_utils import load_image
def add_object(self, context):
scale_x = self.scale.x
scale_y = self.scale.y
scale_z = self.scale.z
img = load_image(self.filename, self.directory)
precision = self.precision
width, height = img.size
verts = []
edges = []
faces = []
for x in range(0, width, precision):
for y in range(0, height, precision):
px = (x+(height-y-1)*width)*4
verts.append(Vector((x*scale_x/width,
y*scale_y/height,
img.pixels[px]*scale_z)))
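    # Vertices were appended with x as the outer loop and y as the inner loop,
    # so column i of the grid starts at index i * nYVerts; each iteration below
    # builds one quad from two neighbouring columns.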
    # ceil-divide: the stride must match the vertex count per column (range with step)
    nYVerts = -(-height // precision)
    for i in range(-(-width // precision) - 1):
for j in range(nYVerts-1):
face = [j+i*nYVerts, (j+1)+i*nYVerts,
(j+1)+(i+1)*nYVerts, j+(i+1)*nYVerts]
faces.append(face)
mesh = bpy.data.meshes.new(name=img.name)
mesh.from_pydata(verts, edges, faces)
# useful for development when the mesh may be invalid.
# mesh.validate(verbose=True)
object_data_add(context, mesh, operator=self)
class ImportHeightmap(Operator, AddObjectHelper):
"""Create a new Mesh Object"""
bl_idname = "import_image.to_heightmap"
bl_label = "Import heightmap"
bl_options = {'REGISTER', 'UNDO'}
filename = StringProperty(name="File Name",
description="Name of the file")
directory = StringProperty(name="Directory",
description="Directory of the file")
scale = FloatVectorProperty(
name="scale",
default=(1.0, 1.0, 1.0),
subtype='TRANSLATION',
description="scaling",
unit='LENGTH'
)
precision = IntProperty(
name="precision",
default=16,
min=1)
def invoke(self, context, event):
wm = bpy.context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def execute(self, context):
add_object(self, context)
return {'FINISHED'}
# Registration
def import_image_button(self, context):
self.layout.operator(
ImportHeightmap.bl_idname,
text="Add heightmap")
def register():
bpy.utils.register_class(ImportHeightmap)
bpy.types.INFO_MT_file_import.append(import_image_button)
bpy.types.INFO_MT_mesh_add.append(import_image_button)
def unregister():
bpy.utils.unregister_class(ImportHeightmap)
    bpy.types.INFO_MT_file_import.remove(import_image_button)
    bpy.types.INFO_MT_mesh_add.remove(import_image_button)
if __name__ == "__main__":
register()
|
#!/usr/bin/env python
# Copyright (c) 2013-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SDK Gripper Example: joystick
"""
import argparse
import rospy
import intera_interface
import intera_external_devices
def map_joystick(joystick, limb):
"""
maps joystick input to gripper commands
@param joystick: an instance of a Joystick
"""
print("Getting robot state... ")
rs = intera_interface.RobotEnable(intera_interface.CHECK_VERSION)
init_state = rs.state()
gripper = None
original_deadzone = None
def clean_shutdown():
if gripper and original_deadzone:
gripper.set_dead_zone(original_deadzone)
print("Exiting example.")
try:
gripper = intera_interface.Gripper(limb + '_gripper')
except (ValueError, OSError) as e:
rospy.logerr("Could not detect an electric gripper attached to the robot.")
clean_shutdown()
return
rospy.on_shutdown(clean_shutdown)
# abbreviations
jhi = lambda s: joystick.stick_value(s) > 0
jlo = lambda s: joystick.stick_value(s) < 0
bdn = joystick.button_down
bup = joystick.button_up
def print_help(bindings_list):
print("Press Ctrl-C to quit.")
for bindings in bindings_list:
for (test, _cmd, doc) in bindings:
if callable(doc):
doc = doc()
print("%s: %s" % (str(test[1]), doc))
def offset_position(offset_pos):
cmd_pos = max(min(gripper.get_position() + offset_pos, gripper.MAX_POSITION), gripper.MIN_POSITION)
gripper.set_position(cmd_pos)
print("commanded position set to {0} m".format(cmd_pos))
def update_velocity(offset_vel):
cmd_speed = max(min(gripper.get_cmd_velocity() + offset_vel, gripper.MAX_VELOCITY), gripper.MIN_VELOCITY)
gripper.set_cmd_velocity(cmd_speed)
print("commanded velocity set to {0} m/s".format(cmd_speed))
# decrease position dead_zone
original_deadzone = gripper.get_dead_zone()
# WARNING: setting the deadzone below this can cause oscillations in
# the gripper position. However, setting the deadzone to this
# value is required to achieve the incremental commands in this example
gripper.set_dead_zone(0.001)
rospy.loginfo("Gripper deadzone set to {}".format(gripper.get_dead_zone()))
num_steps = 8.0
percent_delta = 1.0 / num_steps
position_increment = (gripper.MAX_POSITION - gripper.MIN_POSITION) * percent_delta
velocity_increment = (gripper.MAX_VELOCITY - gripper.MIN_VELOCITY) * percent_delta
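    # position/velocity increments: 1/8 of the respective full range per joystick command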
bindings_list = []
bindings = (
#(test, command, description)
((bdn, ['btnLeft']), (gripper.reboot, []), "reboot"),
((bdn, ['btnUp']), (gripper.calibrate, []), "calibrate"),
((bdn, ['leftTrigger']), (gripper.close, []), "close"),
((bup, ['leftTrigger']), (gripper.open, []), "open (release)"),
((bdn, ['leftBumper']), (gripper.stop, []), "stop"),
((jlo, ['leftStickVert']), (offset_position, [-position_increment]),
"decrease position"),
((jhi, ['leftStickVert']), (offset_position, [position_increment]),
"increase position"),
((jlo, ['rightStickVert']), (update_velocity, [-velocity_increment]),
"decrease commanded velocity"),
((jhi, ['rightStickVert']), (update_velocity, [velocity_increment]),
"increase commanded velocity"),
((bdn, ['function1']), (print_help, [bindings_list]), "help"),
((bdn, ['function2']), (print_help, [bindings_list]), "help"),
)
bindings_list.append(bindings)
rospy.loginfo("Enabling robot...")
rs.enable()
rate = rospy.Rate(100)
print_help(bindings_list)
print("Press <Start> button for help; Ctrl-C to stop...")
while not rospy.is_shutdown():
# test each joystick condition and call binding cmd if true
for (test, cmd, doc) in bindings:
if test[0](*test[1]):
print(doc)
cmd[0](*cmd[1])
rate.sleep()
rospy.signal_shutdown("Example finished.")
def main():
"""SDK Gripper Example: Joystick Control
Use a game controller to control the grippers.
Attach a game controller to your dev machine and run this
example along with the ROS joy_node to control gripper
using the joysticks and buttons. Be sure to provide
the *joystick* type you are using as an argument to setup
appropriate key mappings.
Uses the intera_interface.Gripper class and the helper classes
in intera_external_devices.Joystick.
"""
epilog = """
See help inside the example with the "Start" button for controller
key bindings.
"""
rp = intera_interface.RobotParams()
valid_limbs = rp.get_limb_names()
if not valid_limbs:
rp.log_message(("Cannot detect any limb parameters on this robot. "
"Exiting."), "ERROR")
return
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt,
description=main.__doc__,
epilog=epilog)
required = parser.add_argument_group('required arguments')
required.add_argument(
'-j', '--joystick', required=True, choices=['xbox', 'logitech', 'ps3'],
help='specify the type of joystick to use'
)
parser.add_argument(
"-l", "--limb", dest="limb", default=valid_limbs[0],
choices=valid_limbs,
help="Limb on which to run the gripper joystick example"
)
args = parser.parse_args(rospy.myargv()[1:])
joystick = None
if args.joystick == 'xbox':
joystick = intera_external_devices.joystick.XboxController()
elif args.joystick == 'logitech':
joystick = intera_external_devices.joystick.LogitechController()
elif args.joystick == 'ps3':
joystick = intera_external_devices.joystick.PS3Controller()
else:
# Should never reach this case with proper argparse usage
parser.error("Unsupported joystick type '%s'" % (args.joystick))
print("Initializing node... ")
rospy.init_node("sdk_gripper_joystick")
map_joystick(joystick, args.limb)
if __name__ == '__main__':
main()
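# Example invocation (sketch; the package and script names depend on how this
# example is installed alongside the ROS joy_node):
#     rosrun <your_package> <this_script>.py -j xbox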
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
'''
Copied example from Honza to fix a bug in 2D PDP. Basically, when no figure is plotted, this bug throws an error.
Hence, this code just needs to run to completion. No assertion check is necessary for this test.
'''
def partial_plot_test_with_user_splits():
train = h2o.import_file(pyunit_utils.locate('smalldata/flow_examples/abalone.csv.gz'))
model = H2OGeneralizedLinearEstimator(training_frame=train)
model.train(y="C9")
model.partial_plot(train, col_pairs_2dpdp=[["C1", "C2"]], save_to_file="pdp.png")
if __name__ == "__main__":
pyunit_utils.standalone_test(partial_plot_test_with_user_splits)
else:
partial_plot_test_with_user_splits()
|
import cv2
def save_image(image, folder, now):
"""
    Save an image as a JPEG using OpenCV; an optional re-save with PIL
    for better compression is left commented out below.
"""
filename = '%s/%s.jpg'
filepath = filename % (folder, now)
    cv2.imwrite(filepath, image, [cv2.IMWRITE_JPEG_QUALITY, 80])  # cv2.IMWRITE_JPEG_QUALITY replaces the OpenCV 2-only cv2.cv.CV_IMWRITE_JPEG_QUALITY
# Resave it with pillow to do a better compression
# img = Image.open(filepath)
# img.save(filepath, optimize=True, quality=80)
def bcdToInt(chars):
    """Decode a packed binary-coded-decimal byte sequence into an integer.
    Each byte holds two decimal digits: the high nibble, then the low nibble.
    """
    result = 0
    for c in chars:
        for val in (c >> 4, c & 0xF):
            if val > 9:
                print('Warning: BCD code is beyond 0~9')
                val = 9
            result = 10 * result + val
    return result
#class RingBuffer
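# Minimal usage sketch for bcdToInt (illustrative, not part of the original
# module): each byte packs two BCD digits, so b'\x12\x34' decodes to 1234.
if __name__ == '__main__':
    assert bcdToInt(b'\x12\x34') == 1234
    assert bcdToInt(bytes([0x20, 0x21])) == 2021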
|
import numpy as np
from mcts import MCTS
class Player:
def set_player_ind(self, player_ind):
self.player = player_ind
def get_action(self, board, temp=1e-3, return_prob=False):
pass
class MCTSPlayer(Player):
"""
基于蒙特卡洛树的电脑玩家
"""
def __init__(self, policy_value_function, c_puct=5, n_playout=300, is_selfplay=False):
"""
:param policy_value_function: 论文里面的(p,v)=f(s)函数。接受一个board作为参数并返回一个(动作,概率)列表和在[-1, 1]范围的局面胜率的函数
:param c_puct: 论文里面的c_puct。一个在范围(0, inf)的数字,控制探索等级。值越小越依赖于Q值,值越大越依赖于P值
:param n_playout: 找MCTS叶子节点次数,即每次搜索次数
:param is_selfplay: 是否是自己与自己对局
"""
self.mcts = MCTS(policy_value_function, c_puct, n_playout)
self._is_selfplay = is_selfplay
def set_player_ind(self, p):
self.player = p
def reset_player(self):
self.mcts.update_with_move(-1)
def get_action(self, board, temp=1e-3, return_prob=False):
        move_probs = np.zeros(board.get_action_count())  # the pi vector returned by MCTS, as in the AlphaGo Zero paper
if len(board.get_available_moves()) > 0:
acts, probs = self.mcts.get_move_probs(board, temp)
move_probs[list(acts)] = probs
if self._is_selfplay:
                move = np.random.choice(acts, p=0.75*probs + 0.25*np.random.dirichlet(0.3*np.ones(len(probs))))  # add Dirichlet noise for exploration during self-play
self.mcts.update_with_move(move)
else:
                move = np.random.choice(acts, p=probs)  # with the default temp=1e-3 this is nearly equivalent to picking the highest-probability move
self.mcts.update_with_move(-1)
if return_prob:
return move, move_probs
else:
return move
else:
print("WARNING: the board is full")
def __str__(self):
return "MCTS {}".format(self.player)
|
import sys
import logging
import os
from lark import Lark
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def run_parser(script, target, debug):
    if debug:
logging.getLogger().setLevel(logging.DEBUG)
logging.info("AZ Script Compiler v 0.1")
logging.info("loading grammar")
with open(os.path.join(__location__, '../grammar/azsc.lark'), 'r') as f:
grammar = f.read()
logging.info("loading script file")
try:
with open(script, 'r') as f:
text = f.read()
except IOError:
error_message = "script {0} file not found".format(script)
logging.error(error_message)
return "ERROR: " + error_message
logging.info("setting up parser")
lark = Lark(grammar)
logging.info("generating parse tree")
tree = lark.parse(text)
logging.debug("parse tree:\n" + tree.pretty())
logging.info("importing parse tree transformer")
from azsc.transformers.AZSTransformer import AZSTransformer
logging.info("compiling")
t = AZSTransformer()
t.transform(tree)
cmd = t.get_command()
    if debug:
logging.debug("context:")
ctx = t.get_context()
for c in ctx:
logging.debug("\t[%s]=%s", str(c), str(ctx[c]))
logging.info("done")
return cmd
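# Minimal command-line sketch (an assumption, not part of the original module):
# compile the script passed as the first argument; `target` is forwarded as
# None here because its semantics are not shown in this function.
if __name__ == '__main__':
    print(run_parser(sys.argv[1], None, '--debug' in sys.argv))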
|
from includes import *
'''
python -m RLTest --test tests_dag_basic.py --module path/to/redisai.so
'''
def test_dag_load(env):
con = get_connection(env, '{1}')
ret = con.execute_command(
"AI.TENSORSET persisted_tensor_1{1} FLOAT 1 2 VALUES 5 10")
env.assertEqual(ret, b'OK')
command = "AI.DAGEXECUTE LOAD 1 persisted_tensor_1{1}" \
" |> AI.TENSORGET persisted_tensor_1{1} VALUES"
ret = con.execute_command(command)
env.assertEqual(ret[0], [b'5', b'10'])
def test_dag_local_tensorset(env):
con = get_connection(env, '{1}')
command = "AI.DAGEXECUTE ROUTING {1} |> " \
"AI.TENSORSET volatile_tensor1 FLOAT 1 2 VALUES 5 10 |> " \
"AI.TENSORSET volatile_tensor2 FLOAT 1 2 VALUES 5 10 "
ret = con.execute_command(command)
env.assertEqual(ret, [b'OK',b'OK'])
    # assert that the volatile tensor does not exist in the keyspace
ret = con.execute_command("EXISTS volatile_tensor")
env.assertEqual(ret, 0)
def test_dagro_local_tensorset(env):
con = get_connection(env, '{1}')
command = "AI.DAGEXECUTE_RO ROUTING {1} |> " \
"AI.TENSORSET volatile_tensor1 FLOAT 1 2 VALUES 5 10 |> " \
"AI.TENSORSET volatile_tensor2 FLOAT 1 2 VALUES 5 10 "
ret = con.execute_command(command)
env.assertEqual(ret, [b'OK',b'OK'])
# assert that volatile_tensor does not exist
ret = con.execute_command("EXISTS volatile_tensor")
env.assertEqual(ret, 0 )
def test_dag_local_tensorset_persist(env):
con = get_connection(env, '{1}')
command = "AI.DAGEXECUTE " \
"PERSIST 1 tensor1{1} |> " \
"AI.TENSORSET tensor1{1} FLOAT 1 2 VALUES 5 10"
ret = con.execute_command(command)
env.assertEqual(ret, [b'OK'])
# assert that PERSIST succeeded.
ret = con.execute_command("EXISTS tensor1{1}")
env.assertEqual(ret, 1 )
ret = con.execute_command("AI.TENSORGET tensor1{1} META VALUES")
env.assertEqual(ret, [b'dtype', b'FLOAT', b'shape', [1, 2], b'values', [b'5', b'10']])
def test_dag_multilocal_tensorset_persist(env):
con = get_connection(env, '{1}')
command = "AI.DAGEXECUTE " \
"PERSIST 1 tensor3:{1} |> " \
"AI.TENSORSET tensor1{1} FLOAT 1 2 VALUES 5 10 |> " \
"AI.TENSORSET tensor2 FLOAT 1 2 VALUES 5 10 |> " \
"AI.TENSORSET tensor3:{1} FLOAT 1 2 VALUES 5 10 |> " \
"AI.TENSORSET tensor4:{1} FLOAT 1 2 VALUES 5 10 "
ret = con.execute_command(command)
env.assertEqual([b'OK',b'OK',b'OK',b'OK'],ret)
    # assert that tensor1 was not persisted (it is not in the PERSIST list)
ret = con.execute_command("EXISTS tensor1{1}")
env.assertEqual(ret, 0 )
    # assert that tensor2 was not persisted
ret = con.execute_command("EXISTS tensor2")
env.assertEqual(ret, 0 )
# assert that PERSIST succeeded.
ret = con.execute_command("EXISTS tensor3:{1}")
env.assertEqual(ret, 1 )
    # assert that tensor4 was not persisted
ret = con.execute_command("EXISTS tensor4:{1}")
env.assertEqual(ret, 0 )
ret = con.execute_command("AI.TENSORGET tensor3:{1} META VALUES")
env.assertEqual(ret, [b'dtype', b'FLOAT', b'shape', [1, 2], b'values', [b'5', b'10']])
def test_dag_local_tensorset_tensorget_persist(env):
con = get_connection(env, '{1}')
command = "AI.DAGEXECUTE PERSIST 1 tensor1{1} |> " \
"AI.TENSORSET tensor1{1} FLOAT 1 2 VALUES 5 10 |> " \
"AI.TENSORGET tensor1{1} VALUES"
ret = con.execute_command(command)
env.assertEqual(ret, [b'OK', [b'5', b'10']])
ret = con.execute_command("AI.TENSORGET tensor1{1} VALUES")
env.assertEqual(ret, [b'5', b'10'])
def test_dag_local_multiple_tensorset_on_same_tensor(env):
con = get_connection(env, '{1}')
command = "AI.DAGEXECUTE PERSIST 1 tensor1{1} |> " \
"AI.TENSORSET tensor1{1} FLOAT 1 2 VALUES 5 10 |> " \
"AI.TENSORGET tensor1{1} META VALUES |> " \
"AI.TENSORSET tensor1{1} FLOAT 1 4 VALUES 20 40 60 80 |> " \
"AI.TENSORGET tensor1{1} META VALUES"
ret = con.execute_command(command)
env.assertEqual([
b'OK',
[b'dtype', b'FLOAT', b'shape', [1, 2], b'values', [b'5', b'10']],
b'OK',
[b'dtype', b'FLOAT', b'shape', [1, 4], b'values', [b'20', b'40', b'60', b'80']]
], ret)
ret = con.execute_command("AI.TENSORGET tensor1{1} META VALUES")
env.assertEqual([b'dtype', b'FLOAT', b'shape', [1, 4], b'values', [b'20', b'40',b'60',b'80']],ret)
def test_dag_load_persist_tensorset_tensorget(env):
con = get_connection(env, '{1}')
ret = con.execute_command(
"AI.TENSORSET persisted_tensor_1{1} FLOAT 1 2 VALUES 5 10")
env.assertEqual(ret, b'OK')
ret = con.execute_command(
"AI.TENSORSET persisted_tensor_2:{1} FLOAT 1 3 VALUES 0 0 0")
env.assertEqual(ret, b'OK')
command = "AI.DAGEXECUTE LOAD 2 persisted_tensor_1{1} persisted_tensor_2:{1}" \
" PERSIST 1 volatile_tensor_persisted{1} |> " \
"AI.TENSORSET volatile_tensor_persisted{1} FLOAT 1 2 VALUES 5 10 |> " \
"AI.TENSORGET persisted_tensor_1{1} META VALUES |> " \
"AI.TENSORGET persisted_tensor_2:{1} META VALUES "
ret = con.execute_command(command)
env.assertEqual(ret, [b'OK', [b'dtype', b'FLOAT', b'shape', [1, 2], b'values', [b'5', b'10']], [
b'dtype', b'FLOAT', b'shape', [1, 3], b'values', [b'0', b'0', b'0']]])
ret = con.execute_command("AI.TENSORGET volatile_tensor_persisted{1} META VALUES")
env.assertEqual(ret, [b'dtype', b'FLOAT', b'shape', [1, 2], b'values', [b'5', b'10']])
def test_dag_keyspace_tensorget(env):
con = get_connection(env, '{1}')
ret = con.execute_command(
"AI.TENSORSET persisted_tensor{1} FLOAT 1 2 VALUES 5 10")
env.assertEqual(ret, b'OK')
command = "AI.DAGEXECUTE LOAD 1 persisted_tensor{1} " \
"|> AI.TENSORGET persisted_tensor{1} VALUES"
ret = con.execute_command(command)
env.assertEqual(ret, [[b'5', b'10']])
def test_dag_ro_keyspace_tensorget(env):
con = get_connection(env, '{1}')
ret = con.execute_command(
"AI.TENSORSET persisted_tensor{1} FLOAT 1 2 VALUES 5 10")
env.assertEqual(ret, b'OK')
command = "AI.DAGEXECUTE_RO LOAD 1 persisted_tensor{1} |> " \
"AI.TENSORGET persisted_tensor{1} VALUES"
ret = con.execute_command(command)
env.assertEqual(ret, [[b'5', b'10']])
def test_dag_keyspace_and_localcontext_tensorget(env):
con = get_connection(env, '{1}')
ret = con.execute_command(
"AI.TENSORSET persisted_tensor{1} FLOAT 1 2 VALUES 5 10")
env.assertEqual(ret, b'OK')
command = "AI.DAGEXECUTE LOAD 1 persisted_tensor{1} |> " \
"AI.TENSORSET volatile_tensor FLOAT 1 2 VALUES 5 10 |> " \
"AI.TENSORGET persisted_tensor{1} VALUES |> " \
"AI.TENSORGET volatile_tensor VALUES"
ret = con.execute_command(command)
env.assertEqual(ret, [b'OK', [b'5', b'10'], [b'5', b'10']])
def test_dag_with_timeout(env):
if not TEST_TF:
return
con = get_connection(env, '{1}')
batch_size = 2
minbatch_size = 2
timeout = 1
model_name = 'model{1}'
model_pb, input_var, output_var, labels, img = load_mobilenet_v2_test_data()
con.execute_command('AI.MODELSTORE', model_name, 'TF', DEVICE,
'BATCHSIZE', batch_size, 'MINBATCHSIZE', minbatch_size,
'INPUTS', 1, input_var,
'OUTPUTS', 1, output_var,
'BLOB', model_pb)
con.execute_command('AI.TENSORSET', 'input{1}',
'FLOAT', 1, img.shape[1], img.shape[0], img.shape[2],
'BLOB', img.tobytes())
res = con.execute_command('AI.DAGEXECUTE',
'LOAD', '1', 'input{1}',
'TIMEOUT', timeout, '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 1, 'input{1}', 'OUTPUTS', 1, 'output{1}',
'|>', 'AI.MODELEXECUTE', model_name,
'INPUTS', 1, 'input{1}', 'OUTPUTS', 1, 'output{1}')
env.assertEqual(b'TIMEDOUT', res)
def test_dag_with_string_tensor(env):
if not TEST_ONNX:
env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('identity_string.onnx')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'ONNX', DEVICE, 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
# Execute onnx model whose input is string tensor with shape [2,2], that outputs the input
string_tensor_blob = b'input11\0input12\0input21\0input22\0'
ret = con.execute_command('AI.DAGEXECUTE', 'ROUTING', '{1}',
'|>', 'AI.TENSORSET', 'in_tensor{1}', 'STRING', 2, 2, 'BLOB', string_tensor_blob,
'|>', 'AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'in_tensor{1}', 'OUTPUTS', 1, 'out_tensor{1}',
'|>', 'AI.TENSORGET', 'out_tensor{1}', 'VALUES')
env.assertEqual(ret, [b'OK', b'OK', [b'input11', b'input12', b'input21', b'input22']])
|
import argparse
import glob
import io
import os
import re
import ssl
from time import sleep
import zipfile
from selenium import webdriver
# from selenium.webdriver.common.keys import Keys
from webdriver_manager.chrome import ChromeDriverManager
import requests
from lxml import etree
from scour import scour
# Scour removes certain style attributes if their value is the
# SVG-defined default. However, this module sets certain style
# attributes in doc-level CSS, which overrides the browser default.
# E.g. this module sets `text-anchor` to default to `middle`, but
# browser defaults it to `start` and thus Scour removes it.
# Without the attribute present, this module can't account for
# non-default `text-anchor` values in `hoist_style`, so the deletion
# below ensures Scour does not strip the attribute.
#
# TODO: Other props defined in `style` in
# `custom_lossless_optimize_svg` might be susceptible to the issue
# described above. Consider checking more thoroughly.
del scour.default_properties['text-anchor']
# # Enable importing local modules when directly calling as script
# if __name__ == "__main__":
# cur_dir = os.path.join(os.path.dirname(__file__))
# sys.path.append(cur_dir + "/..")
# from lib import download_gzip
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Organisms configured for WikiPathways caching
organisms = [
"Homo sapiens",
"Mus musculus"
# "Danio rerio",
# "Gallus gallus",
# "Rattus norvegicus",
# "Pan troglodytes",
# "Canis lupus familiaris",
# "Equus caballus",
# "Bos taurus",
# "Caenorhabditis elegans"
]
def get_svg_zip_url(organism):
date = "20211110"
base = f"https://wikipathways-data.wmcloud.org/{date}/svg/"
org_us = organism.replace(" ", "_")
url = f"{base}wikipathways-{date}-svg-{org_us}.zip"
return url
def get_pathway_ids_and_names(organism):
base_url = "https://webservice.wikipathways.org/listPathways"
params = f"?organism={organism}&format=json"
url = base_url + params
response = requests.get(url)
data = response.json()
ids_and_names = [[pw['id'], pw['name']] for pw in data['pathways']]
return ids_and_names
def unwrap_leaf(tree, has_bloat, leaf=None, selector=None):
"""Helper for `unwrap` function
"""
ns_map = {"svg": "http://www.w3.org/2000/svg"}
if not selector:
selector = f"//svg:g[{has_bloat}]/svg:g[{has_bloat}]/svg:" + leaf
elements = tree.xpath(selector, namespaces=ns_map)
for element in elements:
parent = element.getparent()
grandparent = parent.getparent()
grandparent.replace(parent, element)
def get_has_class_clause(raw_class):
"""Enable typical class selectors in XPath, akin to CSS ".foo"
XPath makes it complicated to detect if a string is among class values.
That functionality is typical for class selectors, so tailor syntax to
ease such common queries.
"""
normed_class = "concat(' ', normalize-space(@class), ' ')"
has_class_clause = 'contains(' + normed_class + ', "' + raw_class + '")'
return has_class_clause
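# For reference (sketch of the clause produced above): get_has_class_clause("Edge")
# returns
#     contains(concat(' ', normalize-space(@class), ' '), "Edge")
# which is embedded into selectors such as //svg:g[<clause>]/svg:path below.
# Note it is a substring match on the normalized class attribute, not a strict
# token match, so e.g. "Group" would also match elements classed "GroupGroup".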
def unwrap(tree):
"""Many elements are extraneously wrapped; this pares them
"""
ns_map = {"svg": "http://www.w3.org/2000/svg"}
# XPath has poor support for typical class attributes,
# so tailor syntax accordingly
wrapped = [
"Protein", "Metabolite", "Rna", "Label", "GeneProduct", "Unknown"
]
all_wrapped = " or ".join([
get_has_class_clause(w) for w in wrapped
])
unwrap_leaf(tree, all_wrapped, "rect")
unwrap_leaf(tree, all_wrapped, "use")
text_sel = f"//svg:g[{all_wrapped}]/svg:g/svg:text"
unwrap_leaf(tree, all_wrapped, selector=text_sel)
# bloat_groups = ["GroupGroup", "GroupComplex", "GroupNone"]
# has_bloats = " or ".join([
# 'contains(' + normed_class + ', "' + b + '")' for b in bloat_groups
# ])
# group_child_sel = f"//svg:g[{has_bloats}]/svg:g[{has_bloats}]/*"
# unwrap_leaf(tree, has_bloats, selector=group_child_sel)
return tree
def remove_extra_tspans(tree):
ns_map = {"svg": "http://www.w3.org/2000/svg"}
sizes = []
texts = tree.xpath("//svg:text", namespaces=ns_map)
# print('text_sel', text_sel)
for text in texts:
# print('text', etree.tostring(text))
tspans = text.xpath('svg:tspan', namespaces=ns_map)
if len(tspans) == 1:
tspan = tspans[0]
content = tspan.text
font_size = tspan.attrib["font-size"]
sizes.append(font_size)
# print('content', content)
text.attrib["font-size"] = font_size
text.remove(tspan)
text.text = content
default_font_size = None
if len(sizes) > 0:
default_font_size = max(sizes, key = sizes.count)
return tree, default_font_size
def trim_markers(tree):
"""Remove unused marker elements from diagram
"""
ns_map = {"svg": "http://www.w3.org/2000/svg"}
used_marker_ids = []
# Identify markers that the diagram actually uses
elements = tree.xpath("//*")
for element in elements:
attrs = element.attrib
start = attrs["marker-start"] if "marker-start" in attrs else ""
start = start.replace("url(#", "").replace(")", "")
end = attrs["marker-end"] if "marker-end" in attrs else ""
end = end.replace("url(#", "").replace(")", "")
if start not in used_marker_ids:
used_marker_ids.append(start)
if end not in used_marker_ids:
used_marker_ids.append(end)
# Remove markers that are not used
markers = tree.xpath('//svg:g[@id="marker-defs"]/svg:marker', namespaces=ns_map)
for marker in markers:
attrs = marker.attrib
id = attrs["id"] if "id" in attrs else ""
if id not in used_marker_ids:
marker.getparent().remove(marker)
return tree
def condense_colors(svg):
"""Condense colors by using hexadecimal abbreviations where possible.
Consider using an abstract, general approach instead of hard-coding.
"""
svg = re.sub('#000000', '#000', svg)
svg = re.sub('#ff0000', '#f00', svg)
svg = re.sub('#00ff00', '#0f0', svg)
svg = re.sub('#0000ff', '#00f', svg)
svg = re.sub('#00ffff', '#0ff', svg)
svg = re.sub('#ff00ff', '#f0f', svg)
svg = re.sub('#ffff00', '#ff0', svg)
svg = re.sub('#ffffff', '#fff', svg)
svg = re.sub('#cc0000', '#c00', svg)
svg = re.sub('#00cc00', '#0c0', svg)
svg = re.sub('#0000cc', '#00c', svg)
svg = re.sub('#00cccc', '#0cc', svg)
svg = re.sub('#cc00cc', '#c0c', svg)
svg = re.sub('#cccc00', '#cc0', svg)
svg = re.sub('#cccccc', '#ccc', svg)
svg = re.sub('#999999', '#999', svg)
svg = re.sub('#808080', 'grey', svg)
return svg
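# A more general alternative (sketch, not used above): collapse any #rrggbb color
# whose channel bytes are doubled hex digits into the #rgb shorthand with one
# regex, rather than hard-coding each color (named-color cases such as
# #808080 -> grey would still need explicit handling).
def condense_colors_general(svg):
    return re.sub(
        r'#([0-9a-fA-F])\1([0-9a-fA-F])\2([0-9a-fA-F])\3',
        r'#\1\2\3',
        svg
    )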
def prep_edge_style_hoist(tree):
ns_map = {"svg": "http://www.w3.org/2000/svg"}
edge_class = get_has_class_clause("Edge")
selector = '//svg:g[' + edge_class + ']/svg:path'
elements = tree.xpath(selector, namespaces=ns_map)
defaults_by_prop = {
"fill": "transparent",
"stroke": "#000",
"marker-end": "url(#markerendarrow000000white)"
}
noneable_props = ["marker-end"]
return elements, defaults_by_prop, noneable_props
def prep_rect_style_hoist(tree):
ns_map = {"svg": "http://www.w3.org/2000/svg"}
selector = '//svg:rect'
elements = tree.xpath(selector, namespaces=ns_map)
defaults_by_prop = {
"fill": "#fff",
"stroke": "#000"
}
noneable_props = ["stroke"]
return elements, defaults_by_prop, noneable_props
def prep_text_style_hoist(tree, defaults):
ns_map = {"svg": "http://www.w3.org/2000/svg"}
selector = '//svg:text'
elements = tree.xpath(selector, namespaces=ns_map)
defaults_by_prop = {
"fill": "#000",
"text-anchor": "middle",
# "font-weight": "normal"
}
if 'text' in defaults:
defaults_by_prop.update(defaults['text'])
noneable_props = []
return elements, defaults_by_prop, noneable_props
def prep_metabolite_rect_style_hoist(tree):
ns_map = {"svg": "http://www.w3.org/2000/svg"}
has_class_clause = get_has_class_clause("Metabolite")
selector = f"//svg:g[{has_class_clause}]/rect"
elements = tree.xpath(selector, namespaces=ns_map)
defaults_by_prop = {
"stroke": "#00f"
}
noneable_props = []
return elements, defaults_by_prop, noneable_props
def hoist_style(tree, defaults):
"""Move default styles from elements to `style` tag
The raw diagram's styles are encoded as attributes on every element.
Leveraging CSS specificity rules [1], we can encode that more space-
efficiently by "hoisting" style values to the `style` tag, if the style is
the default, and setting any non-default styles using the `style`
attribute directly on the element.
[1] https://developer.mozilla.org/en-US/docs/Web/CSS/Specificity
"""
ns_map = {"svg": "http://www.w3.org/2000/svg"}
for name in ['metabolite_rect', 'edge', 'rect', 'text']:
if name == 'edge':
e, d, n = prep_edge_style_hoist(tree)
elif name == 'rect':
e, d, n = prep_rect_style_hoist(tree)
elif name == 'text':
e, d, n = prep_text_style_hoist(tree, defaults)
elif name == 'metabolite_rect':
e, d, n = prep_metabolite_rect_style_hoist(tree)
elements, defaults_by_prop, noneable_props = [e, d, n]
for element in elements:
attrs = element.attrib
styles = []
            # Iterate over each property that can be encoded as a CSS style
for prop in defaults_by_prop:
if prop in attrs:
default = defaults_by_prop[prop]
value = attrs[prop]
# Remove the attribute -- this is where we save space
del element.attrib[prop]
# If the value of this style prop isn't the default, then
# add it to the list of styles to be encoded inline on the
# element
if value != default:
styles.append(f"{prop}:{value}")
elif prop in noneable_props:
styles.append(f"{prop}:none")
# Set any non-default styles on the element. Like the raw diagram,
# but this `style` attribute has a higher CSS precedence than
# styles set in the `style` tag, *unlike* styles set as direct
# attributes.
if len(styles) > 0:
element.attrib['style'] = ";".join(styles)
return tree
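# Illustration of the hoisting above (sketch): a rect arriving as
#     <rect fill="#fff" stroke="#f00"/>
# loses both attributes; the default fill comes from the doc-level `style` tag,
# while the non-default stroke is re-encoded inline as style="stroke:#f00".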
def trim_symbols_and_uses_and_groups(tree):
ns_map = {"svg": "http://www.w3.org/2000/svg"}
# Remove unused color attribute in group elements
groups = tree.xpath("//svg:g", namespaces=ns_map)
for group in groups:
if 'color' in group.attrib:
del group.attrib['color']
used_symbols = []
group_uses = tree.xpath("//svg:g/svg:use", namespaces=ns_map)
for group_use in group_uses:
if group_use.attrib["href"] and group_use.attrib["href"] != "#none":
# E.g. href="#foo" -> foo
used_symbols.append(group_use.attrib["href"][1:])
else:
group_use.getparent().remove(group_use)
symbols = tree.xpath("//svg:symbol", namespaces=ns_map)
for symbol in symbols:
id = symbol.attrib["id"]
if id not in used_symbols or id == 'none':
symbol.getparent().remove(symbol)
symbol_children = tree.xpath("//svg:symbol/*", namespaces=ns_map)
for sc in symbol_children:
if "stroke" in sc.attrib and sc.attrib["stroke"] == "currentColor":
del sc.attrib["stroke"]
return tree
def trim_transform(tree):
ns_map = {"svg": "http://www.w3.org/2000/svg"}
rects = tree.xpath("//svg:rect", namespaces=ns_map)
uses = tree.xpath("//svg:use", namespaces=ns_map)
elements = rects + uses
for element in elements:
if "transform" in element.attrib:
matrix = element.attrib["transform"]
if "matrix" not in matrix:
continue
coord_string = matrix.replace("matrix(", "").replace(")", "")
coords = [float(c) for c in coord_string.split()]
is_significant = any([c > 1.1 for c in coords])
if not is_significant:
del element.attrib["transform"]
return tree
def custom_lossless_optimize_svg(svg, pwid):
"""Losslessly decrease size of WikiPathways SVG
"""
ns_map = {"svg": "http://www.w3.org/2000/svg"}
svg = re.sub(pwid.lower(), '', svg)
svg = condense_colors(svg)
svg = svg.replace('<?xml version="1.0" encoding="UTF-8"?>\n', '')
tree = etree.fromstring(svg)
controls = tree.xpath('//*[@class="svg-pan-zoom-control"]')[0]
tree.remove(controls)
controls_style = tree.xpath('//*[@id="svg-pan-zoom-controls-styles"]')[0]
controls_style.getparent().remove(controls_style)
metadata = tree.xpath('//*[@id="' + pwid + '-text"]')[0]
metadata.getparent().remove(metadata)
icons_id = "icon-defs-ArcPathVisioBraceEllipseEndoplasmicReticulumGolgiApparatusHexagonPathVisioMimDegradationMitochondriaOctagonPentagonPathVisioRectangleRoundedRectangleSarcoplasmicReticulumTriangleEquilateralEastTrianglePathVisionone"
icon_defs = tree.xpath('//*[@id="' + icons_id + '"]')[0]
icon_defs.attrib["id"] = "icon-defs"
tree = trim_markers(tree)
tree = trim_symbols_and_uses_and_groups(tree)
tree = trim_transform(tree)
tree, default_font_size = remove_extra_tspans(tree)
font_size_css = ""
defaults = {}
if default_font_size:
defaults = {
"text": {
"font-size": default_font_size
}
}
font_size_css = "font-size: " + default_font_size + ";"
tree = hoist_style(tree, defaults)
tree = unwrap(tree)
svg = etree.tostring(tree).decode("utf-8")
svg = '<?xml version="1.0" encoding="UTF-8"?>\n' + svg
font_family = "\'Liberation Sans\', Arial, sans-serif"
svg = re.sub('font-family="Arial"', '', svg)
svg = re.sub(f'font-family="{font_family}"', '', svg)
style = (
"<style>" +
"svg.Diagram {" +
f"font-family: {font_family}; "
"}" +
".Diagram path {" +
"fill: transparent;" +
"stroke: #000;" +
# "stroke-width: 2;" +
"marker-end: url(#mea);"
"}" +
".Diagram symbol path {" +
"fill: inherit;" +
"stroke: inherit;" +
"stroke-width: inherit;" +
"marker-end: inherit;"
"}" +
".Diagram rect {" +
"fill: #fff;" +
"stroke: #000;" +
"}" +
".Diagram text {" +
"dominant-baseline: central;" +
"overflow: hidden;" +
"text-anchor: middle;" +
"fill: #000;" +
font_size_css +
# "stroke: #000; " +
"}" +
# "g > a {" +
# "color: #000;" +
# "}" +
"</style>"
)
old_style = '<style type="text/css">'
svg = re.sub(old_style, style + old_style, svg)
svg = re.sub('xml:space="preserve"', '', svg)
# Remove "px" from attributes where numbers are assumed to be pixels.
svg = re.sub(r'width="([0-9.]+)px"', r'width="\1"', svg)
svg = re.sub(r'height="([0-9.]+)px"', r'height="\1"', svg)
svg = re.sub(r'stroke-width="([0-9.]+)px"', r'stroke-width="\1"', svg)
svg = re.sub('fill="inherit"', '', svg)
svg = re.sub('stroke-width="inherit"', '', svg)
svg = re.sub('color="inherit"', '', svg)
svg = re.sub('fill-opacity="0"', '', svg)
svg = re.sub('dominant-baseline="central"', '', svg)
svg = re.sub('overflow="hidden"', '', svg)
# Match any anchor or group tag, up until closing angle bracket (>), that
# includes a color attribute with the value black (#000).
# For such matches, remove the color attribute but not anything else.
svg = re.sub(r'<g([^>]*)(color="#000")', r'<g \1', svg)
svg = re.sub(r'<(rect class="Icon"[^>]*)(color="#000")', r'<rect \1', svg)
svg = re.sub(r'<(rect class="Icon"[^>]*)(fill="#000")', r'<rect \1', svg)
svg = re.sub(r'<(text class="Text"[^>]*)(fill="#000")', r'<\1', svg)
svg = re.sub(r'<(text class="Text"[^>]*)(stroke="white" stroke-width="0")', r'<\1', svg)
svg = re.sub(r'<(text[^>]*)(clip\-path="[^"]*)"', r'<\1', svg)
# svg = re.sub(r'<defs><clipPath.*</defs>', r'', svg)
svg = re.sub(r'class="([^"]*)( Node)"', r'class="\1"', svg)
svg = re.sub(r'class="([^"]*)( textContent)"', r'class="\1"', svg)
svg = re.sub(r'id="[^"]*-text-clipPath"', '', svg)
# Remove class attributes from elements where it can be deduced
svg = re.sub(r'<rect([^>]*)(class="[^"]*)"', r'<rect \1', svg)
svg = re.sub(r'<text([^>]*)(class="[^"]*)"', r'<text \1', svg)
svg = re.sub(r'<tspan([^>]*)(class="[^"]*)"', r'<tspan \1', svg)
svg = re.sub(r'<path([^>]*)(id="[^"]*)"', r'<path \1', svg)
# svg = re.sub(r'<path([^>]*)(fill="transparent")', r'<path \1', svg)
# svg = re.sub('text-anchor="middle"', '', svg)
svg = re.sub(r'markerendarrow', 'mea', svg)
svg = re.sub(r'markerendmim', 'mem', svg)
svg = re.sub('mea000000white', 'mea', svg)
svg = re.sub(r'mea([^white]+)white', r'mea\1', svg)
svg = re.sub('mea000000', 'mea000', svg)
svg = re.sub('meaff0000', 'meaf00', svg)
svg = re.sub('mea00ff00', 'mea0f0', svg)
svg = re.sub('mea0000ff', 'mea00f', svg)
svg = re.sub('mea00ffff', 'mea0ff', svg)
svg = re.sub('meaff00ff', 'meaf0f', svg)
svg = re.sub('meaffff00', 'meaff0', svg)
svg = re.sub('meaffffff', 'meafff', svg)
svg = re.sub('meacc0000', 'meac00', svg)
svg = re.sub('mea00cc00', 'mea0c0', svg)
svg = re.sub('mea0000cc', 'mea00c', svg)
svg = re.sub('mea00cccc', 'mea0cc', svg)
svg = re.sub('meacc00cc', 'meac0c', svg)
svg = re.sub('meacccc00', 'meacc0', svg)
svg = re.sub('meacccccc', 'meaccc', svg)
svg = re.sub('mea999999', 'mea999', svg)
svg = re.sub('mea808080', 'meagrey', svg)
svg = re.sub('000000white', '000', svg)
svg = re.sub(r'id="[^"]*-icon" ', '', svg)
svg = re.sub(r'id="[^"]*-text" class="[^"]*"', '', svg)
svg = re.sub(r'\d*\.\d{2,}', lambda m: format(float(m.group(0)), '.2f'), svg)
# svg = re.sub(
# r'text-anchor="middle"><tspan\s+x="0" y="0"',
# r'text-anchor="middle"><tspan ',
# svg
# )
return svg
def custom_lossy_optimize_svg(svg):
"""Lossily decrease size of WikiPathways SVG
The broad principle is to remove data that does not affect static render,
but could affect dynamic rendering (e.g. highlighting a specific gene).
Data removed here could be inferred and/or repopulated in the DOM given a
schema. Such a schema would first need to be defined and made available in
client-side software. It might make sense to do that in the pvjs library.
"""
# Remove non-leaf pathway categories.
svg = re.sub('SingleFreeNode DataNode ', '', svg)
svg = re.sub('DataNode SingleFreeNode ', '', svg)
svg = re.sub('Shape SingleFreeNode', '', svg)
svg = re.sub('SingleFreeNode Label', 'Label', svg)
svg = re.sub('Label SingleFreeNode', 'Label', svg)
svg = re.sub('Edge Interaction ', '', svg)
svg = re.sub('Interaction Edge ', '', svg)
svg = re.sub('Edge Interaction', 'Edge', svg)
svg = re.sub('Interaction Edge', 'Edge', svg)
# svg = re.sub('class="Interaction,Edge" ', '', svg)
svg = re.sub('GraphicalLine Edge', 'Edge', svg)
svg = re.sub('Metabolite Node Icon', 'Icon', svg)
svg = re.sub('Label Node Icon', 'Icon', svg)
svg = re.sub('GroupGroup Node Icon', 'Icon', svg)
svg = re.sub('GroupComplex Node Icon', 'Icon', svg)
svg = re.sub('Group Complex Icon', 'Icon', svg)
svg = re.sub('Anchor Burr', 'AB', svg)
svg = re.sub(r'class="[^"]*,[^"]*"', '', svg)
# Interaction data attributes
    svg = re.sub(r'SBO_[0-9]+\s*', '', svg)
    # Gene data attributes
    svg = re.sub(r'Entrez_Gene_[0-9]+\s*', '', svg)
    svg = re.sub(r'Ensembl_ENS\w+\s*', '', svg)
    svg = re.sub(r'HGNC_\w+\s*', '', svg)
    svg = re.sub(r'Wikidata_Q[0-9]+\s*', '', svg)
    svg = re.sub(r'P594_ENSG[0-9]+\s*', '', svg)
    svg = re.sub(r'P351_\w+\s*', '', svg)
    svg = re.sub(r'P353_\w+\s*', '', svg)
    svg = re.sub(r'P594_ENSG[0-9]+\s*', '', svg)
    # Metabolite data attributes
    svg = re.sub(r'P683_CHEBI_[0-9]+\s*', '', svg)
    svg = re.sub(r'P2057_\w+\s*', '', svg)
    svg = re.sub(r'ChEBI_[0-9]+\s*', '', svg)
    svg = re.sub(r'ChEBI_CHEBI[0-9]+\s*', '', svg)
    svg = re.sub(r'ChEBI_CHEBI_[0-9]+\s*', '', svg)
    svg = re.sub(r'P683_[0-9]+', '', svg)
    svg = re.sub(r'HMDB_\w+\s*', '', svg)
    svg = re.sub(' Enzyme_Nomenclature_[0-9_]*', '', svg)
    svg = re.sub(' PubChem-compound_[0-9]*', '', svg)
    svg = re.sub(' Chemspider_[0-9]*', '', svg)
    svg = re.sub(' CAS_[0-9-]+', '', svg)
    # Other miscellaneous data attributes
    svg = re.sub(' Pfam_PF[0-9]+', '', svg)
    svg = re.sub(r' Uniprot-TrEMBL_\w+', '', svg)
    svg = re.sub(' WikiPathways_WP[0-9]+', '', svg)
# Group data attributes
svg = re.sub('Group GroupGroup', 'GroupGroup', svg)
svg = re.sub('Group GroupNone', 'GroupNone', svg)
svg = re.sub('Group Complex GroupComplex', 'GroupComplex', svg)
svg = re.sub('about="[^"]*"', '', svg)
svg = re.sub('typeof="[^"]*"', '', svg)
svg = re.sub(r'xlink:href="http[^\'" >]*"', '', svg)
svg = re.sub(r' href="#none"', '', svg)
svg = re.sub('target="_blank"', '', svg)
# svg = re.sub('font-weight="bold"', '', svg)
return svg
class WikiPathwaysCache():
def __init__(self, output_dir="data/", reuse=False):
self.output_dir = output_dir
self.tmp_dir = f"tmp/"
self.reuse = reuse
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
if not os.path.exists(self.tmp_dir):
os.makedirs(self.tmp_dir)
def fetch_svgs(self, ids_and_names, org_dir):
prev_error_wpids = []
error_wpids = []
error_path = org_dir + "error_wpids.csv"
if os.path.exists(error_path):
with open(error_path) as f:
prev_error_wpids = f.read().split(",")
error_wpids = prev_error_wpids
for i_n in ids_and_names:
id = i_n[0]
svg_path = org_dir + id + ".svg"
if self.reuse:
if os.path.exists(svg_path):
print(f"Found cache; skip processing {id}")
continue
elif id in prev_error_wpids:
print(f"Found previous error; skip processing {id}")
continue
url = f"https://www.wikipathways.org/index.php/Pathway:{id}?view=widget"
try:
driver = self.driver
except AttributeError:
# Only initializes once, and if not reusing populated cache
self.driver = webdriver.Chrome(ChromeDriverManager().install())
self.driver.implicitly_wait(3) # seconds
driver = self.driver
driver.get(url)
try:
sleep(1)
selector = "svg.Diagram"
raw_content = self.driver.find_element_by_css_selector(selector)
content = raw_content.get_attribute("outerHTML")
except Exception as e:
print(f"Encountered error when stringifying SVG for {id}")
error_wpids.append(id)
with open(error_path, "w") as f:
f.write(",".join(error_wpids))
sleep(0.5)
continue
svg = content.replace(
'typeof="Diagram" xmlns:xlink="http://www.w3.org/1999/xlink"',
'typeof="Diagram"'
)
print("Preparing and writing " + svg_path)
svg = '<?xml version="1.0" encoding="UTF-8"?>\n' + svg
with open(svg_path, "w") as f:
f.write(svg)
sleep(1)
def optimize_svgs(self, org_dir):
for svg_path in glob.glob(f'{org_dir}*.svg'):
# for svg_path in ["tmp/homo-sapiens/WP231.svg"]: # debug
with open(svg_path, 'r') as f:
svg = f.read()
svg = re.sub("fill-opacity:inherit;", "", svg)
# print('clean_svg')
# print(clean_svg)
original_name = svg_path.split("/")[-1]
name = original_name.split(".svg")[0]
pwid = re.search(r"WP\d+", name).group() # pathway ID
optimized_svg_path = self.output_dir + pwid + ".svg"
print(f"Optimizing to create: {optimized_svg_path}")
scour_options = scour.sanitizeOptions()
scour_options.remove_metadata = False
scour_options.newlines = False
scour_options.strip_comments = True
scour_options.strip_ids = False
scour_options.shorten_ids = False
scour_options.strip_xml_space_attribute = True
scour_options.keep_defs = True
try:
clean_svg = scour.scourString(svg, options=scour_options)
except Exception as e:
print(f"Encountered error while optimizing SVG for {pwid}")
continue
repo_url = "https://github.com/eweitz/cachome/tree/main/"
code_url = f"{repo_url}src/wikipathways.py"
data_url = f"{repo_url}{optimized_svg_path}"
wp_url = f"https://www.wikipathways.org/index.php/Pathway:{pwid}"
provenance = "\n".join([
"<!--",
f" WikiPathways page: {wp_url}",
f" URL for this compressed file: {data_url}",
# f" Uncompressed SVG file: {original_name}",
# f" From upstream ZIP archive: {url}",
f" Source code for compression: {code_url}",
"-->"
])
clean_svg = clean_svg.replace(
'<?xml version="1.0" encoding="UTF-8"?>',
'<?xml version="1.0" encoding="UTF-8"?>\n' + provenance
)
# clean_svg = re.sub('tspan x="0" y="0"', 'tspan', clean_svg)
clean_svg = custom_lossless_optimize_svg(clean_svg, pwid)
clean_svg = custom_lossy_optimize_svg(clean_svg)
with open(optimized_svg_path, "w") as f:
f.write(clean_svg)
def populate_by_org(self, organism):
"""Fill caches for a configured organism
"""
org_dir = self.tmp_dir + organism.lower().replace(" ", "-") + "/"
if not os.path.exists(org_dir):
os.makedirs(org_dir)
ids_and_names = get_pathway_ids_and_names(organism)
# ids_and_names = [["WP231", "test"]]
# print("ids_and_names", ids_and_names)
self.fetch_svgs(ids_and_names, org_dir)
self.optimize_svgs(org_dir)
def populate(self):
"""Fill caches for all configured organisms
Consider parallelizing this.
"""
for organism in organisms:
self.populate_by_org(organism)
# Command-line handler
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--output-dir",
help=(
"Directory to put outcome data. (default: %(default))"
),
default="data/"
)
parser.add_argument(
"--reuse",
help=(
"Whether to use previously-downloaded raw SVG zip archives"
),
action="store_true"
)
args = parser.parse_args()
output_dir = args.output_dir
reuse = args.reuse
WikiPathwaysCache(output_dir, reuse).populate()
|
"""
Complete the preOrder function in your editor below, which has parameter: a pointer to the root of a binary tree.
It must print the values in the tree's postorder traversal as a single line of space-separated values.
"""
def traverse_in_order_recursive(root):
if root:
traverse_in_order_recursive(root.left)
traverse_in_order_recursive(root.right)
print(root.data, end=" ")
def traverse_in_order_iterative(root):
if root:
stack = [root]
node = root
while node:
if node.left:
stack.append(node)
node = node.left
else:
print(node.data)
node = stack.pop()
root = Node("A")
root.left = Node("B")
root.left.left = Node("C")
root.left.right = Node("D")
root.right = Node("E")
root.right.left = Node("G")
root.right.right = Node("F")
traverse_in_order_iterative(root)
|
from flask import Flask, render_template, request, session, redirect, url_for
import pymysql
app = Flask(__name__)
app.secret_key = '!hv@cRsh0nU'
class Merchant:
name = ""
username = ""
password = ""
organisation = ""
abn = None
def __init__(self, name, usrnm, passwd, abn, org):
self.name = name
self.username = usrnm
self.password = passwd
self.abn = abn
self.organisation = org
class Customer:
name = ""
email = ""
username = ""
password = ""
def __init__(self, name, email, usrnm, passwd):
self.name = name
self.email = email
self.username = usrnm
self.password = passwd
class dataFish:
product_id = ""
species = ""
weight = ""
L1 = ""
L2 = ""
L3 = ""
height = ""
width = ""
storage_temperature = ""
estimated_storage_life_in_months = ""
nutrients = ""
best_before_calculated = ""
storage_affecting_factor = ""
price = ""
def __init__(self, product_id, species, weight, L1, L2, L3, height, width, storage_temperature, estimated_storage_life_in_months, nutrients, best_before_calculated, storage_affecting_factor, price):
self.product_id = product_id
self.species = species
self.weight = weight
self.L1 = L1
self.L2 = L2
self.L3 = L3
self.height = height
self.width = width
self.storage_temperature = storage_temperature
self.estimated_storage_life_in_months = estimated_storage_life_in_months
self.nutrients = nutrients
self.best_before_calculated = best_before_calculated
self.storage_affecting_factor = storage_affecting_factor
self.price = price
class Sql:
SERVER = "localhost"
USR = "root"
PWD = "mypass123"
DB = "Fishery"
GET_DATA_QRY = "Select product_id,species,weight,L1,L2,L3,height,width,storage_temperature,estimated_storage_life_in_months,nutrients,best_before_calculated,storage_affecting_factor,price from newfishes"
MERCH_SIGNUP_QRY = "Insert into merchant (name, organisation, username, password,abn) values('{}','{}','{}','{}','{}')"
GET_MERCH = "Select name, organisation, username, password,abn from merchant where isConfirmed is NULL"
GET_REG_MERCH = "Select count(*) from merchant where isConfirmed is True and "
GET_SINGLE_MERCH = "Select name, organisation, username, password,abn from merchant where username = {}"
GET_REG_CUST = "Select count(*) from customer where "
CUST_SIGNUP_QRY = "Insert into customer (name, email, username, password) values('{}','{}','{}','{}')"
Fish_DATA_QRY = " Insert into newfishes (product_id,species,weight,L1,L2,L3,height,width,storage_temperature,estimated_storage_life_in_months,nutrients,best_before_calculated,storage_affecting_factor,price) values('{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}','{}')"
    MERCH_APPROVAL_QRY = "UPDATE merchant SET isConfirmed = '{}' WHERE username = '{}'"
# INSERT_FISH_DATA="Insert into newfishes("
def getData(self):
with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
with conn.cursor() as cursor:
cursor.execute(self.GET_DATA_QRY)
data = cursor.fetchall()
conn.commit()
return data
def insertMerchant(self, merchant):
# try:
with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
with conn.cursor() as cursor:
print(self.MERCH_SIGNUP_QRY.format(merchant.name, merchant.organisation,
merchant.username, merchant.password, merchant.abn))
cursor.execute(self.MERCH_SIGNUP_QRY.format(
merchant.name, merchant.organisation, merchant.username, merchant.password, merchant.abn))
conn.commit()
return True
# def
# except:
# return False
# def insertCustomer(self,customer):
# with pymysql.connect(host=self.SERVER, user = self.USR, password=self.PWD, db=self.DB) as conn:
# with conn.cursor(as_dict = True) as cursor:
# print(self.CUST_SIGNUP_QRY.format(customer.name,customer.email,customer.username,customer.password))
# cursor.execute(self.CUST_SIGNUP_QRY.format(customer.name,customer.email,customer.username,customer.password))
# cursor.commit()
# return True
def insertCustomer(self, customer):
with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
with conn.cursor() as cursor:
print(self.CUST_SIGNUP_QRY.format(customer.name,
customer.email, customer.username, customer.password))
cursor.execute(self.CUST_SIGNUP_QRY.format(
customer.name, customer.email, customer.username, customer.password))
conn.commit()
return True
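    # Sketch of a safer variant (not wired into the routes): pymysql supports
    # parameterized queries via %s placeholders, which avoids the SQL injection
    # risk of building statements with str.format as done above.
    def insertCustomerSafe(self, customer):
        with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
            with conn.cursor() as cursor:
                cursor.execute(
                    "Insert into customer (name, email, username, password) values(%s, %s, %s, %s)",
                    (customer.name, customer.email, customer.username, customer.password))
            conn.commit()
        return True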
def insertFishData(self, newfishes):
# try:
with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
with conn.cursor() as cursor:
# print(f"{fishdata}")
cursor.execute(self.Fish_DATA_QRY.format(newfishes.product_id, newfishes.species, newfishes.weight, newfishes.L1, newfishes.L2, newfishes.L3, newfishes.height, newfishes.width,
newfishes.storage_temperature, newfishes.estimated_storage_life_in_months, newfishes.nutrients, newfishes.best_before_calculated, newfishes.storage_affecting_factor, newfishes.price))
conn.commit()
return True
def insertApprovedMerchant(self, username, isApproved):
# try:
with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
with conn.cursor() as cursor:
print(f"{username} Approval Processing")
# cursor.execute(self.MERCH_APPROVAL_QRY.format(merchant.name,merchant.organisation,merchant.username,merchant.password,merchant.abn))
print(
f"Update merchant set isConfirmed = {isApproved} where username = {username}")
cursor.execute(
f"Update merchant set isConfirmed = '{isApproved}' where username = '{username}'")
conn.commit()
return True
# except:
# return False
def getMerchantData(self, user):
with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
with conn.cursor() as cursor:
cursor.execute(self.GET_SINGLE_MERCH.format(user))
data = cursor.fetchall()
conn.commit()
return data
def getAllMerchantData(self):
with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
with conn.cursor() as cursor:
cursor.execute(self.GET_MERCH)
data = cursor.fetchall()
conn.commit()
return data
def getRegisteredUsers(self, username, password):
with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
with conn.cursor() as cursor:
print(
f"{self.GET_REG_MERCH} username='{username}' and password='{password}'")
cursor.execute(
f"{self.GET_REG_MERCH} username='{username}' and password='{password}'")
data = cursor.fetchall()
conn.commit()
                # fetchall() returns a tuple like ((count,),); check for exactly one match
                count = data[0][0]
                return int(count) == 1
def getRegisteredCustomers(self, username, password):
with pymysql.connect(host=self.SERVER, user=self.USR, password=self.PWD, db=self.DB) as conn:
with conn.cursor() as cursor:
print(
f"{self.GET_REG_CUST} username='{username}' and password='{password}'")
cursor.execute(
f"{self.GET_REG_CUST} username='{username}' and password='{password}'")
data = cursor.fetchall()
conn.commit()
                # fetchall() returns a tuple like ((count,),); check for exactly one match
                count = data[0][0]
                return int(count) == 1
def getAbns():
with open('abns.txt', 'r') as f:
return [abn.replace(" ", "") for abn in f.read().split("\n")]
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/customerHome', methods=['GET', 'POST'])
def customerHome():
fish_data = Sql().getData()
return render_template('customerHome.html', data=fish_data, header=["product_id", "species", "weight", "L1", "L2", "L3", "height", "width", "storage_temperature", "estimated_storage_life_in_months", "nutrients", "best_before_calculated", "storage_affecting_factor", "price"])
@app.route('/marketplace', methods=['GET', 'POST'])
def marketplace():
fish_data = Sql().getData()
return render_template('marketplace.html', data=fish_data, header=["product_id", "species", "weight", "L1", "L2", "L3", "height", "width", "storage_temperature", "estimated_storage_life_in_months", "nutrients", "best_before_calculated", "storage_affecting_factor", "price"])
@app.route('/verify', methods=['POST'])
def verify_approval():
global isLogged
print(f"{request.form['merchant']} ::::::::::::::::::::::::::::::")
username = str(request.form['merchant']).split(",")[2][2:-1]
if request.form['merchant'] != pymysql.NULL and request.form['submit'] != pymysql.NULL:
isLogged = True
sql = Sql()
sql.insertApprovedMerchant(username, request.form['submit'])
data = sql.getAllMerchantData()
return render_template("adminHome.html", merchants=data, header=["Name", "Organisation", "Username", "Password", "ABN"])
else:
return "<html><body>error</body></html>"
def populateRegisteredUsers(data):
for record in data:
merchant = Merchant(record[0], record[2],
record[3], record[4], record[1])
registeredUsers[record[2]] = merchant
registeredAbns.append(int(record[4]))
@app.route('/populateListing', methods=['POST'])
def populateListing():
global isLogged
# if isLogged:
# isLogged = False
# return redirect(url_for('index'))
if "ifadmin" in request.form:
if request.form['user'] == "admin" and request.form['pass'] == "admin@123":
isLogged = True
sql = Sql()
data = sql.getAllMerchantData()
session['username'] = request.form['user']
return render_template("adminHome.html", merchants=data, header=["Name", "Organisation", "Username", "Password", "ABN"])
else:
return "<html><body>error</body></html>"
else:
if Sql().getRegisteredUsers(request.form['user'], request.form["pass"]):
isLogged = True
session['username'] = request.form['user']
return render_template("merchantHome.html")
# else:
# return "<html><body>Authentication Error: Bad Username or password </body></html>"
elif Sql().getRegisteredCustomers(request.form['user'], request.form["pass"]):
isLogged = True
session['username'] = request.form['user']
return render_template("customerHome.html")
else:
return "<html><body>Authentication Error: User Not registered, please Signup</body></html>"
@app.route('/signup', methods=['GET', 'POST'])
def signup():
return render_template("signupForm.html")
@app.route('/two', methods=['GET', 'POST'])
def two():
return render_template("cusMer.html")
@app.route('/cust', methods=['GET', 'POST'])
def cust():
return render_template("customer.html")
@app.route('/login', methods=['GET', 'POST'])
def login():
return render_template("newlogin.html")
@app.route('/logout')
def logout():
session.pop('username', None)
return redirect(url_for('index'))
@app.route('/saveFishData', methods=['POST'])
def saveFishData():
# fishData=f'INSERT INTO newfishes(product_id,species,weight,L1,L2,L3,height,width,storage_temperature,estimated_storage_life_in_months,nutrients,best_before_calculated,storage_affecting_factor,price) values ('+request.form["product_id"],request.form["species"],request.form["weight"],request.form["L1"],request.form["L2"],request.form["L3"],request.form["height"],request.form["width"],request.form["storage_temperature"],request.form["estimated_storage_life_in_months"],request.form["nutrients"],request.form["best_before_calculated"],request.form["storage_affecting_factor"],request.form["price"]+')'
newfishes = dataFish(request.form["product_id"], request.form["species"], request.form["weight"], request.form["L1"], request.form["L2"], request.form["L3"], request.form["height"], request.form["width"],
request.form["storage_temperature"], request.form["estimated_storage_life_in_months"], request.form["nutrients"], request.form["best_before_calculated"], request.form["storage_affecting_factor"], request.form["price"])
# print(request.form["species"])
sql = Sql()
if sql.insertFishData(newfishes):
return render_template('merchantHome.html')
else:
return "<p><b> Error occured: Unable to insert customer, Contact admin. </b><p>"
@app.route('/registerUser', methods=['POST'])
def registerUser():
# if request.form["abn"].replace(" ","") in abns:
# if request.form["user"] not in registeredUsers and request.form["abn"] not in registeredAbns:
merchant = Merchant(request.form["firstname"]+" "+request.form["lastname"],
request.form["user"], request.form["pass"], request.form["abn"], request.form["org"])
sql = Sql()
registeredUsers[request.form["user"]] = merchant
registeredAbns.append(request.form["abn"])
if sql.insertMerchant(merchant):
return render_template('newlogin.html')
else:
return "<p><b> Error occured: Unable to insert merchant, Contact admin. </b><p>"
# else:
# return "<p><b> User or Abn already registered</b></p>"
# else:
# return "<p><b> Error occured: ABN not registered with admin, contact admin </b><p>"
@app.route('/registerCustomer', methods=['POST'])
def register():
customer = Customer(
request.form["name"], request.form["email"], request.form["user"], request.form["pass"])
sql = Sql()
# registeredCustomers[request.form["user"]] = customer
if sql.insertCustomer(customer):
return render_template('newlogin.html')
else:
return "<p><b> Error occured: Unable to insert customer, Contact admin. </b><p>"
# if 'user' in request.form and 'pass' in request.form:
# username = request.form['user']
# password = request.form['pass']
# cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
# cursor.execute('SELECT * FROM accounts WHERE username = % s', (username, ))
# account = cursor.fetchone()
# if account:
# msg = 'Account already exists !'
# elif not re.match(r'[^@]+@[^@]+\.[^@]+', email):
# msg = 'Invalid email address !'
# elif not re.match(r'[A-Za-z0-9]+', username):
# msg = 'Username must contain only characters and numbers !'
# elif not username or not password or not email:
# msg = 'Please fill out the form !'
# else:
# cursor.execute('INSERT INTO accounts VALUES (NULL, % s, % s, % s)', (username, password, email, ))
# mysql.connection.commit()
# msg = 'You have successfully registered !'
# elif request.method == 'POST':
# msg = 'Please fill out the form !'
# return render_template('login1.html')
if __name__ == '__main__':
isLogged = False
# abns = getAbns()
registeredUsers = {}
registeredAbns = []
populateRegisteredUsers(Sql().getAllMerchantData())
app.run()
|
from django.views import View
from django.http.response import JsonResponse
class HealthCheck(View):
def get(self, *args, **kwargs):
return JsonResponse(data='Service is up', status=200, safe=False)
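# Minimal wiring sketch (assumed project layout, not part of this module): expose
# the view in a urls.py so that `GET /health/` returns the JSON body above.
#
#     from django.urls import path
#     from .views import HealthCheck
#
#     urlpatterns = [
#         path('health/', HealthCheck.as_view()),
#     ]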
|
from tests.db.data_fixtures import *
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.condenser_equipment_and_heat_exchangers import CoolingTowerTwoSpeed
log = logging.getLogger(__name__)
class TestCoolingTowerTwoSpeed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_coolingtowertwospeed(self):
pyidf.validation_level = ValidationLevel.error
obj = CoolingTowerTwoSpeed()
# alpha
var_name = "Name"
obj.name = var_name
# node
var_water_inlet_node_name = "node|Water Inlet Node Name"
obj.water_inlet_node_name = var_water_inlet_node_name
# node
var_water_outlet_node_name = "node|Water Outlet Node Name"
obj.water_outlet_node_name = var_water_outlet_node_name
# real
var_design_water_flow_rate = 0.0001
obj.design_water_flow_rate = var_design_water_flow_rate
# real
var_high_fan_speed_air_flow_rate = 0.0001
obj.high_fan_speed_air_flow_rate = var_high_fan_speed_air_flow_rate
# real
var_high_fan_speed_fan_power = 0.0001
obj.high_fan_speed_fan_power = var_high_fan_speed_fan_power
# real
var_high_fan_speed_ufactor_times_area_value = 1050000.00005
obj.high_fan_speed_ufactor_times_area_value = var_high_fan_speed_ufactor_times_area_value
# real
var_low_fan_speed_air_flow_rate = 0.0001
obj.low_fan_speed_air_flow_rate = var_low_fan_speed_air_flow_rate
# real
var_low_fan_speed_air_flow_rate_sizing_factor = 0.5
obj.low_fan_speed_air_flow_rate_sizing_factor = var_low_fan_speed_air_flow_rate_sizing_factor
# real
var_low_fan_speed_fan_power = 0.0001
obj.low_fan_speed_fan_power = var_low_fan_speed_fan_power
# real
var_low_fan_speed_fan_power_sizing_factor = 0.5
obj.low_fan_speed_fan_power_sizing_factor = var_low_fan_speed_fan_power_sizing_factor
# real
var_low_fan_speed_ufactor_times_area_value = 150000.00005
obj.low_fan_speed_ufactor_times_area_value = var_low_fan_speed_ufactor_times_area_value
# real
var_low_fan_speed_ufactor_times_area_sizing_factor = 0.5
obj.low_fan_speed_ufactor_times_area_sizing_factor = var_low_fan_speed_ufactor_times_area_sizing_factor
# real
var_free_convection_regime_air_flow_rate = 0.0
obj.free_convection_regime_air_flow_rate = var_free_convection_regime_air_flow_rate
# real
var_free_convection_regime_air_flow_rate_sizing_factor = 0.5
obj.free_convection_regime_air_flow_rate_sizing_factor = var_free_convection_regime_air_flow_rate_sizing_factor
# real
var_free_convection_regime_ufactor_times_area_value = 150000.0
obj.free_convection_regime_ufactor_times_area_value = var_free_convection_regime_ufactor_times_area_value
# real
var_free_convection_ufactor_times_area_value_sizing_factor = 0.5
obj.free_convection_ufactor_times_area_value_sizing_factor = var_free_convection_ufactor_times_area_value_sizing_factor
# alpha
var_performance_input_method = "UFactorTimesAreaAndDesignWaterFlowRate"
obj.performance_input_method = var_performance_input_method
# real
var_heat_rejection_capacity_and_nominal_capacity_sizing_ratio = 19.19
obj.heat_rejection_capacity_and_nominal_capacity_sizing_ratio = var_heat_rejection_capacity_and_nominal_capacity_sizing_ratio
# real
var_high_speed_nominal_capacity = 0.0001
obj.high_speed_nominal_capacity = var_high_speed_nominal_capacity
# real
var_low_speed_nominal_capacity = 0.0001
obj.low_speed_nominal_capacity = var_low_speed_nominal_capacity
# real
var_low_speed_nominal_capacity_sizing_factor = 0.5
obj.low_speed_nominal_capacity_sizing_factor = var_low_speed_nominal_capacity_sizing_factor
# real
var_free_convection_nominal_capacity = 0.0
obj.free_convection_nominal_capacity = var_free_convection_nominal_capacity
# real
var_free_convection_nominal_capacity_sizing_factor = 0.5
obj.free_convection_nominal_capacity_sizing_factor = var_free_convection_nominal_capacity_sizing_factor
# real
var_basin_heater_capacity = 0.0
obj.basin_heater_capacity = var_basin_heater_capacity
# real
var_basin_heater_setpoint_temperature = 2.0
obj.basin_heater_setpoint_temperature = var_basin_heater_setpoint_temperature
# object-list
var_basin_heater_operating_schedule_name = "object-list|Basin Heater Operating Schedule Name"
obj.basin_heater_operating_schedule_name = var_basin_heater_operating_schedule_name
# alpha
var_evaporation_loss_mode = "LossFactor"
obj.evaporation_loss_mode = var_evaporation_loss_mode
# real
var_evaporation_loss_factor = 29.29
obj.evaporation_loss_factor = var_evaporation_loss_factor
# real
var_drift_loss_percent = 30.3
obj.drift_loss_percent = var_drift_loss_percent
# alpha
var_blowdown_calculation_mode = "ConcentrationRatio"
obj.blowdown_calculation_mode = var_blowdown_calculation_mode
# real
var_blowdown_concentration_ratio = 2.0
obj.blowdown_concentration_ratio = var_blowdown_concentration_ratio
# object-list
var_blowdown_makeup_water_usage_schedule_name = "object-list|Blowdown Makeup Water Usage Schedule Name"
obj.blowdown_makeup_water_usage_schedule_name = var_blowdown_makeup_water_usage_schedule_name
# object-list
var_supply_water_storage_tank_name = "object-list|Supply Water Storage Tank Name"
obj.supply_water_storage_tank_name = var_supply_water_storage_tank_name
# node
var_outdoor_air_inlet_node_name = "node|Outdoor Air Inlet Node Name"
obj.outdoor_air_inlet_node_name = var_outdoor_air_inlet_node_name
# integer
var_number_of_cells = 1
obj.number_of_cells = var_number_of_cells
# alpha
var_cell_control = "MinimalCell"
obj.cell_control = var_cell_control
# real
var_cell_minimum_water_flow_rate_fraction = 0.50005
obj.cell_minimum_water_flow_rate_fraction = var_cell_minimum_water_flow_rate_fraction
# real
var_cell_maximum_water_flow_rate_fraction = 1.0
obj.cell_maximum_water_flow_rate_fraction = var_cell_maximum_water_flow_rate_fraction
# real
var_sizing_factor = 0.0001
obj.sizing_factor = var_sizing_factor
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.coolingtowertwospeeds[0].name, var_name)
self.assertEqual(idf2.coolingtowertwospeeds[0].water_inlet_node_name, var_water_inlet_node_name)
self.assertEqual(idf2.coolingtowertwospeeds[0].water_outlet_node_name, var_water_outlet_node_name)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].design_water_flow_rate, var_design_water_flow_rate)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].high_fan_speed_air_flow_rate, var_high_fan_speed_air_flow_rate)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].high_fan_speed_fan_power, var_high_fan_speed_fan_power)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].high_fan_speed_ufactor_times_area_value, var_high_fan_speed_ufactor_times_area_value)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].low_fan_speed_air_flow_rate, var_low_fan_speed_air_flow_rate)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].low_fan_speed_air_flow_rate_sizing_factor, var_low_fan_speed_air_flow_rate_sizing_factor)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].low_fan_speed_fan_power, var_low_fan_speed_fan_power)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].low_fan_speed_fan_power_sizing_factor, var_low_fan_speed_fan_power_sizing_factor)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].low_fan_speed_ufactor_times_area_value, var_low_fan_speed_ufactor_times_area_value)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].low_fan_speed_ufactor_times_area_sizing_factor, var_low_fan_speed_ufactor_times_area_sizing_factor)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].free_convection_regime_air_flow_rate, var_free_convection_regime_air_flow_rate)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].free_convection_regime_air_flow_rate_sizing_factor, var_free_convection_regime_air_flow_rate_sizing_factor)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].free_convection_regime_ufactor_times_area_value, var_free_convection_regime_ufactor_times_area_value)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].free_convection_ufactor_times_area_value_sizing_factor, var_free_convection_ufactor_times_area_value_sizing_factor)
self.assertEqual(idf2.coolingtowertwospeeds[0].performance_input_method, var_performance_input_method)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].heat_rejection_capacity_and_nominal_capacity_sizing_ratio, var_heat_rejection_capacity_and_nominal_capacity_sizing_ratio)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].high_speed_nominal_capacity, var_high_speed_nominal_capacity)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].low_speed_nominal_capacity, var_low_speed_nominal_capacity)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].low_speed_nominal_capacity_sizing_factor, var_low_speed_nominal_capacity_sizing_factor)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].free_convection_nominal_capacity, var_free_convection_nominal_capacity)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].free_convection_nominal_capacity_sizing_factor, var_free_convection_nominal_capacity_sizing_factor)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].basin_heater_capacity, var_basin_heater_capacity)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].basin_heater_setpoint_temperature, var_basin_heater_setpoint_temperature)
self.assertEqual(idf2.coolingtowertwospeeds[0].basin_heater_operating_schedule_name, var_basin_heater_operating_schedule_name)
self.assertEqual(idf2.coolingtowertwospeeds[0].evaporation_loss_mode, var_evaporation_loss_mode)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].evaporation_loss_factor, var_evaporation_loss_factor)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].drift_loss_percent, var_drift_loss_percent)
self.assertEqual(idf2.coolingtowertwospeeds[0].blowdown_calculation_mode, var_blowdown_calculation_mode)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].blowdown_concentration_ratio, var_blowdown_concentration_ratio)
self.assertEqual(idf2.coolingtowertwospeeds[0].blowdown_makeup_water_usage_schedule_name, var_blowdown_makeup_water_usage_schedule_name)
self.assertEqual(idf2.coolingtowertwospeeds[0].supply_water_storage_tank_name, var_supply_water_storage_tank_name)
self.assertEqual(idf2.coolingtowertwospeeds[0].outdoor_air_inlet_node_name, var_outdoor_air_inlet_node_name)
self.assertEqual(idf2.coolingtowertwospeeds[0].number_of_cells, var_number_of_cells)
self.assertEqual(idf2.coolingtowertwospeeds[0].cell_control, var_cell_control)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].cell_minimum_water_flow_rate_fraction, var_cell_minimum_water_flow_rate_fraction)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].cell_maximum_water_flow_rate_fraction, var_cell_maximum_water_flow_rate_fraction)
self.assertAlmostEqual(idf2.coolingtowertwospeeds[0].sizing_factor, var_sizing_factor)
|
# Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from collections import defaultdict
from elasticsearch import Elasticsearch
from flask import current_app as app
from flask import jsonify
from flask import request
import ast
import json
import yaml
import logging
from rhoci.models.job import Job
from rhoci.jenkins.osp import get_release
LOG = logging.getLogger(__name__)
from rhoci.api import bp # noqa
PROJECTION = {'name': 1, 'last_build': 1, 'release': 1, 'last_successful_build': 1}
@bp.route('/jobs', methods=['GET', 'POST'])
def jobs(query_str=None):
"""All jobs API route."""
    q_str = request.args.get('query_str', default={})
    if q_str:
        # literal_eval only accepts Python literals, unlike eval() which would execute arbitrary code
        query_str = ast.literal_eval(q_str)
    else:
        query_str = {}
results = {'data': Job.find(query_str=query_str, projection=PROJECTION)}
return jsonify(results)
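# Illustrative call for the route above (the query value is hypothetical):
#   GET /jobs?query_str={'release': 16}
# the literal dict is parsed and handed to Job.find() together with PROJECTION.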
@bp.route('/jobs/filtered', methods=['GET', 'POST'])
@bp.route('/jobs/filtered?filters=<filters>', methods=['GET', 'POST'])
def get_filtered_jobs(filters=None):
filters = request.args.get('filters')
if filters and filters != "undefined":
filters_dict = json.loads(filters)
else:
filters_dict = {}
results = {'data': []}
es = Elasticsearch(app.config['custom']['elk']['es']['url'])
body = {
"query": {
"bool": {}},
"size": 0,
"aggs": {
"jobs": {
"terms": {"field": "job_name.keyword",
"size": 1000},
"aggs": {
"builds": {
"terms": {"field": "build_num"},
"aggs": {
"status": {
"terms": {"field": "build_result.keyword"}
}
}
}}}}}
if filters_dict:
body["query"]["bool"]["filter"] = []
filters_modified = {"{}.keyword".format(k):v for k,v in filters_dict.items()}
for f,v in filters_modified.items():
body["query"]["bool"]["filter"].append({ "term": {f:v} })
res = es.search(index="logstash", body=body)
    for job in res['aggregations']['jobs']['buckets']:
        if not job['builds']['buckets']:
            # skip jobs with no recorded builds instead of indexing into an empty list
            continue
        if job['builds']['buckets'][-1]['status']['buckets']:
            status = job['builds']['buckets'][-1]['status']['buckets'][-1]['key']
        else:
            status = "None"
        results['data'].append({'job_name': job['key'], 'build_number': int(job['builds']['buckets'][-1]['key']), 'status': status})
return jsonify(results)
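# Illustrative request for the route above (filter values are hypothetical): a call like
#   GET /jobs/filtered?filters={"DFG": "network"}
# turns each filter key into a keyword term clause, i.e. the body gains
#   {"query": {"bool": {"filter": [{"term": {"DFG.keyword": "network"}}]}}}
# before the aggregation over job_name/build_num/build_result runs.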
@bp.route('/jobs/<DFG_name>/<status>')
@bp.route('/jobs/DFG=<DFG_name>')
@bp.route('/jobs/<job_name>')
@bp.route('/jobs/all')
def get_jobs(DFG_name=None, squad_name=None,
component_name=None, job_name=None, status=None):
"""Returns jobs."""
jobs = defaultdict(dict)
results = {'data': []}
es = Elasticsearch(app.config['custom']['elk']['es']['url'])
body = {
"query": {
"bool": {
"must": [{"exists": {"field": "build_result.keyword"}} ],
# "filter": [
# { "term": { "DFG.keyword": DFG_name}}
#]
}},
"size": 0,
"aggs": {
"jobs": {
"terms": {"field": "job_name.keyword",
"size": 1000},
"aggs": {
"builds": {
"terms": {"field": "build_num"},
"aggs": {
"status": {
"terms": {"field": "build_result.keyword"}
}
}
}}}}}
if DFG_name and status:
body["query"]["bool"]["filter"] = [{ "term": { "DFG.keyword": DFG_name}}]
res = es.search(index="logstash", body=body)
for job in res['aggregations']['jobs']['buckets']:
if job['builds']['buckets'][-1]['status']['buckets'][-1]['key'] == status:
results['data'].append({'job_name': job['key'], 'build_number': int(job['builds']['buckets'][-1]['key']), 'status': status})
elif DFG_name:
body["query"]["bool"]["filter"] = [{ "term": { "DFG.keyword": DFG_name}}]
res = es.search(index="logstash", body=body)
for job in res['aggregations']['jobs']['buckets']:
results['data'].append({'job_name': job['key'], 'build_number': int(job['builds']['buckets'][-1]['key']), 'status': job['builds']['buckets'][-1]['status']['buckets'][-1]['key']})
else:
res = es.search(index="logstash", body=body)
for job in res['aggregations']['jobs']['buckets']:
results['data'].append({'job_name': job['key'], 'build_number': int(job['builds']['buckets'][-1]['key']), 'status': job['builds']['buckets'][-1]['status']['buckets'][-1]['key']})
return jsonify(results)
|
#! /usr/bin/env python
import numpy as np
def sparseFrobeniusNorm(X):
""" Compute a the Frobenius norm of the observed entries of a sparse matrix.
Args:
X: a sparse matrix
Returns:
The square root of the sum of the squares of the observed entries.
"""
tmp = X.multiply(X)
return np.sqrt(tmp.sum())
def test_sparseFrobeniusNorm():
    from scipy.sparse import rand
    X = rand(3, 4, 0.5)
    print(sparseFrobeniusNorm(X))
    print(np.linalg.norm(X.todense(), 'fro'))
    # the stored entries contain all non-zeros, so both norms must agree
    assert np.isclose(sparseFrobeniusNorm(X), np.linalg.norm(X.todense(), 'fro'))
if __name__ == '__main__':
test_sparseFrobeniusNorm()
|
import gin
import tensorflow as tf
@gin.configurable
class MultiPolicy:
def __init__(self, act_spec, logits):
self.logits = logits
self.inputs = [tf.placeholder(s.dtype, [None, *s.shape]) for s in act_spec]
self.dists = [self.make_dist(s, l) for s, l in zip(act_spec.spaces, logits)]
self.entropy = sum([dist.entropy() for dist in self.dists])
self.logli = sum([dist.log_prob(act) for dist, act in zip(self.dists, self.inputs)])
self.sample = [dist.sample() for dist in self.dists]
@staticmethod
def make_dist(space, logits):
# tfp is really heavy on init, better to lazy load
import tensorflow_probability as tfp
if space.is_continuous():
mu, log_std = tf.split(logits, num_or_size_splits=2, axis=-1)
return tfp.distributions.MultivariateNormalDiag(mu, tf.exp(log_std))
else:
return tfp.distributions.Categorical(logits)
|
from flask import Blueprint, session, flash, redirect, url_for, render_template, current_app  # request, jsonify
auth_routes = Blueprint("auth_routes", __name__)
# signup route only for email password auth (not implemented)
#@auth_routes.route("/signup")
#def signup():
# print("SIGNUP...")
# return render_template("signup.html")
@auth_routes.route("/login")
def login():
print("LOGIN...")
# this is a login page for either google or email/password auth (but the latter not implemented at the moment):
return render_template("login.html")
# if not using email/password auth, consider shortcut directly to google login:
#return redirect("/auth/google/login")
@auth_routes.route("/auth/google/login")
def google_login():
print("GOOGLE OAUTH LOGIN...")
oauth = current_app.config["OAUTH"]
redirect_uri = url_for("auth_routes.google_oauth_callback", _external=True) # see corresponding route below
return oauth.google.authorize_redirect(redirect_uri) # send the user to login with google, then hit the callback route
@auth_routes.route("/auth/google/callback")
def google_oauth_callback():
print("GOOGLE OAUTH CALLBACK...")
oauth = current_app.config["OAUTH"]
token = oauth.google.authorize_access_token()
user_info = token.get("userinfo")
if user_info:
print("STORING USER INFO IN THE SESSION...")
#print(user_info)
#> {
#> 'iss': 'https://accounts.google.com',
#> 'azp': '__________.apps.googleusercontent.com',
#> 'aud': '__________.apps.googleusercontent.com',
#> 'sub': '__________',
#> 'email': 'example@gmail.com',
#> 'email_verified': True,
#> 'at_hash': '__________',
#> 'nonce': '__________',
#> 'name': 'First M Last',
#> 'picture': 'https://lh3.googleusercontent.com/a-/__________',
#> 'given_name': 'First M',
#> 'family_name': 'Last',
#> 'locale': 'en',
#> 'iat': __________,
#> 'exp': __________
#> }
print("USER INFO:", user_info["email"], user_info["name"], user_info["locale"])
# add user info to the session
session["current_user"] = user_info
# store the user info in the database:
#service = current_app.config["FIREBASE_SERVICE"]
#service.update_user({
# "email": user_info["email"],
# "verified": user_info["email_verified"],
# "given_name": user_info["given_name"],
# "family_name": user_info["family_name"],
# "picture": user_info["picture"],
# "locale": user_info["locale"],
#})
else:
print("NO USER INFO")
return redirect("/")
@auth_routes.route("/logout")
def logout():
print("LOGGING OUT...")
session.pop("current_user", None) # remove user info from the session
return redirect("/")
#
# EMAIL / PASSWORD AUTH (NOT IMPLEMENTED)
#
#@auth_routes.route("/auth/email_password/signup")
#def email_password_signup():
# return ...
#@auth_routes.route("/auth/email_password/login")
#def email_password_login():
# return ...
#@auth_routes.route("/auth/email_password/reset_password3")
#def email_password_reset():
# return ...
|
# © 2020 지성. all rights reserved.
# <llllllllll@kakao.com>
# Apache License 2.0
from .base import *
from .naive import *
from .pg import *
from .predict import *
|
orig = 'abcdefghijklmnopqrstuvwxyz'
new = ['@', '8', '(', '|)', '3', '#', '6', '[-]', '|', '_|', '|<', '1', r'[]\/[]', r'[]\[]', '0', '|D', '(,)', '|Z', '$', "']['", '|_|', r'\/', r'\/\/', '}{', '`/', '2']
line = input().lower()
trans = ''
for ch in line:
    try:
        trans += new[orig.index(ch)]
    except ValueError:
        # characters without a leet mapping pass through unchanged
        trans += ch
print(trans)
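# Example with the table above: the input "abc!" translates to "@8(!" -- letters are swapped
# for their leet equivalents and characters without a mapping (here "!") are left unchanged.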
|
from typing import Dict
import torch
from torchtyping import TensorType
from typing import Dict, Optional
from tqdm import tqdm
from typeguard import typechecked
"""
# PCA rationale:
# check for the constraints , if small, do nothing
# if needed, project the result onto the constraints using the projection parameters
# pca_reproject(x_after_step, self.proj_params) to go back to plausible values (if epsilon = 0)
# if epsilon non-zero. project onto all evecs (discarded and kept). these are all orthogonal.
# you go one by one. if your in the kept eigenvecs, do nothing. if you're in discarded evecs, you're outside constraint space
# you have a sclar proj per dim. so in those held out dims, you manually set the projs to be epsilon instead of that high projection that you may encounter.
# you modify all the projections onto the discarded evecs. you have a vector which is num_obs x num_evecs. this is the representation of data in the PCA coordinate basis
# then, you modify that representation, and you send it back to the original space using the transpose of the evecs.
# Temporal rationale: want x_t - x_t-1 to be small. compute the difference per timepoint. choose one direction, say forward. you have two points in
# you have 2 points in 2d space. the difference vector is the direction. compute the norm. if norm > epsilon, rescale it so norm is equal to epsilon. diff/epsilon -- now you have a direction and a step size. you define x_t += x_t-1 + diff/epsilon.
# the next time point has to be inside a ball with radius epsilon. if it's outside, you project onto the exterior of that ball. if it's inside, keep it where it is.
# the result will be different if you start from the end or from the beggining.
"""
def MSE(preds: TensorType["num_samples", "num_keypoints",2],
gt: TensorType["num_samples", "num_keypoints",2]):
bp_error = torch.linalg.norm(preds - gt, dim=2) # error per keypoint-frame
average_error = torch.nanmean(bp_error, dim=1) # mean over keypoints
return average_error
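# Note: despite its name, MSE above returns the mean Euclidean (L2) distance per sample rather
# than a squared error; e.g. predictions offset from the ground truth by (3, 4) at every
# keypoint give an error of 5.0 for that sample.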
@typechecked
class ProjectedGD(object):
""" projected gradient descent on an L2 ball subject to constraints"""
def __init__(
self,
data: TensorType["num_obs", "obs_dim"] = None,
ground_truth: Optional[TensorType["num_obs", "obs_dim"]] = None,
confidences: Optional[TensorType["num_obs", "num_keypoints"]] = None,
proj_params: dict = None,
lr: Optional[float] = None,
max_iter: int = 1000,
tol: float = 1e-5,
verbose: bool = False,
lr_decay_factor: float = 0.25,
):
"""assume you get only the bodyparts of interest for this, irrelevant cols get filtered externally"""
self.max_iter = max_iter
self.tol = tol
self.verbose = verbose
self.data: TensorType["num_samples", "num_keypoints", 2] = data.reshape(data.shape[0], -1, 2)
self.ground_truth: TensorType["num_samples", "num_keypoints", 2] = ground_truth.reshape(ground_truth.shape[0], -1, 2)
self.proj_params = proj_params
self.optimized_preds = self.data.detach().clone() # + torch.randn_like(data)*1e-4 # torch.nn.parameter.Parameter(data=data.detach().clone())
self.x_list = []
self.lr_list = []
self.error_list = []
self.confidences = 1.0
self.lr_decay_factor = lr_decay_factor
if confidences is not None:
            self.confidences: TensorType["num_obs", "num_keypoints", 1] = confidences.unsqueeze(2)
            self.confidences = torch.clamp(self.confidences, min=0.0, max=1.0)
if lr is not None:
self.lr = lr
else:
self.lr = self.initialize_alpha()
        # TODO: modify norm to be over the last dimension. have num_keypoints norms per sample.
# TODO: everything else can remain in this shape?
# When conf comes in, reshape it similarly.
# currently this is not used.
@staticmethod
def l2_grad(
diffs: TensorType["num_samples", "num_keypoints", 2], scalar: float = 1.0
) -> TensorType["num_samples", "num_keypoints", 2]:
# TODO: test
if torch.allclose(diffs, torch.zeros_like(diffs)):
# don't divide by zero
return diffs
else:
norm: TensorType["num_samples", "num_keypoints",1] = torch.linalg.norm(diffs, dim=2, keepdim=True)
grad = diffs * scalar * (1.0 / norm)
return grad
def grad_step(
self, x_curr: TensorType["num_samples", "num_keypoints", 2]
) -> TensorType["num_samples", "num_keypoints", 2]:
norm: TensorType["num_samples", "num_keypoints", 1] = torch.linalg.norm(x_curr-self.data, dim=2, keepdim=True)
step: TensorType["num_samples", "num_keypoints", 1] = (self.lr * self.confidences) / (norm + 1e-8)
step = torch.clamp(step, min=0.0, max=1.0)
x_after_step = (1-step)*x_curr + step*self.data
return x_after_step
# standard way below
# return x_curr - self.lr * self.l2_grad(x_curr - self.data)
def project(
self, x_after_step: TensorType["num_samples", "num_keypoints", 2]
) -> TensorType["num_samples", "num_keypoints", 2]:
# reshape
x_after_step = x_after_step.reshape(x_after_step.shape[0],-1)
# reproject
reprojected = self.proj_params["pca_singleview"].reproject(x_after_step)
# reshape back
reprojected = reprojected.reshape(x_after_step.shape[0], -1, 2)
return reprojected
def step(
self, x_curr: TensorType["num_samples", "num_keypoints", 2]
) -> TensorType["num_samples", "num_keypoints", 2]:
x_after_step = self.grad_step(x_curr=x_curr) # gradient descent on the l2 norm objective
x_after_projection = self.project(x_after_step=x_after_step) # project the current x onto the constraints, get plausible x
return x_after_projection
def initialize_alpha(self) -> TensorType[(), float]:
# project
projected = self.project(x_after_step=self.data)
# compute the difference
diff = projected - self.data # X_0 - Y
# compute the norm and divide by confidences
alpha = torch.max(torch.norm(diff, dim=2, keepdim=True) / self.confidences)
return alpha
def fit(self) -> TensorType["num_samples", "num_keypoints", 2]:
        # TODO: measure RMSE per iteration, run for longer, understand what it's doing
x_curr = self.optimized_preds.clone()
# project and initialize step size.
for i in tqdm(range(self.max_iter)):
# projected gradient descent step
x_new = self.step(x_curr)
if self.verbose:
print(f"iteration {i}")
print(f"x_curr: {x_curr}")
print(f"x_new: {x_new}")
if torch.allclose(x_curr, x_new, atol=self.tol):
# if no change, you're clamped at step=1.0, too big, decrease and move away from data
self.lr = self.lr * self.lr_decay_factor
x_curr = x_new.clone()
self.error_list.append(MSE(x_curr, self.ground_truth))
self.x_list.append(x_new) # record the new x
self.lr_list.append(self.lr) # record the new step size
self.optimized_preds = x_new
return self.optimized_preds
|
from models.cbamresnet import *
from models.pnasnet import *
from models.seresnext import *
from models.inceptionresnetv2 import *
from models.xception import *
__all__ = ['get_model']
_models = {
'cbam_resnet50': cbam_resnet50,
'pnasnet5large': pnasnet5large,
'seresnext50_32x4d': seresnext50_32x4d,
'seresnext101_32x4d': seresnext101_32x4d,
'seresnext101_64x4d': seresnext101_64x4d,
'xception': xception,
'inceptionresnetv2': inceptionresnetv2,
}
def get_model(name, **kwargs):
"""
Get supported model.
Parameters:
----------
name : str
Name of model.
Returns
-------
Module
Resulted model.
"""
name = name.lower()
if name not in _models:
raise ValueError('Unsupported model: {}'.format(name))
net = _models[name](**kwargs)
return net
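# Hypothetical usage sketch: the name must be one of the keys of _models above and any extra
# keyword arguments are forwarded to the underlying constructor, e.g.
#   net = get_model('seresnext50_32x4d')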
|
#!/usr/bin/env python
from __future__ import print_function
from distutils.core import setup
from distutils import sysconfig
from os.path import join as pjoin, split as psplit
import sys
import platform
setup(name='FEniCSopt',
version='0.2',
description = "FEniCS optimization package",
author = "Petr Lukas",
author_email='lukas@karlin.mff.cuni.cz',
url='https://github.com/lukaspetr/FEniCSopt',
classifiers=[
'Development Status :: 0.2 - Unstable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 2.7',
'License :: MIT',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
],
scripts = [pjoin("scripts", "ind_cross.py")],
packages = ["fenicsopt", "fenicsopt.core", "fenicsopt.examples", "fenicsopt.exports"],
package_dir = {"fenicsopt": "fenicsopt"}
)
|
# -*- coding: utf-8 -*-
# @Brief: configuration file
input_shape = (320, 320, 3)
num_classes = 21
epochs = 50
batch_size = 4
lr = 0.0001
train_txt_path = "/Users/linzhihui/Desktop/Data/VOC2012/ImageSets/SegmentationAug/train.txt"
val_txt_path = "/Users/linzhihui/Desktop/Data/VOC2012/ImageSets/SegmentationAug/val.txt"
trainval_txt_path = "/Users/linzhihui/Desktop/Data/VOC2012/ImageSets/SegmentationAug/trainval.txt"
|
"""Support for hunterdouglass_powerview sensors."""
from aiopvapi.resources.shade import BaseShade, factory as PvShade
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
DOMAIN,
ROOM_ID_IN_SHADE,
ROOM_NAME_UNICODE,
SHADE_BATTERY_LEVEL,
SHADE_BATTERY_LEVEL_MAX,
)
from .entity import ShadeEntity
from .model import PowerviewEntryData
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the hunter douglas shades sensors."""
pv_entry: PowerviewEntryData = hass.data[DOMAIN][entry.entry_id]
entities = []
for raw_shade in pv_entry.shade_data.values():
shade: BaseShade = PvShade(raw_shade, pv_entry.api)
if SHADE_BATTERY_LEVEL not in shade.raw_data:
continue
name_before_refresh = shade.name
room_id = shade.raw_data.get(ROOM_ID_IN_SHADE)
room_name = pv_entry.room_data.get(room_id, {}).get(ROOM_NAME_UNICODE, "")
entities.append(
PowerViewShadeBatterySensor(
pv_entry.coordinator,
pv_entry.device_info,
room_name,
shade,
name_before_refresh,
)
)
async_add_entities(entities)
class PowerViewShadeBatterySensor(ShadeEntity, SensorEntity):
"""Representation of an shade battery charge sensor."""
_attr_entity_category = EntityCategory.DIAGNOSTIC
_attr_native_unit_of_measurement = PERCENTAGE
_attr_device_class = SensorDeviceClass.BATTERY
_attr_state_class = SensorStateClass.MEASUREMENT
def __init__(self, coordinator, device_info, room_name, shade, name):
"""Initialize the shade."""
super().__init__(coordinator, device_info, room_name, shade, name)
self._attr_unique_id = f"{self._attr_unique_id}_charge"
@property
def name(self):
"""Name of the shade battery."""
return f"{self._shade_name} Battery"
@property
def native_value(self):
"""Get the current value in percentage."""
return round(
self._shade.raw_data[SHADE_BATTERY_LEVEL] / SHADE_BATTERY_LEVEL_MAX * 100
)
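    # Worked example (numbers are illustrative, not taken from the PowerView constants): if
    # SHADE_BATTERY_LEVEL_MAX were 200, a raw battery level of 150 would be reported as 75 (%).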
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.async_on_remove(
self.coordinator.async_add_listener(self._async_update_shade_from_group)
)
@callback
def _async_update_shade_from_group(self):
"""Update with new data from the coordinator."""
self._shade.raw_data = self.data.get_raw_data(self._shade.id)
self.async_write_ha_state()
async def async_update(self) -> None:
"""Refresh shade battery."""
await self._shade.refreshBattery()
|
# Code generated by lark_sdk_gen. DO NOT EDIT.
from pylark.lark_request import RawRequestReq, _new_method_option
from pylark import lark_type, lark_type_sheet, lark_type_approval
import attr
import typing
import io
@attr.s
class DeleteDepartmentReq(object):
department_id_type: lark_type.DepartmentIDType = attr.ib(
default=None, metadata={"req_type": "query", "key": "department_id_type"}
    )  # Type of department ID used in this call. Example: "open_department_id". Options: `department_id` (identify the department by custom department_id), `open_department_id` (identify the department by open_department_id). Default: `open_department_id`
department_id: str = attr.ib(
default="", metadata={"req_type": "path", "key": "department_id"}
    )  # Department ID; must match the department_id_type passed in the query parameters. Example: "od-4e6ac4d14bcd5071a37a39de902c7141". Max length: `64` characters. Regex check: `^0|[^od][A-Za-z0-9]*`
@attr.s
class DeleteDepartmentResp(object):
pass
def _gen_delete_department_req(request, options) -> RawRequestReq:
return RawRequestReq(
dataclass=DeleteDepartmentResp,
scope="Contact",
api="DeleteDepartment",
method="DELETE",
url="https://open.feishu.cn/open-apis/contact/v3/departments/:department_id",
body=request,
method_option=_new_method_option(options),
need_tenant_access_token=True,
)
|
import re
import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import numpy as np
from torchvision.models.densenet import model_urls, _DenseLayer, _DenseBlock, _Transition
from torchvision.models.utils import load_state_dict_from_url
from collections import OrderedDict
from collections import deque
from ...utils.Dense_U_Net_lidar_helper import get_config
# Structure of code essentially:
# https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py
class Dense_U_Net_lidar(nn.Module):
'''
U-Net like structure | Encoder = DenseNet original | optional secondary stream in encoder processing lidar data
    Keeping the structure and variable naming of original densenet | allowing to use pretrained weights from torchvision.models
1. Added optional lidar stream (stream_2) mirroring the densenet rgb stream (stream_1)
2. Added optional Concat layer to bring together rgb and lidar
3. replacing the classifier with UNet like Decoder | feeding in output of blocks | if split streams using rgb stream
4. Output: Heat Maps for each class
'''
def __init__(self, config):
'''
There is a cleaner way of implementing this. TODO clean-up
Arguments:
config: as specified in .utils.Dense_U_Net_lidar_helper
'''
super().__init__()
self.config = config
# original densenet attributes
self.growth_rate = config.model.growth_rate
self.block_config = config.model.block_config
self.num_init_features = config.model.num_init_features
self.bn_size = config.model.bn_size
self.drop_rate = config.model.drop_rate
self.memory_efficient = config.model.memory_efficient
self.num_classes = config.model.num_classes
# param assignment
self.concat_before_block_num = config.model.concat_before_block_num
self.num_layers_before_blocks = config.model.num_layers_before_blocks
self.concat_after_module_idx = self.num_layers_before_blocks-1 + 2*(self.concat_before_block_num-1)
self.stream_1_in_channels = config.model.stream_1_in_channels
self.stream_2_in_channels = config.model.stream_2_in_channels
self.network_input_channels = self.stream_1_in_channels # Allowing for rgb input or torch.cat((rgb,lidar),1) | added
if self.concat_before_block_num == 1 and self.stream_2_in_channels == 0:
self.fusion = 'no'
elif self.concat_before_block_num == 1 and self.stream_2_in_channels > 0:
self.fusion = 'early'
self.network_input_channels += self.stream_2_in_channels
elif self.concat_before_block_num > 1 and self.concat_before_block_num <= len(self.block_config):
self.fusion = 'mid'
else:
raise AttributeError
### core structure
## Encoder | same as densenet without norm5 and classifier
# First convolution | original densenet
self.features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(self.network_input_channels, self.num_init_features, kernel_size=7, stride=2,
padding=3, bias=False)),
('norm0', nn.BatchNorm2d(self.num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock | original densenet + stack comprising layer sizes for the decoder
feature_size_stack = deque()
feature_size_stack.append(self.num_init_features + 2*self.growth_rate)
num_features = self.num_init_features
for i, num_layers in enumerate(self.block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=self.bn_size,
growth_rate=self.growth_rate,
drop_rate=self.drop_rate,
memory_efficient=self.memory_efficient
)
self.features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * self.growth_rate
feature_size_stack.append(num_features)
if i != len(self.block_config) - 1:
trans = _Transition(num_input_features=num_features,
num_output_features=num_features // 2)
self.features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
## Decoder
# U net like | ugly: should have own class for whole sequence
self.decoder = nn.Sequential()
num_in_features = feature_size_stack.pop()
for i in range(len(self.block_config)):
num_features = feature_size_stack.pop()
transp_conv_seq = nn.Sequential(OrderedDict([ # denselayer like struct; reduce channels with 1x1 convs
('norm0', nn.BatchNorm2d(num_in_features)),
('relu0', nn.ReLU(inplace=True)),
('conv_reduce', nn.Conv2d(num_in_features, num_features,
kernel_size=1, stride=1, padding=0, bias=False)),
('norm1', nn.BatchNorm2d(num_features)),
('relu1', nn.ReLU(inplace=True))
]))
self.decoder.add_module('Transposed_Convolution_Sequence_%d' %(i+1), transp_conv_seq)
self.decoder.add_module('Transposed_Convolution_%d' %(i+1), nn.ConvTranspose2d(num_features,
num_features, 3, stride=2, padding=1, bias=False))
num_in_features = num_features*2
self.decoder.add_module('Upsampling', nn.Upsample(scale_factor=2))
# final refinement: concat orig rgb & lidar before passing
self.dec_out_to_heat_maps = nn.Sequential(OrderedDict([
('norm0', nn.BatchNorm2d(num_features+self.stream_1_in_channels+self.stream_2_in_channels)),
('relu0', nn.ReLU(inplace=True)),
('refine0', nn.Conv2d(num_features+self.stream_1_in_channels+self.stream_2_in_channels,
num_features//2, 3, stride=1, padding=1, bias=False)),
('norm1', nn.BatchNorm2d(num_features//2)),
('relu1', nn.ReLU(inplace=True)),
('refine1', nn.Conv2d(num_features//2, self.num_classes,
5, stride=1, padding=2, bias=False))
]))
### additional structure depending on fusion mechanism
if self.fusion == 'no':
# i.e. one stream only
pass
elif self.fusion == 'early':
# i.e. concat rgb and lidar before network
pass
elif self.fusion == 'mid':
# add all the same processing for the lidar data as for rgb data
# add concat layer
'''
# weirdly gives slower iteration times
# Stream_2 mirrors Stream_1 up to concat level
self.stream_2_features = copy.deepcopy(self.features[:self.concat_after_module_idx+1])
self.stream_2_features.conv0 = nn.Conv2d(self.stream_2_in_channels,
self.num_init_features, kernel_size=7, stride=2, padding=3, bias=False)
'''
# First convolution | original densenet | for lidar block
self.stream_2_features = nn.Sequential(OrderedDict([
('conv0', nn.Conv2d(self.stream_2_in_channels, self.num_init_features, kernel_size=7, stride=2,
padding=3, bias=False)),
('norm0', nn.BatchNorm2d(self.num_init_features)),
('relu0', nn.ReLU(inplace=True)),
('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]))
# Each denseblock | original densenet + break before concat layer
num_features = self.num_init_features
for i, num_layers in enumerate(self.block_config):
if i == self.concat_before_block_num-1:
break
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=self.bn_size,
growth_rate=self.growth_rate,
drop_rate=self.drop_rate,
memory_efficient=self.memory_efficient
)
self.stream_2_features.add_module('denseblock%d' % (i + 1), block)
num_features = num_features + num_layers * self.growth_rate
if i != len(self.block_config) - 1:
trans = _Transition(num_input_features=num_features,
num_output_features=num_features // 2)
self.stream_2_features.add_module('transition%d' % (i + 1), trans)
num_features = num_features // 2
# concat layer | rgb + lidar | 1x1 conv
num_features = self.features[self.concat_after_module_idx+1].denselayer1.norm1.num_features
self.concat_module = nn.Sequential(OrderedDict([
('norm', nn.BatchNorm2d(num_features*2)),
('relu', nn.ReLU(inplace=True)),
('conv', nn.Conv2d(num_features*2, num_features, kernel_size=1,
stride=1, padding=0, bias=False))
]))
else:
raise AttributeError
# Official init from torch repo
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
# get number of parameters of model
self.num_params = sum(p.numel() for p in self.parameters())
def forward(self, stream_1_data, stream_2_data):
'''
unnecessarily complex because init respects original densenet implementation; see __init__
Arguments:
stream_1_data: batch, channels, W, H (W and H as lidar_data!)
stream_2_data: batch, channels, W, H (W and H as rgb_data!)
'''
# stack of encoding features used in decoder
HxW_shape_stack = deque()
features_from_enc_stack = deque()
# assigning name
if self.fusion == 'no': # allowing net to work with lidar only
features_from_enc_stack.append(stream_1_data)
features = stream_1_data
elif self.fusion == 'early':
features_from_enc_stack.append(torch.cat((stream_1_data, stream_2_data), 1))
features = torch.cat((stream_1_data, stream_2_data), 1)
elif self.fusion == 'mid':
features_from_enc_stack.append(torch.cat((stream_1_data, stream_2_data), 1))
features = stream_1_data
stream_2_features = self.stream_2_features(stream_2_data)
else:
raise AttributeError
# encoding
for i, enc_module in enumerate(self.features):
features = enc_module(features) # encode
# concat lidar and rgb after transition
if self.fusion == 'mid' and i == self.concat_after_module_idx:
assert features.size() == stream_2_features.size(), str(features.size()) + ' ' + str(stream_2_features.size())
features = torch.cat((features, stream_2_features), 1)
features = self.concat_module(features)
# save features for decoder in stack
if i == self.num_layers_before_blocks-2: # get size before maxpool before first block
HxW_shape_stack.append(features.size())
if isinstance(enc_module, _DenseBlock) and i<len(self.features)-1: # only blocks but skip last
features_from_enc_stack.append(features)
HxW_shape_stack.append(features.size())
# decoding | ugly quick and dirty implementation
for i, dec_module in enumerate(self.decoder):
if not isinstance(dec_module, nn.ConvTranspose2d):
if i > 0 and not isinstance(dec_module, nn.Upsample): # concat upsampled data and data from encoder
features = torch.cat((features, features_from_enc_stack.pop()), 1) # concat
features = dec_module(features) # decode
else:
features = dec_module(features, output_size=HxW_shape_stack.pop()) # decode
# scale to heat maps
features = torch.cat((features, features_from_enc_stack.pop()), 1)
features = self.dec_out_to_heat_maps(features)
return features
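# Hypothetical shape sketch for a mid-fusion forward pass (tensor sizes are illustrative only):
#   rgb   = torch.randn(2, config.model.stream_1_in_channels, 384, 1248)   # stream_1_data
#   lidar = torch.randn(2, config.model.stream_2_in_channels, 384, 1248)   # stream_2_data
#   heat_maps = Dense_U_Net_lidar(config)(rgb, lidar)  # per-class heat maps, see docstring above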
def _load_state_dict(model, config, model_url, progress):
'''
load pretrained densenet state dict from torchvision into model
copy from https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py
    !! added second part before the last line; that's why we cannot simply import the function
'''
# '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
state_dict_torchvision = load_state_dict_from_url(model_url, progress=progress)
for key in list(state_dict_torchvision.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict_torchvision[new_key] = state_dict_torchvision[key]
del state_dict_torchvision[key]
### ADDED pytorch version such that it fits the Dense_U_Net_lidar
# remove state dict of first module if different from original
if model.fusion == 'early' or model.stream_1_in_channels != 3:
del state_dict_torchvision['features.conv0.weight']
# update state dict of stream_1 and load state dict back into model
state_dict_model = model.state_dict()
state_dict_model.update(state_dict_torchvision)
model.load_state_dict(state_dict_model, strict=False)
# load weights into stream_2 and load state dict back into model
if model.fusion == 'mid' :
lidar_feature_state_dict = model.stream_2_features.state_dict()
feature_state_dict = model.features.state_dict()
del feature_state_dict['conv0.weight']
lidar_feature_state_dict.update(feature_state_dict)
model.stream_2_features.load_state_dict(lidar_feature_state_dict, strict=False)
def _dense_u_net_lidar(arch, growth_rate, block_config, num_init_features, pretrained, progress,
config):
'''
loads config file if not given
creates model
loads pretrained weights if wanted
'''
if config is None:
config = get_config(os.path.join('content', 'mnt', 'My Drive', 'Colab Notebooks', 'DeepCV_Packages'))
# for compatibility with densenet original functions
config.model.growth_rate = growth_rate
config.model.block_config = block_config
config.model.num_init_features = num_init_features
model = Dense_U_Net_lidar(config)
if pretrained:
_load_state_dict(model, config, model_urls[arch], progress)
return model
def densenet121_u_lidar(pretrained=False, progress=True, config=None):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _dense_u_net_lidar('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress,
config)
def densenet161_u_lidar(pretrained=False, progress=True, config=None):
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _dense_u_net_lidar('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,
config)
def densenet169_u_lidar(pretrained=False, progress=True, config=None):
r"""Densenet-169 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _dense_u_net_lidar('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress,
config)
def densenet201_u_lidar(pretrained=False, progress=True, config=None):
r"""Densenet-201 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _dense_u_net_lidar('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress,
config)
|
from numpy import maximum
from numpy import zeros
from gwlfe.BMPs.AgAnimal.NFEN import NFEN
from gwlfe.BMPs.AgAnimal.NFEN import NFEN_f
from gwlfe.BMPs.Stream.NSTAB import NSTAB
from gwlfe.BMPs.Stream.NSTAB import NSTAB_f
from gwlfe.BMPs.Stream.NURBBANK import NURBBANK
from gwlfe.BMPs.Stream.NURBBANK import NURBBANK_f
from gwlfe.Memoization import memoize
from gwlfe.Output.Loading.StreamBankN import StreamBankN
from gwlfe.Output.Loading.StreamBankN import StreamBankN_f
@memoize
def StreamBankN_1(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, n42b, AgLength,
UrbBankStab, SedNitr, BankNFrac, n69c, n45, n69, n46c, n42):
result = zeros((NYrs, 12))
streambank_n = StreamBankN(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, SedNitr, BankNFrac)
nstab = NSTAB(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, n42b, n46c, SedNitr, BankNFrac, n69c)
nfen = NFEN(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, AgLength,
n42, SedNitr, BankNFrac, n45, n69)
nurbbank = NURBBANK(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, n42b, UrbBankStab, SedNitr, BankNFrac, n69c)
for Y in range(NYrs):
for i in range(12):
result[Y][i] = streambank_n[Y][i] - (nstab[Y][i] + nfen[Y][i] + nurbbank[Y][i])
if result[Y][i] < 0:
result[Y][i] = 0
return result
@memoize
def StreamBankN_1_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, n42b, AgLength,
UrbBankStab, SedNitr, BankNFrac, n69c, n45, n69, n46c, n42):
streambank_n = StreamBankN_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, SedNitr, BankNFrac)
nstab = NSTAB_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, n42b, n46c, SedNitr, BankNFrac, n69c)
nfen = NFEN_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, AgLength,
n42, SedNitr, BankNFrac, n45, n69)
nurbbank = NURBBANK_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area,
CNI_0, AntMoist_0, Grow_0, CNP_0, Imper, ISRR, ISRA, CN,
UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef, Qretention, PctAreaInfil, n25b, Landuse,
TileDrainDensity, PointFlow, StreamWithdrawal, GroundWithdrawal,
NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF,
AvSlope, SedAAdjust, StreamLength, n42b, UrbBankStab, SedNitr, BankNFrac, n69c)
return maximum(streambank_n - (nstab + nfen + nurbbank), 0)
|
from typing import List
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
# ans = -float('inf')
# sum_ = -float('inf')
# for index, element in enumerate(nums):
# sum_ = max(sum_ + element, element)
# ans = max(ans, sum_)
# return ans
n = len(nums)
dp = [0 for _ in range(n)]
dp[0] = nums[0]
for i in range(1, n):
dp[i] = max(nums[i] + dp[i - 1], nums[i])
return max(dp)
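# Quick sanity check outside the LeetCode harness (this driver is hypothetical, not part of the submission):
if __name__ == "__main__":
    assert Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6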
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 - 2019, doudoudzj
# Copyright (c) 2012, VPSMate development team
# All rights reserved.
#
# InPanel is distributed under the terms of the (new) BSD License.
# The full license can be found in 'LICENSE'.
'''Module for Service Management.'''
import glob
import os
import shlex
import subprocess
class Service(object):
'''supported service operate script'''
service_items = {
'inpanel': False,
'nginx': False,
'httpd': False,
'vsftpd': False,
'mysqld': False,
'redis': False,
'memcached': False,
'mongod': False,
'php-fpm': False,
'sendmail': False,
'postfix': False,
'sshd': False,
'iptables': False,
'crond': False,
'ntpd': False,
'named': False,
'lighttpd': False,
'proftpd': False,
'pure-ftpd': False,
'smb': False
}
pidnames = {
'sendmail': ['sm-client'],
'smb': ['smbd']
}
subsys_locks = (
'iptables'
)
@classmethod
def status(self, service):
initscript = '/etc/init.d/%s' % service
if not os.path.exists(initscript):
initscript = '/usr/lib/systemd/system/%s.service' % service
if not os.path.exists(initscript):
return None
pidfile = '/var/run/%s.pid' % service
if not os.path.exists(pidfile):
p = glob.glob('/var/run/%s/*.pid' % service)
if len(p) > 0:
pidfile = p[0]
else:
# some services have special pid filename
if service in Service.pidnames:
for pidname in Service.pidnames[service]:
pidfile = '/var/run/%s.pid' % pidname
if os.path.exists(pidfile):
break
else:
pidfile = None
else:
pidfile = None
if not pidfile:
            # not always correct: some services are dead but the lock still exists
            # some services don't have a pidfile
if service in Service.subsys_locks:
if os.path.exists('/var/lock/subsys/%s' % service):
return 'running'
# try execute pidof to find the pidfile
cmd_ = shlex.split('pidof -c -o %%PPID -x %s' % service)
p = subprocess.Popen(cmd_, stdout=subprocess.PIPE, close_fds=True)
pid = p.stdout.read().strip()
p.wait()
if not pid:
return 'stopped'
if pidfile:
with open(pidfile) as f:
pid = f.readline().strip()
if not pid:
return 'stopped'
if not os.path.exists('/proc/%s' % pid):
return 'stopped'
return 'running'
@classmethod
def autostart_set(self, service, autostart=True):
"""Add or remove service to autostart list.
E.g: chkconfig service_name on|off
"""
cmdbin = 'chkconfig'
status = 'on' if autostart else 'off'
cmd = '%s %s %s' % (cmdbin, service, status)
cmd = shlex.split(cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, close_fds=True)
p.stdout.read()
return p.wait() == 0 and True or False
@classmethod
def autostart_list(self):
"""Return a list of the autostart service name.
"""
startlevel = -1
with open('/etc/inittab') as f:
for line in f:
if line.startswith('id:'):
startlevel = line.split(':')[1]
break
if startlevel == -1:
p = subprocess.Popen(shlex.split('runlevel'),
stdout=subprocess.PIPE, close_fds=True)
            startlevel = int(p.stdout.read().decode().strip().replace('N ', ''))
p.wait()
rcpath = '/etc/rc.d/rc%s.d/' % startlevel
enableServicePath = '/etc/systemd/system/multi-user.target.wants/'
services = [
os.path.basename(os.readlink(filepath))
for filepath in glob.glob('%s/S*' % rcpath)
]
services += [
os.path.basename(filePath).replace('.service', '')
for filePath in glob.glob('%s*.service' % enableServicePath)
]
return services
if __name__ == '__main__':
    autostart_services = Service.autostart_list()
    for service in Service.service_items:
        print('* Status of %s: %s (autostart: %s)' % (service, Service.status(service), str(service in autostart_services)))
|
# coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional, Union, Literal # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator, Field, Extra # noqa: F401
from aries_cloudcontroller.model.menu_form_param import MenuFormParam
class MenuForm(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
MenuForm - a model defined in OpenAPI
description: Additional descriptive text for menu form [Optional].
params: List of form parameters [Optional].
submit_label: Alternative label for form submit button [Optional].
title: Menu form title [Optional].
"""
description: Optional[str] = None
params: Optional[List[MenuFormParam]] = None
submit_label: Optional[str] = Field(None, alias="submit-label")
title: Optional[str] = None
def __init__(
self,
*,
description: Optional[str] = None,
params: Optional[List[MenuFormParam]] = None,
submit_label: Optional[str] = None,
title: Optional[str] = None,
**kwargs,
):
super().__init__(
description=description,
params=params,
submit_label=submit_label,
title=title,
**kwargs,
)
class Config:
allow_population_by_field_name = True
MenuForm.update_forward_refs()
|
from utils.aioLogger import aioLogger
import pandas as pd
from typing import List, Tuple, Union
from config.aioConfig import heightWeightConfig
class heightWeightTimeVerifier:
"""this class verify the time info"""
def __init__(self, sorted_dfs: List[pd.DataFrame]):
self.sorted_dfs = sorted_dfs
self.logger = aioLogger(self.__class__.__name__).logger
@staticmethod
def get_start_time_from_df(df: pd.DataFrame) -> int:
return df[heightWeightConfig.TIME_COLUMN].iloc[0]
@staticmethod
def get_end_time_from_df(df: pd.DataFrame) -> int:
return df[heightWeightConfig.TIME_COLUMN].iloc[-1]
    def check_according_start_and_end_time(self) -> bool:
        """Each test's start time must be later than the previous test's end time."""
        for i in range(len(self.sorted_dfs) - 1):
            end_time_df_before = self.get_end_time_from_df(self.sorted_dfs[i])
            start_time_df_after = self.get_start_time_from_df(self.sorted_dfs[i + 1])
            if end_time_df_before >= start_time_df_after:
                # an overlapping or out-of-order pair fails the whole check
                return False
        self.logger.info(f"time_check for {len(self.sorted_dfs)} dfs -> OK")
        return True
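# Minimal usage sketch (the column name comes from heightWeightConfig; the data values are made up):
if __name__ == "__main__":
    df_1 = pd.DataFrame({heightWeightConfig.TIME_COLUMN: [0, 10]})
    df_2 = pd.DataFrame({heightWeightConfig.TIME_COLUMN: [20, 30]})
    assert heightWeightTimeVerifier([df_1, df_2]).check_according_start_and_end_time()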
|
from ezcode.list.linked_list import SinglyLinkedList
from ezcode.list.stack import Stack, MinStack, MaxStack
from ezcode.list.queue import Queue, MonotonicQueue
from ezcode.list.lru_cache import LRUCache
from fixture.utils import equal_list
class Node:
def __init__(self, v=None, n=None):
self.v = v
self.n = n
def __repr__(self):
return f"Node({self.v})"
def test_singly_linked_list_basics():
list_0 = SinglyLinkedList(head=None, data_name="v", next_name="n")
list_0_copy = SinglyLinkedList(head=None, data_name="v", next_name="n")
list_0_reverse = SinglyLinkedList(head=None, data_name="v", next_name="n")
list_1 = SinglyLinkedList(head=Node(1), data_name="v", next_name="n")
list_1_copy = SinglyLinkedList(head=Node(1), data_name="v", next_name="n")
list_1_reverse = SinglyLinkedList(head=Node(1), data_name="v", next_name="n")
list_2 = SinglyLinkedList(head=Node(1, Node(2)), data_name="v", next_name="n")
list_2_copy = SinglyLinkedList(head=Node(1, Node(2)), data_name="v", next_name="n")
list_2_reverse = SinglyLinkedList(head=Node(2, Node(1)), data_name="v", next_name="n")
list_3 = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name="v", next_name="n")
list_3_copy = SinglyLinkedList(head=Node(1, Node(2, Node(3))), data_name="v", next_name="n")
list_3_reverse = SinglyLinkedList(head=Node(3, Node(2, Node(1))), data_name="v", next_name="n")
assert list_0_copy == list_0
assert list_1_copy == list_1
assert list_2_copy == list_2
assert list_3_copy == list_3
assert list_0.copy() == list_0_copy
assert list_1.copy() == list_1_copy
assert list_2.copy() == list_2_copy
assert list_3.copy() == list_3_copy
assert not list_0 == list_1
assert not list_1 == list_2
assert not list_2 == list_3
assert not list_3 == list_0
assert str(list_0) == "None"
assert str(list_1) == "1 ─> None"
assert str(list_2) == "1 ─> 2 ─> None"
assert str(list_3) == "1 ─> 2 ─> 3 ─> None"
assert equal_list(list_0.to_array(), [])
assert equal_list(list_1.to_array(), [1])
assert equal_list(list_2.to_array(), [1, 2])
assert equal_list(list_3.to_array(), [1, 2, 3])
list_0_reverse_copy = list_0_reverse.copy()
list_1_reverse_copy = list_1_reverse.copy()
list_2_reverse_copy = list_2_reverse.copy()
list_3_reverse_copy = list_3_reverse.copy()
list_0_reverse_copy.reverse()
list_1_reverse_copy.reverse()
list_2_reverse_copy.reverse()
list_3_reverse_copy.reverse()
assert list_0_copy == list_0_reverse_copy
assert list_1_copy == list_1_reverse_copy
assert list_2_copy == list_2_reverse_copy
assert list_3_copy == list_3_reverse_copy
list_0_reverse.head = list_0_reverse.algorithm.reverse(list_0_reverse.head, list_0_reverse.algorithm.get_next(list_0_reverse.head))
list_1_reverse.head = list_1_reverse.algorithm.reverse(list_1_reverse.head, list_1_reverse.algorithm.get_next(list_1_reverse.head))
list_2_reverse.head = list_2_reverse.algorithm.reverse(list_2_reverse.head, list_2_reverse.algorithm.get_next(list_2_reverse.head))
list_3_reverse.head = list_3_reverse.algorithm.reverse(list_3_reverse.head, list_3_reverse.algorithm.get_next(list_3_reverse.head))
assert list_0_copy == list_0_reverse
assert list_1_copy == list_1_reverse
assert list_2_copy == list_2_reverse
assert list_3_copy == list_3_reverse
try:
list_0.peek_head() == 0
except IndexError as e:
assert e.args[0] == "Peek head at an empty SinglyLinkedList"
else:
assert False
list_1.peek_head() == 1
list_2.peek_head() == 2
list_3.peek_head() == 3
list_3_copy.pop_head() == list_2_copy
list_2_copy.pop_head() == list_1_copy
list_1_copy.pop_head() == list_0_copy
try:
list_0.pop_head()
except IndexError as e:
assert e.args[0] == "Pop head from an empty SinglyLinkedList"
else:
assert False
list_3.delete(set([2, 3])) == list_1
list_2.delete(set([1, 2])) == list_0
def test_reverse_sublist():
lists = [
SinglyLinkedList(head=Node(0), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1)), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3)))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4))))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5)))))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6))))))), data_name="v", next_name="n"),
SinglyLinkedList(head=Node(0, Node(1, Node(2, Node(3, Node(4, Node(5, Node(6, Node(7)))))))), data_name="v", next_name="n"),
]
for list_orig in lists:
list_orig.print()
for i in range(len(list_orig)):
list_orig_copy = list_orig.copy()
list_orig_copy.reverse(start_index=i)
assert equal_list(list_orig_copy.to_array(), [x for x in range(i)] + [x for x in range(len(list_orig) - 1, i - 1, -1)])
list_orig_copy = list_orig.copy()
list_orig_copy.reverse(end_index=i)
assert equal_list(list_orig_copy.to_array(), [x for x in range(i, -1, -1)] + [x for x in range(i + 1, len(list_orig))])
sublist_length = len(list_orig) // 2
if sublist_length > 0 and i <= len(list_orig) - sublist_length:
start, end = i, i + sublist_length - 1
list_orig_copy = list_orig.copy()
list_orig_copy.reverse(start_index=start, end_index=end)
assert equal_list(
list_orig_copy.to_array(),
[x for x in range(start)] + [x for x in range(end, start - 1, -1)] + [x for x in range(end + 1, len(list_orig))]
)
def test_queue():
queue = Queue()
for i in range(3):
assert len(queue) == i
queue.push(i)
assert queue.peek() == 0
for i in range(3):
assert len(queue) == 3 - i
assert queue.peek() == i
assert queue.pop() == i
def test_stack():
stack = Stack()
for i in range(3):
assert len(stack) == i
stack.push(i)
assert stack.peek() == i
for i in range(3):
assert len(stack) == 3 - i
assert stack.peek() == 2 - i
assert stack.pop() == 2 - i
def test_lru_cache():
lru_cache = LRUCache(capacity=3)
assert lru_cache.get(1) is None
lru_cache.put(key=1, value=1)
lru_cache.put(key=2, value=2)
lru_cache.put(key=3, value=3)
assert lru_cache.get(1) == 1 # 1 3 2
lru_cache.put(key=4, value=4) # 4 1 3 (no 2)
assert lru_cache.get(2) is None
assert lru_cache.get(4) == 4 # 4 1 3
lru_cache.put(key=3, value=33) # 3 4 1
lru_cache.put(key=5, value=5) # 5 3 4 (no 1)
assert lru_cache.get(1) is None
assert lru_cache.get(3) == 33
assert lru_cache.get(5) == 5
def test_min_max_stack():
min_stack = MinStack()
max_stack = MaxStack()
for data, min_data, max_data in zip([2, 1, 3, 5, 4], [2, 1, 1, 1, 1], [2, 2, 3, 5, 5]):
min_stack.push(data)
max_stack.push(data)
assert min_stack.get_min() == min_data
assert max_stack.get_max() == max_data
for min_data, max_data in zip([1, 1, 1, 2], [5, 3, 2, 2]):
min_stack.pop()
max_stack.pop()
assert min_stack.get_min() == min_data
assert max_stack.get_max() == max_data
def test_monontonic_queue():
mq = MonotonicQueue(is_increasing=True)
for data, benchmark in zip([5, 3, 1, 2, 4], [5, 3, 1, 1, 1]):
mq.push(data)
assert mq.peek() == benchmark
mq = MonotonicQueue(is_increasing=False)
for data, benchmark in zip([5, 3, 1, 2, 4], [5, 5, 5, 5, 5]):
mq.push(data)
assert mq.peek() == benchmark
|
import random
from configparser import ConfigParser
import smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import sys
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.dirname(current_dir)
parent_dir = os.path.dirname(parent_dir)
sys.path.append(parent_dir)
import Packages.logging.log_file as log
def choose_random_line(xlist):
lines = open(xlist).read().splitlines()
return random.choice(lines)
def get_server_info(smtpserver):
    parser = ConfigParser()
parser.read(smtpserver)
ip = parser["EMAIL"]["ip"]
port = parser["EMAIL"]["port"]
sender_email = parser["EMAIL"]["sender"]
password = parser["EMAIL"]["password"]
return ip, sender_email, password, port
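# A minimal sketch of the INI file that get_server_info() expects, inferred from the keys
# read above; host, port and credentials are placeholders, not real values.
#
#   [EMAIL]
#   ip = smtp.example.com
#   port = 465
#   sender = sender@example.com
#   password = changeme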
def main(smtpserver, destinationaddresslist, bodylist, subjectlist, attachmentslist):
serverinfo = get_server_info(smtpserver)
subject = choose_random_line(subjectlist)
body = choose_random_line(bodylist)
sender_email = serverinfo[1]
receiver_email = choose_random_line(destinationaddresslist)
password = serverinfo[2]
serverip = serverinfo[0]
serverport = serverinfo[3]
filename = choose_random_line(attachmentslist) # In same directory as script
# Create a multipart message and set headers
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = receiver_email
message["Subject"] = subject
# Add body to email
message.attach(MIMEText(body, "plain"))
    # Decide whether an attachment should be sent
    if random.randint(1, 10) < 7:
with open(filename, "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
# Add attachment to message and convert message to string
message.attach(part)
text = message.as_string()
# Log in to server using secure context and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL(serverip, int(serverport), context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, text)
        log.log_event("Email sent;" + receiver_email)
|
"""
*
* Copyright (c) 2017 Cisco Systems, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* Neither the name of the Cisco Systems, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*
"""
import os
import json
import pickle
tls_fp_dict = None
def tls_fp_dict_init():
global tls_fp_dict
tls_fp_file = 'res_tls_fingerprints.json'
cur_dir = os.path.dirname(__file__)
tls_fp_path = os.path.join(cur_dir, tls_fp_file)
tls_fp_dict = {}
with open(tls_fp_path) as f:
for counter, line in enumerate(f):
tmp = json.loads(line)
fpvalue = pickle.dumps(tmp['fingerprint']['tls'])
if fpvalue in tls_fp_dict:
print "warning: duplicate tls fingerprint in line " + str(counter + 1) + " of file " + tls_fp_file
tls_fp_dict[fpvalue] = tmp['label']
def tls_inference(f, kwargs):
global tls_fp_dict
if not tls_fp_dict:
tls_fp_dict_init()
if 'fingerprint' in f:
if 'tls' in f['fingerprint']:
fpvalue = pickle.dumps(f['fingerprint']['tls'])
if fpvalue in tls_fp_dict:
return {'tls': tls_fp_dict[fpvalue]}
return None
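# Hypothetical usage sketch (not part of the original module): tls_inference() expects a
# flow record with a nested 'fingerprint' -> 'tls' entry, mirroring the JSON lines read by
# tls_fp_dict_init(). The fingerprint value below is a placeholder.
#
# example_flow = {'fingerprint': {'tls': ['0303', 'c02f', 'c030']}}
# result = tls_inference(example_flow, {})   # {'tls': <label>} if the fingerprint is known, otherwise None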
|
from edutls.record import ContentType
from edutls.types import UInt8Enum, UInt16Enum, Protocol
class AlertLevel(UInt8Enum):
warning = 1,
fatal = 2
class AlertDescription(UInt8Enum):
close_notify = 0,
unexpected_message = 10,
bad_record_mac = 20,
record_overflow = 22,
handshake_failure = 40,
bad_certificate = 42,
unsupported_certificate = 43,
certificate_revoked = 44,
certificate_expired = 45,
certificate_unknown = 46,
illegal_parameter = 47,
unknown_ca = 48,
access_denied = 49,
decode_error = 50,
decrypt_error = 51,
protocol_version = 70,
insufficient_security = 71,
internal_error = 80,
inappropriate_fallback = 86,
user_canceled = 90,
missing_extension = 109,
unsupported_extension = 110,
unrecognized_name = 112,
bad_certificate_status_response = 113,
unknown_psk_identity = 115,
certificate_required = 116,
no_application_protocol = 120,
class Alert(Protocol):
def __init__(self, level: AlertLevel = AlertLevel.warning,
description: AlertDescription = AlertDescription.close_notify):
self.level = level
self.description = description
def pack(self) -> bytes:
return self.level.pack() + self.description.pack()
def unpack(self, data: bytes) -> bytes:
self.level, data = AlertLevel.unpack(data)
self.description, data = AlertDescription.unpack(data)
return data
@property
def type(self):
return ContentType.alert
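# Illustrative round-trip sketch (assumption: AlertLevel.pack() and AlertDescription.pack()
# each emit a single byte, as the two-byte TLS alert layout suggests); not executed here.
#
# alert = Alert(AlertLevel.fatal, AlertDescription.handshake_failure)
# raw = alert.pack()        # level byte followed by description byte
# parsed = Alert()
# parsed.unpack(raw)        # parsed.level / parsed.description now mirror the originals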
|
import dash_core_components as dcc
from dash.dependencies import Input, Output, State
from urllib.parse import parse_qs, urlparse
from app.models import ElementTemplate
def layout():
return dcc.Dropdown(id = "elementTemplateDropdown", placeholder = "Select Element Template", multi = False, value = -1)
def optionsCallback(dashApp):
@dashApp.callback(Output(component_id = "elementTemplateDropdown", component_property = "options"),
[Input(component_id = "siteDropdown", component_property = "value")])
def elementTemplateDropdownOptions(siteDropdownValue):
return [{"label": elementTemplate.Name, "value": elementTemplate.ElementTemplateId} for elementTemplate in ElementTemplate.query. \
filter(ElementTemplate.SiteId == siteDropdownValue).order_by(ElementTemplate.Name).all()]
def valueCallback(dashApp):
@dashApp.callback(Output(component_id = "elementTemplateDropdown", component_property = "value"),
[Input(component_id = "elementTemplateDropdown", component_property = "options")],
[State(component_id = "url", component_property = "href"),
State(component_id = "elementTemplateDropdown", component_property = "value")])
def elementTemplateDropdownValue(elementTemplateDropdownOptions, urlHref, elementTemplateDropdownValue):
elementTemplateId = None
if elementTemplateDropdownValue == -1:
if elementTemplateDropdownOptions:
queryString = parse_qs(urlparse(urlHref).query)
if "elementTemplateId" in queryString:
id = int(queryString["elementTemplateId"][0])
if len(list(filter(lambda elementTemplate: elementTemplate["value"] == id, elementTemplateDropdownOptions))) > 0:
elementTemplateId = id
return elementTemplateId
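# Hypothetical wiring sketch: dashApp is assumed to be an existing dash.Dash instance whose
# layout already contains the "siteDropdown" and "url" components referenced above.
#
# element_template_dropdown = layout()
# optionsCallback(dashApp)   # repopulate options whenever the site dropdown changes
# valueCallback(dashApp)     # preselect the template given by ?elementTemplateId= in the URL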
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@AUTHOR: Joselyn Zhao
@CONTACT: zhaojing17@foxmail.com
@HOME_PAGE: joselynzhao.top
@SOFTWARE: PyCharm
@FILE: qie.py
@TIME: 2020/4/18 20:55
@DES: named slices
'''
items = [0,1,2,3,4,5,6]
a = slice(2,4)
print(items[2:4])
print(items[a])
items[a] = [10, 11]  # replace the sliced portion in place
print(items)
print(a.start)
print(a.stop)
print(a.step)  # None
# a slice object carries these three attributes: start, stop and step
s = 'Helloworld'
print(a.indices(len(s)))  # indices() maps the slice onto a sequence of the given length and returns a (start, stop, step) tuple
for i in range(*a.indices(len(s))):
print(s[i])
print(range(*a.indices(len(s))))
|
# Write code using find() and string slicing to extract the number at the end of the line below. Convert the extracted value to a floating point number and print it out.
# Desired Output
# 0.8475
text = "X-DSPAM-Confidence: 0.8475";
pos = text.find(" ")
number = text[pos:]
number = number.strip()
num = float(number)
print(num)
|
import pathlib
import importlib
from typing import Any, AsyncIterator, Tuple
from dffml import (
config,
field,
entrypoint,
SimpleModel,
ModelNotTrained,
Feature,
Features,
Sources,
Record,
SourcesContext,
)
@config
class DAAL4PyLRModelConfig:
predict: Feature = field("Label or the value to be predicted")
features: Features = field("Features to train on. For SLR only 1 allowed")
location: pathlib.Path = field("Location where state should be saved",)
@entrypoint("daal4pylr")
class DAAL4PyLRModel(SimpleModel):
r"""
Implemented using daal4py.
First we create the training and testing datasets
**train.csv**
.. code-block::
:test:
:filepath: train.csv
f1,ans
12.4,11.2
14.3,12.5
14.5,12.7
14.9,13.1
16.1,14.1
16.9,14.8
16.5,14.4
15.4,13.4
17.0,14.9
17.9,15.6
18.8,16.4
20.3,17.7
22.4,19.6
19.4,16.9
15.5,14.0
16.7,14.6
**test.csv**
.. code-block::
:test:
:filepath: test.csv
f1,ans
18.8,16.4
20.3,17.7
22.4,19.6
19.4,16.9
15.5,14.0
16.7,14.6
Train the model
.. code-block:: console
:test:
$ dffml train \
-model daal4pylr \
-model-features f1:float:1 \
-model-predict ans:int:1 \
-model-location tempdir \
-sources f=csv \
-source-filename train.csv
Assess the accuracy
.. code-block:: console
:test:
$ dffml accuracy \
-model daal4pylr \
-model-features f1:float:1 \
-model-predict ans:int:1 \
-model-location tempdir \
-features ans:int:1 \
-sources f=csv \
-source-filename test.csv \
-scorer mse \
0.6666666666666666
Make a prediction
.. code-block:: console
:test:
$ echo -e 'f1,ans\n0.8,1\n' | \
dffml predict all \
-model daal4pylr \
-model-features f1:float:1 \
-model-predict ans:int:1 \
-model-location tempdir \
-sources f=csv \
-source-filename /dev/stdin
[
{
"extra": {},
"features": {
"ans": 1,
"f1": 0.8
},
"key": "0",
"last_updated": "2020-07-22T02:53:11Z",
"prediction": {
"ans": {
"confidence": null,
"value": 1.1907472649730522
}
}
}
]
Example usage of daal4py Linear Regression model using python API
**run.py**
.. literalinclude:: /../model/daal4py/examples/lr/lr.py
:test:
:filepath: run.py
Run the file
.. code-block:: console
:test:
$ python run.py
"""
CONFIG = DAAL4PyLRModelConfig
def __init__(self, config) -> None:
super().__init__(config)
self.pd = importlib.import_module("pandas")
self.np = importlib.import_module("numpy")
self.d4p = importlib.import_module("daal4py")
self.joblib = importlib.import_module("joblib")
self.lm = self.d4p.linear_regression_training(
interceptFlag=True, streaming=True
)
self.lm_predictor = self.d4p.linear_regression_prediction()
self.ac_predictor = self.d4p.linear_regression_prediction()
self.lm_trained = None
async def __aenter__(self):
await super().__aenter__()
self.path = self.filepath(
self.parent.config.location
if not hasattr(self.parent, "temp_dir")
else self.parent.temp_dir,
"trained_model.sav",
)
self.load_model()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
if self.lm_trained:
self.joblib.dump(self.lm_trained, self.path)
await super().__aexit__(exc_type, exc_value, traceback)
def compare(self, alist, bfloat):
result = []
for element in alist:
if element <= bfloat:
result.append(True)
else:
result.append(False)
return result
def load_model(self):
if self.path.is_file():
self.lm_trained = self.joblib.load(self.path)
def filepath(self, location, file_name):
return location / file_name
async def train(self, sources: Sources) -> None:
async for record in sources.with_features(
self.features + [self.parent.config.predict.name]
):
feature_data = record.features(
self.features + [self.parent.config.predict.name]
)
# NOTE Duplicate feature data due to regression in oneDAL
# See https://github.com/intel/dffml/issues/801
df = self.pd.DataFrame([feature_data] * 2, index=[0, 1])
            xdata = df.drop([self.parent.config.predict.name], axis=1)
ydata = df[self.parent.config.predict.name]
self.lm.compute(xdata, ydata)
self.lm_trained = self.lm.finalize().model
async def predict(
self, sources: SourcesContext
) -> AsyncIterator[Tuple[Record, Any, float]]:
# Iterate through each record that needs a prediction
if self.lm_trained is None:
raise ModelNotTrained("Train model before prediction.")
async for record in sources.with_features(
self.parent.config.features.names()
):
feature_data = record.features(self.features)
predict = self.pd.DataFrame(feature_data, index=[0])
preds = self.lm_predictor.compute(predict, self.lm_trained)
target = self.parent.config.predict.name
if preds.prediction.size == 1:
prediction = preds.prediction.flat[0]
else:
prediction = preds.prediction
record.predicted(target, prediction, float("nan"))
# Yield the record to the caller
yield record
|
#+
# Copyright 2015 iXsystems, Inc.
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#####################################################################
from . import sysctl
from xml.etree import ElementTree as etree
import six
_classes = {}
_geoms = {}
_providers = {}
class GEOMBase(object):
def __init__(self, xml):
self.xml = xml
@property
def id(self):
return self.xml.attrib['id']
class GEOMClass(GEOMBase):
@property
def name(self):
return self.xml.find('name').text
@property
def geoms(self):
for i in self.xml.findall('geom'):
yield GEOMObject(i)
def geom_by_name(self, name):
ret = list(filter(lambda g: g.name == name, self.geoms))
return ret[0] if len(ret) > 0 else None
def __str__(self):
return "<geom.GEOMClass name '{0}' id '{1}'>".format(self.name, self.id)
def __repr__(self):
return str(self)
def __getstate__(self):
return {
'name': self.name,
'geoms': [x.__getstate__() for x in self.geoms]
}
class GEOMObject(GEOMBase):
@property
def name(self):
return self.xml.find('name').text
@property
def rank(self):
return int(self.xml.find('rank').text)
@property
def clazz(self):
return class_by_id(self.xml.find('class').attrib['ref'])
@property
def provider(self):
try:
return six.next(self.providers)
except StopIteration:
return None
@property
def consumer(self):
try:
return six.next(self.consumers)
except StopIteration:
return None
@property
def providers(self):
for i in self.xml.findall('provider'):
yield GEOMProvider(i)
@property
def consumers(self):
for i in self.xml.findall('consumer'):
yield GEOMConsumer(i)
@property
def config(self):
config = self.xml.find('config')
if config is not None:
return {i.tag: i.text for i in config}
return None
def __str__(self):
return "<geom.GEOMObject name '{0}' id '{1}'>".format(self.name, self.id)
def __repr__(self):
return str(self)
def __getstate__(self):
return {
'id': self.id,
'name': self.name,
'class_id': self.clazz.id,
'config': self.config,
'providers': [x.__getstate__() for x in self.providers],
'consumers': [x.__getstate__() for x in self.consumers]
}
class GEOMProvider(GEOMBase):
@property
def geom(self):
return geom_by_id(self.xml.find('geom').attrib['ref'])
@property
def mode(self):
return self.xml.find('mode').text
@property
def name(self):
return self.xml.find('name').text
@property
def mediasize(self):
return int(self.xml.find('mediasize').text)
@property
def sectorsize(self):
return int(self.xml.find('sectorsize').text)
@property
def stripesize(self):
return int(self.xml.find('stripesize').text)
@property
def stripeoffset(self):
return int(self.xml.find('stripeoffset').text)
@property
def description(self):
try:
d = self.xml.find("config/descr")
return d.text
        except AttributeError:
return None
@property
def config(self):
config = self.xml.find('config')
if config is not None:
return {i.tag: i.text for i in config}
return None
def __str__(self):
return "<geom.GEOMProvider name '{0}' id '{1}'>".format(self.name, self.id)
def __repr__(self):
return str(self)
def __getstate__(self):
return {
'name': self.name,
'mode': self.mode,
'geom_id': self.geom.id,
'mediasize': self.mediasize,
'sectorsize': self.sectorsize,
'stripesize': self.stripesize,
'stripeoffset': self.stripeoffset,
'config': self.config
}
class GEOMConsumer(GEOMBase):
@property
def geom(self):
return geom_by_id(self.xml.find('geom').attrib['ref'])
@property
def mode(self):
return self.xml.find('mode').text
@property
def provider(self):
return provider_by_id(self.xml.find('provider').attrib['ref'])
@property
def config(self):
config = self.xml.find('config')
if config is not None:
return {i.tag: i.text for i in config}
return None
def __str__(self):
return "<geom.GEOMConsumer id '{0}'>".format(self.id)
def __repr__(self):
return str(self)
def __getstate__(self):
return {
'geom_id': self.geom.id,
'provider_id': self.provider.id,
'config': self.config
}
def scan():
confxml = sysctl.sysctlbyname('kern.geom.confxml').strip('\x00')
tree = etree.fromstring(confxml)
for i in tree.findall('class'):
cls = GEOMClass(i)
_classes[cls.id] = cls
for g in cls.geoms:
_geoms[g.id] = g
for p in g.providers:
_providers[p.id] = p
def classes():
return _classes.values()
def geoms():
return _geoms.values()
def class_by_id(ident):
return _classes[ident]
def class_by_name(name):
ret = list(filter(lambda g: g.name == name, _classes.values()))
return ret[0] if len(ret) > 0 else None
def geom_by_id(ident):
return _geoms[ident]
def geom_by_name(classname, name):
cls = class_by_name(classname)
if not cls:
return None
return cls.geom_by_name(name)
def provider_by_id(ident):
return _providers[ident]
# Yeah, no, don't do this. On a large system, it was discovered
# this module was using ~24MB of resident memory. (tested on ~1240 disk system)
# scan()
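# Illustrative usage sketch (call scan() explicitly, since importing the module deliberately
# does not; the class name 'PART' below is an assumption, not something this module defines):
#
# scan()
# part_class = class_by_name('PART')
# if part_class is not None:
#     for g in part_class.geoms:
#         for p in g.providers:
#             print(g.name, p.name, p.mediasize)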
|
import numpy as np
import sklearn.model_selection
class _TemporalDataset:
def __init__(self, data):
self.X = np.array(data)
class TemporalDatasetTrain(_TemporalDataset):
def __init__(self, data, ground_truth=None):
super(TemporalDatasetTrain, self).__init__(data)
self.ground_truth = np.array(ground_truth)
@property
def X_train(self):
return self.X
@property
def ground_truth_train(self):
return self.ground_truth
class TemporalDatasetPredict(_TemporalDataset):
def __init__(self, data, ground_truth=None, prediction_rule='last_observed', prediction_window=4, random_state=42):
super(TemporalDatasetPredict, self).__init__(data)
self.prediction_rule = prediction_rule
        self.prediction_window = prediction_window
# Find time of last observed entry for all rows
if self.prediction_rule == 'last_observed':
time_of_prediction = self.X.shape[1] - np.argmax(
self.X[:, ::-1] != 0, axis=1) - 1
# Find time as nearest (in abs. value) nonzero intro to random integer
elif self.prediction_rule == 'random':
np.random.seed(random_state)
time_of_prediction = np.array([np.argmin(np.abs(np.random.randint(
                0, self.X.shape[1]) - np.arange(0, self.X.shape[1])*(x != 0))) for x in self.X], dtype=int)
# Copy values to be predicted
y_true = np.copy(self.X[range(self.X.shape[0]), time_of_prediction])
# Remove observations in or after prediction window
for i_row in range(self.X.shape[0]):
self.X[i_row, max(0, time_of_prediction[i_row] -
prediction_window):] = 0
# Find rows that still contain observations
self.valid_rows = np.sum(self.X, axis=1) > 0
# Remove all rows that don't satisfy the specified criteria
self.y = y_true[self.valid_rows]
self.X = self.X[self.valid_rows]
self.time_of_prediction = time_of_prediction[self.valid_rows]
self.ground_truth = ground_truth
        if self.ground_truth is not None:
self.ground_truth = ground_truth[self.valid_rows]
@property
def X_pred_regressor(self):
return self.X
@property
def y_true(self):
return self.y
@property
def ground_truth_pred(self):
return self.ground_truth
class TemporalDatasetKFold(_TemporalDataset):
def __init__(self, data, ground_truth=None, prediction_rule='last_observed', prediction_window=4, n_splits=5, random_state=42):
self.prediction_rule = prediction_rule
self.prediction_window = prediction_window
self.n_splits = n_splits
self.__pred_obj = TemporalDatasetPredict(
data=data, ground_truth=ground_truth, prediction_rule=self.prediction_rule, prediction_window=self.prediction_window, random_state=random_state)
if ground_truth is None:
self.__train_obj = TemporalDatasetTrain(
data=data[self.__pred_obj.valid_rows])
else:
self.__train_obj = TemporalDatasetTrain(
data=data[self.__pred_obj.valid_rows], ground_truth=ground_truth[self.__pred_obj.valid_rows])
kf = sklearn.model_selection.KFold(n_splits, shuffle=False)
self.__idc_per_fold = [
idc_fold for idc_fold in kf.split(self.__train_obj.X)]
# Instantiate with 1st fold
self.__i_fold = 0
self.__idc_train, self.__idc_pred = self.__idc_per_fold[self.i_fold]
@property
def X_train(self):
return self.__train_obj.X[self.__idc_train]
@property
def X_pred_regressor(self):
return self.__pred_obj.X[self.__idc_pred]
@property
def y_true(self):
return self.__pred_obj.y[self.__idc_pred]
@property
def time_of_prediction(self):
return self.__pred_obj.time_of_prediction[self.__idc_pred]
@property
def ground_truth_train(self):
if (self.__train_obj.ground_truth is None):
return None
return self.__train_obj.ground_truth[self.__idc_train]
@property
def ground_truth_pred(self):
if (self.__pred_obj.ground_truth is None):
return None
return self.__pred_obj.ground_truth[self.__idc_pred]
@property
def i_fold(self):
return self.__i_fold
@i_fold.setter
def i_fold(self, i_fold):
assert(int(i_fold) < self.n_splits)
self.__i_fold = int(i_fold)
self.__idc_train, self.__idc_pred = self.__idc_per_fold[self.__i_fold]
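# A minimal usage sketch (the array below is synthetic and only illustrates the expected
# shape: rows are individuals, columns are time steps, zeros mean "not observed"):
#
# data = np.array([[1, 0, 2, 0, 3, 0],
#                  [0, 4, 0, 5, 0, 6],
#                  [7, 0, 0, 8, 0, 0]])
# folds = TemporalDatasetKFold(data, prediction_window=2, n_splits=2)
# for i in range(folds.n_splits):
#     folds.i_fold = i
#     print(folds.X_train.shape, folds.X_pred_regressor.shape, folds.y_true)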
|
import os, sys
import time
from tqdm import tqdm
import argparse
def get_parser():
parser = argparse.ArgumentParser(description="PAIB")
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-pg",
"--passgen",
action="store_true",
help="Generate passwords using keywords given by the user",
)
group.add_argument(
"-op",
"--osint_passgen",
action="store_true",
help="helps to collect victim's info through inbuilt osint tool and generate passwords using the words collected",
)
group.add_argument(
"-pt",
"--pretrained",
action="store_true",
help="Generate password on a pretrained model using this command",
)
return parser
def main():
parser = get_parser()
args = parser.parse_args()
if args.passgen:
os.system("python3 train.py")
elif args.osint_passgen:
print("[+] Tool Developement in progress")
elif args.pretrained:
os.system("python3 sample.py")
else:
parser.print_help()
if __name__ == "__main__":
main()
|
# set up logging
import logging, os
from datetime import datetime, timedelta
logging.basicConfig(level=os.environ.get("LOGLEVEL","INFO"))
log = logging.getLogger(__name__)
import rasterio
import numpy as np
from multiprocessing import Pool
from .util import *
from .const import *
def _zonal_worker(args):
"""A function for use with the multiprocessing
package, passed to each worker.
Returns a dictionary of the form:
{zone_id:{'value':VALUE,'arable_pixels':VALUE,'percent_arable':VALUE},...}
Parameters
----------
args:tuple
Tuple containing the following (in order):
targetwindow
product_path
            shape_path
            mask_path
    """
targetwindow, product_path, shape_path, mask_path = args
# get product raster info
product_handle = rasterio.open(product_path,'r')
product_noDataVal = product_handle.meta['nodata']
product_data = product_handle.read(1,window=targetwindow)
product_handle.close()
# get shape raster info
shape_handle = rasterio.open(shape_path,'r')
shape_noDataVal = shape_handle.meta['nodata']
shape_data = shape_handle.read(1,window=targetwindow)
shape_handle.close()
# get mask raster info
if mask_path is not None:
mask_handle = rasterio.open(mask_path,'r')
mask_data = mask_handle.read(1,window=targetwindow)
mask_handle.close()
else:
mask_data = np.full(product_data.shape,1)
# create empty output dictionary
out_dict = {}
# loop over all admin codes present in shape_
uniquezones = np.unique(shape_data[shape_data != shape_noDataVal]) # exclude nodata value
for zone_code in uniquezones:
valid_pixels = int((shape_data[(shape_data == zone_code)]).size)
if valid_pixels == 0:
continue
masked = np.array(product_data[(product_data != product_noDataVal) & (shape_data == zone_code) & (mask_data == 1)], dtype='int64')
value = (masked.mean() if (masked.size > 0) else 0)
out_dict[zone_code] = {"value":value,"pixels":masked.size}
return out_dict
def _update(stored_dict,this_dict) -> dict:
"""Updates stats dictionary with values from a new window result
Parameters
----------
stored_dict:dict
Dictionary to be updated with new data
this_dict:dict
New data with which to update stored_dict
"""
out_dict = stored_dict
for k in this_dict.keys():
this_info = this_dict[k]
try:
stored_info = stored_dict[k]
except KeyError: # if stored_dict has no info for zone k (new zone in this window), set it equal to the info from this_dict
out_dict[k] = this_info
continue
try:
# weight of stored_dict value is the ratio of its valid pixels to the total new sum of valid pixels
stored_weight = float(stored_info['pixels']) / float(stored_info['pixels'] + this_info['pixels'])
except ZeroDivisionError:
# if no valid pixels at all, weight everything at 0
stored_weight = 0
try:
# weight of this_dict value is the ratio of its valid pixels to the total new sum of valid pixels
this_weight = float(this_info['pixels']) / float(stored_info['pixels'] + this_info['pixels'])
except ZeroDivisionError:
# if the total valid pixels are 0, everything gets weight 0
this_weight = 0
## weighted mean value
value = (stored_info['value'] * stored_weight) + (this_info['value'] * this_weight)
out_dict[k] = {'value':value, 'pixels':stored_info['pixels'] + this_info['pixels']}
return out_dict
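# Worked example of the weighted merge above (numbers chosen for illustration): merging a
# stored entry {'value': 10.0, 'pixels': 100} with a new window {'value': 20.0, 'pixels': 50}
# yields value = 10.0 * (100/150) + 20.0 * (50/150) = 13.33... and pixels = 150, i.e.
# _update({1: {'value': 10.0, 'pixels': 100}}, {1: {'value': 20.0, 'pixels': 50}})
# -> {1: {'value': 13.333..., 'pixels': 150}}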
def zonal_stats(zone_raster:str, data_raster:str, mask_raster = None, n_cores:int = 1, block_scale_factor: int = 8, default_block_size: int = 256, time:bool = False, *args, **kwargs) -> dict:
"""Generates zonal statistics based on input data and zone rasters
***
Parameters
----------
zone_raster: str
Path to input zone raster file
    data_raster: str
        Path to the input data raster file
    mask_raster: str, optional
        Path to a mask raster; only pixels where the mask equals 1 are
        included in the statistics. Default None (no masking)
    n_cores: int
How many cores to use for parallel processing. Default
1
block_scale_factor: int
Factor by which to scale default raster block size for
the purposes of windowed reads. Default 8
default_block_size: int
Inferred block size for untiled data raster.
Default 256
time: bool
Whether to log time it takes to execute this function.
Default False
Returns
-------
A nested dictionary. Outer-level keys are zone id numbers, each
of which corresponds to an inner dictionary with keys "value"
(the mean for that zone) and "pixels" (the number of pixels in
that zone).
"""
# start timer
startTime = datetime.now()
# coerce integer arguments to proper type
n_cores = int(n_cores)
block_scale_factor = int(block_scale_factor)
default_block_size = int(default_block_size)
# get raster metadata
with rasterio.open(data_raster,'r') as meta_handle:
metaprofile = meta_handle.profile
hnum = meta_handle.width
vnum = meta_handle.height
if metaprofile['tiled']:
blocksize = metaprofile['blockxsize'] * block_scale_factor
else:
log.warning(f"Input raster {data_raster} is not tiled!")
blocksize = default_block_size * block_scale_factor
# get windows
windows = getWindows(hnum, vnum, blocksize)
# generate arguments to pass into _zonal_worker
parallel_args = [(w, data_raster, zone_raster, mask_raster) for w in windows]
# do the multiprocessing
output_data = {}
with Pool(processes = n_cores) as p:
for window_data in p.map(_zonal_worker, parallel_args):
output_data = _update(output_data, window_data)
for zone in output_data:
if output_data[zone]['pixels'] == 0:
            output_data[zone]['value'] = np.nan
if time:
log.info(f"Finished in {datetime.now() - startTime}")
return output_data
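# Illustrative call (file names are placeholders; the zone and data rasters are assumed to be
# co-registered single-band rasters, as the windowed reads above require):
#
# stats = zonal_stats("zones.tif", "ndvi.tif", mask_raster=None, n_cores=4, time=True)
# for zone_id, info in stats.items():
#     print(zone_id, info["value"], info["pixels"])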
|
class Solution(object):
def containsNearbyDuplicate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
if len(nums) <= 1:
return False
if k == 0:
return False
p1, p2 = 0, 1
numSet = set()
numSet.add(nums[0])
while p2 < len(nums):
if p2 - p1 <= k:
if nums[p2] in numSet:
return True
numSet.add(nums[p2])
p2 += 1
else:
numSet.remove(nums[p1])
if nums[p2] in numSet:
return True
numSet.add(nums[p2])
p1 += 1
p2 += 1
return False
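# An equivalent sketch using a value -> last-seen-index map instead of the two-pointer set
# above (same O(n) time; O(n) extra space rather than O(min(n, k))):
#
# def contains_nearby_duplicate(nums, k):
#     last_seen = {}
#     for i, num in enumerate(nums):
#         if num in last_seen and i - last_seen[num] <= k:
#             return True
#         last_seen[num] = i
#     return False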
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Lazily-evaluated property pattern in Python.
https://en.wikipedia.org/wiki/Lazy_evaluation
*References:
bottle
https://github.com/bottlepy/bottle/blob/cafc15419cbb4a6cb748e6ecdccf92893bb25ce5/bottle.py#L270
django
https://github.com/django/django/blob/ffd18732f3ee9e6f0374aff9ccf350d85187fac2/django/utils/functional.py#L19
pip
https://github.com/pypa/pip/blob/cb75cca785629e15efb46c35903827b3eae13481/pip/utils/__init__.py#L821
pyramid
https://github.com/Pylons/pyramid/blob/7909e9503cdfc6f6e84d2c7ace1d3c03ca1d8b73/pyramid/decorator.py#L4
werkzeug
https://github.com/pallets/werkzeug/blob/5a2bf35441006d832ab1ed5a31963cbc366c99ac/werkzeug/utils.py#L35
*TL;DR80
Delays the eval of an expr until its value is needed and avoids repeated evals.
"""
from __future__ import print_function
import functools
class lazy_property(object):
def __init__(self, function):
self.function = function
functools.update_wrapper(self, function)
def __get__(self, obj, type_):
if obj is None:
return self
val = self.function(obj)
obj.__dict__[self.function.__name__] = val
return val
def lazy_property2(fn):
attr = '_lazy__' + fn.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr):
setattr(self, attr, fn(self))
return getattr(self, attr)
return _lazy_property
class Person(object):
def __init__(self, name, occupation):
self.name = name
self.occupation = occupation
self.call_count2 = 0
@lazy_property
def relatives(self):
# Get all relatives, let's assume that it costs much time.
relatives = "Many relatives."
return relatives
@lazy_property2
def parents(self):
self.call_count2 += 1
return "Father and mother"
def main():
Jhon = Person('Jhon', 'Coder')
print(u"Name: {0} Occupation: {1}".format(Jhon.name, Jhon.occupation))
print(u"Before we access `relatives`:")
print(Jhon.__dict__)
print(u"Jhon's relatives: {0}".format(Jhon.relatives))
print(u"After we've accessed `relatives`:")
print(Jhon.__dict__)
print(Jhon.parents)
print(Jhon.__dict__)
print(Jhon.parents)
print(Jhon.call_count2)
if __name__ == '__main__':
main()
### OUTPUT ###
# Name: Jhon Occupation: Coder
# Before we access `relatives`:
# {'call_count2': 0, 'name': 'Jhon', 'occupation': 'Coder'}
# Jhon's relatives: Many relatives.
# After we've accessed `relatives`:
# {'relatives': 'Many relatives.', 'call_count2': 0, 'name': 'Jhon', 'occupation': 'Coder'}
# Father and mother
# {'_lazy__parents': 'Father and mother', 'relatives': 'Many relatives.', 'call_count2': 1, 'name': 'Jhon', 'occupation': 'Coder'} # noqa flake8
# Father and mother
# 1
|
# -*- coding: utf-8 -*-
from aserializer.utils.parsers import Parser
class MetaOptions(object):
def __init__(self, meta):
self.fields = getattr(meta, 'fields', [])
self.exclude = getattr(meta, 'exclude', [])
class SerializerMetaOptions(MetaOptions):
def __init__(self, meta):
super(SerializerMetaOptions, self).__init__(meta)
self.parser = getattr(meta, 'parser', Parser)
class CollectionMetaOptions(MetaOptions):
def __init__(self, meta):
super(CollectionMetaOptions, self).__init__(meta)
self.serializer = getattr(meta, 'serializer', None)
self.with_metadata = getattr(meta, 'with_metadata', True)
self.metadata_key = getattr(meta, 'metadata_key', '_metadata')
self.items_key = getattr(meta, 'items_key', 'items')
self.offset_key = getattr(meta, 'offset_key', 'offset')
self.limit_key = getattr(meta, 'limit_key', 'limit')
self.total_count_key = getattr(meta, 'total_count_key', 'totalCount')
self.sort = getattr(meta, 'sort', [])
self.validation = getattr(meta, 'validation', False)
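# Hypothetical sketch of a Meta class consumed by CollectionMetaOptions; every attribute is
# optional and falls back to the defaults read via getattr above. The field names below are
# placeholders.
#
# class Meta:
#     fields = ['id', 'name']
#     serializer = None          # would normally be a Serializer subclass
#     items_key = 'results'
#     sort = ['name']
#
# opts = CollectionMetaOptions(Meta)
# opts.items_key, opts.total_count_key   # -> 'results', 'totalCount'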
|
import sys
userpassinfo = ('51reboot', '123456')
class Auth(object):
def login(self, username, password):
if username == userpassinfo[0] and password == userpassinfo[1]:
return "login succ.", True
else:
return "login faild.", False
def logout(self):
sys.exit(0)
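# Minimal usage sketch (credentials come from the userpassinfo tuple above):
#
# auth = Auth()
# auth.login('51reboot', '123456')   # -> ("login succ.", True)
# auth.login('guest', 'wrong')       # -> ("login failed.", False)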
|
from django.urls import include, path
from .views import (
TraderRegisterView,
FarmerRegisterView,
UserLoginView,
UserLogoutView,
ValidateOTP,
ValidatePhoneNumberSendOTP,
)
urlpatterns = [
path("send/otp/", ValidatePhoneNumberSendOTP.as_view(), name="send_otp"),
path("verify/otp/", ValidateOTP.as_view(), name="verify_otp"),
path(
"register/users/traders/",
TraderRegisterView.as_view(),
name="register_traders",
),
path(
"register/users/farmers/", FarmerRegisterView.as_view(), name="register_farmers"
),
path("login/", UserLoginView.as_view(), name="user_login"),
path("logout/", UserLogoutView.as_view(), name="user_logout"),
]
|
#!/usr/bin/env python
""" Reads the example file to a dict
This file shows how to read the data into a dict that can be used like any other Python dict.
It also shows how to read the attributes available in the file and in a dataset.
"""
import h5py
from structure_definitions import *
__author__ = "Johannes Hiller"
__version__ = "0.8"
__maintainer__ = "Johannes Hiller"
__email__ = "johannes.hiller@ika.rwth-aachen.de"
__status__ = "development"
# The file to read
h5_file = "l3pilot_cdf_filled_example.h5"
# Initialize the dict for storing the data
h5_dict = dict()
# Open the file
with h5py.File(h5_file, "r") as file:
# Initialize the ego part of the dict
h5_dict[str_ego] = dict()
for item in [list1 for list1 in ego_struct if list1.__class__.__name__ == "EgoTop"]:
h5_dict[str_ego][item.name] = file[str_ego][item.name]
# Initialize the lane part of the dict
h5_dict[str_lan] = dict()
for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneTop"]:
h5_dict[str_lan][item.name] = file[str_lan][item.name]
# Initialize the dict for the different lanes
h5_dict[str_lan][str_lan_obj] = dict()
for item in [list1 for list1 in lane_struct if list1.__class__.__name__ == "LaneSObject"]:
h5_dict[str_lan][str_lan_obj][item.name] = file[str_lan][str_lan_obj][item.name]
# Initialize the object part of the dict
h5_dict[str_obj] = dict()
for item in [list1 for list1 in object_struct if list1.__class__.__name__ == "ObjectTop"]:
h5_dict[str_obj][item.name] = file[str_obj][item.name]
# Initialize the dict for the different objects
h5_dict[str_obj][str_obj_obj] = dict()
for item in [list1 for list1 in object_struct if list1.__class__.__name__ == "ObjectSObject"]:
h5_dict[str_obj][str_obj_obj][item.name] = file[str_obj][str_obj_obj][item.name]
# Initialize the positioning part of the dict
h5_dict[str_pos] = dict()
for item in [list1 for list1 in pos_struct if list1.__class__.__name__ == "PosTop"]:
h5_dict[str_pos][item.name] = file[str_pos][item.name]
# Print some information about the file
print("The file is in version {} of the L3Pilot Common Data Format and was created by {}, {} at {}"
.format(float(file.attrs["metaData"]["General"]["FormatVersion"]), file.attrs["author"],
file.attrs["institution"], file.attrs["creation_date"]))
# Print some information on a signal in the egoVehicle dataset
print("The description of the UTCTime is : \"{}\" and its unit is \"{}\""
.format(file["egoVehicle"].attrs["UTCTime"][0][1], file["egoVehicle"].attrs["UTCTime"][1][1]))
# Print the number of timestamps contained in the file
print("The loaded h5 file contains {} timestamps".format(len(h5_dict["egoVehicle"]["UTCTime"])))
|
from .gat import GAT
from .sp_gat import SpGAT
|
from pprint import pprint
from bubbl2struct import b2s
# Load the simple diagram from the diagrams folder
converter = b2s('diagrams/simple_diagram.html')
# Load as JSON
json = converter.as_json()
print('Converted to json format: ')
pprint(json)
# Load as adjacency
adj, nodes, edges = converter.as_adj()
print('Converted to adjacency matrix: ')
pprint(str(adj))
print('Node information: ')
for (node, id) in zip(nodes, range(0, len(nodes))):
    print(str(id) + ': ' + node)
print('Edge information: ')
pprint(edges)
print(str(adj.shape))
|
"""Python script to create GnuPlot plots."""
from subprocess import Popen, PIPE
# A few constants shared across all plots
FONTSIZE = "15"
TERMINAL = "postscript eps color solid"
LINEWIDTH = "3"
OUTPUT_PREFIX = "output/"
# The following is the template all GnuPlot script files share. It contains
# placeholders for a number of values:
# 0. Terminal type (filled from variable TERMINAL)
# 1. Output file
# 2. Title
# 3. Font size for all fonts (filled from FONTSIZE)
# 4. Label for X-Axis
# 5. Label for Y-Axis
# The template will be dynamically extended with the plot commands and other
# options (like logscale etc.) required to create the desired plots; a usage
# sketch follows directly after the template below.
TEMPLATE = """#!/usr/bin/gnuplot
set terminal {0}
set output '{1}'
set title "{2}" font ",{3}"
set xlabel "{4}" font ",{3}"
set ylabel "{5}" font ",{3}"
set xtics font ",{3}"
set ytics font ",{3}"
set key font ",{3}" spacing 1.5
"""
TARGET = [
{ # Graph to display the distribution of share steps
"output": "impl-simu-share-time.eps",
"xlabel": "Step",
"ylabel": "Fraction of Users",
"title": "Share Distribution over Time",
"options":
[
"set xrange[0:10]"
],
"plot":
[
{
"input": "share-step-distribution.csv",
"x": "1",
"y": "2",
"title": "p(n)",
"type": "lines",
"options":
[
"lw " + LINEWIDTH,
]
}
]
}, # End Share Step Distribution Graph
{ # Graph to display degree distribution of a scale-free network
"output": "impl-simu-scale-free.eps",
"xlabel": "Degree",
"ylabel": "Number of Users",
"title": "Degree Distribution of a Scale-Free Network",
"options":
[
"set logscale xy",
"set xrange[1:1000]"
],
"plot":
[
{
"input": "u100000/user-distribution.csv",
"x": "2",
"y": "3",
"title": "u = 100 000 Users",
"type": "lines",
"filter":
{
"column": "1",
"value": "0"
},
"options":
[
"lw " + LINEWIDTH,
]
}
]
}, # End of degree distribution of a scale-free network plot
{ # Graph to display how the degree distribution evolves over rounds
"output": "impl-simu-user-development.eps",
"xlabel": "Degree",
"ylabel": "Number of Users",
"title": "Median Degree Distribution over Time (100k initial Users)",
"options":
[
"set logscale xy",
"set xrange[1:1000]"
],
"plot":
[
{
"input": "u100000/user-distribution.csv",
"x": "2",
"y": "3",
"title": "Step 0",
"type": "lines",
"filter":
{
"column": "1",
"value": "0"
},
"options":
[
"lw " + LINEWIDTH,
]
},
{
"input": "u100000/user-distribution.csv",
"x": "2",
"y": "3",
"title": "Step 50",
"type": "lines",
"filter":
{
"column": "1",
"value": "50"
},
"options":
[
"lw " + LINEWIDTH,
]
},
{
"input": "u100000/user-distribution.csv",
"x": "2",
"y": "3",
"title": "Step 100",
"type": "lines",
"filter":
{
"column": "1",
"value": "100"
},
"options":
[
"lw " + LINEWIDTH,
]
},
{
"input": "u100000/user-distribution.csv",
"x": "2",
"y": "3",
"title": "Step 150",
"type": "lines",
"filter":
{
"column": "1",
"value": "150"
},
"options":
[
"lw " + LINEWIDTH,
]
},
{
"input": "u100000/user-distribution.csv",
"x": "2",
"y": "3",
"title": "Step 200",
"type": "lines",
"filter":
{
"column": "1",
"value": "200"
},
"options":
[
"lw " + LINEWIDTH,
]
}
]
}, # End of degree distribution over time plot
{ # Graph to display the VICBF scaling
"output": "eval-simu-vicbf-scaling.eps",
"xlabel": "Step",
"ylabel": "Size (MB)",
"title": "Median Size of Serialized VI-CBF over Time (100k initial Users, Orphans only)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u100000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.01 \261 0.0001, Protocol 1)",
"type": "lines",
"options":
[
"ls 1",
]
},
{
"input": "u100000/vicbf-scaling-fpr0.01-proto2.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.01 \261 0.0001, Protocol 2)",
"type": "lines",
"options":
[
"ls 3"
]
},
{
"input": "u100000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001, Protocol 1)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u100000/vicbf-scaling-fpr0.01-proto2.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001, Protocol 2)",
"type": "lines",
"options":
[
"ls 4"
]
}
]
}, # End of VICBF scaling graph
{ # Graph to display the VICBF scaling for 1000 users
"output": "eval-simu-vicbf-scaling-u1000.eps",
"xlabel": "Step",
"ylabel": "Size (MB)",
"title": "Median Size of Serialized VI-CBF over Time (1k initial Users, Orphans only)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u1000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.01 \261 0.0001, Protocol 1)",
"type": "lines",
"options":
[
"ls 1",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-proto2.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.01 \261 0.0001, Protocol 2)",
"type": "lines",
"options":
[
"ls 3"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001, Protocol 1)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-proto2.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001, Protocol 2)",
"type": "lines",
"options":
[
"ls 4"
]
}
]
}, # End of VICBF scaling graph for 1000 users
{ # Graph to display the VICBF scaling depending on FPR, u=100000
"output": "eval-simu-vicbf-scaling-u100000-fpr.eps",
"xlabel": "Step",
"ylabel": "Size (MB)",
"title": "Median Size of Serialized VI-CBF for different FPRs over Time (100k initial Users, Orphans only)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 5 lt 6 lc rgb "green" lw ' + LINEWIDTH,
'set style line 6 lt 1 lc rgb "green" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u100000/vicbf-scaling-fpr0.1-proto1.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 5",
]
},
{
"input": "u100000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 1",
]
},
{
"input": "u100000/vicbf-scaling-fpr0.001-proto1.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 3"
]
},
{
"input": "u100000/vicbf-scaling-fpr0.1-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 6"
]
},
{
"input": "u100000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u100000/vicbf-scaling-fpr0.001-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 4"
]
},
]
}, # End of VICBF FPR scaling graph for u=100000
{ # Graph to display the VICBF scaling depending on FPR, u=100000, compressed only
"output": "eval-simu-vicbf-scaling-u100000-fpr-compressed.eps",
"xlabel": "Step",
"ylabel": "Size (MB)",
"title": "Median Compressed VI-CBF Size for different FPRs over Time (u=100k, Orphans only)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 5 lt 6 lc rgb "green" lw ' + LINEWIDTH,
'set style line 6 lt 1 lc rgb "green" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u100000/vicbf-scaling-fpr0.1-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 6"
]
},
{
"input": "u100000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u100000/vicbf-scaling-fpr0.001-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 4"
]
},
]
}, # End of VICBF FPR scaling graph for u=100000
{ # Graph to display the VICBF scaling depending on FPR for u=1000
"output": "eval-simu-vicbf-scaling-u1000-fpr.eps",
"xlabel": "Step",
"ylabel": "Size (MB)",
"title": "Median Size of Serialized VI-CBF for different FPRs over Time (1k initial Users, Orphans only)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 5 lt 6 lc rgb "green" lw ' + LINEWIDTH,
'set style line 6 lt 1 lc rgb "green" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u1000/vicbf-scaling-fpr0.1-proto1.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 5",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 1",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.001-proto1.csv",
"x": "1",
"y": "($20 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 3"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.1-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 6"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.001-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 4"
]
},
]
}, # End of VICBF FPR scaling graph for u=1000
{ # Graph to display the VICBF scaling depending on FPR for u=1000
"output": "eval-simu-vicbf-scaling-u1000-fpr-compressed.eps",
"xlabel": "Step",
"ylabel": "Size (MB)",
"title": "Median Compressed VI-CBF Size for different FPRs over Time (u=1k, Orphans only)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 5 lt 6 lc rgb "green" lw ' + LINEWIDTH,
'set style line 6 lt 1 lc rgb "green" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u1000/vicbf-scaling-fpr0.1-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 6"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.001-proto1.csv",
"x": "1",
"y": "($21 / 1024 / 1024)",
"title": "Compressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 4"
]
},
]
}, # End of VICBF FPR scaling graph for u=1000, compressed only
{ # Graph to display the VICBF scaling depending on FPR, productive KVs only, u=100000
"output": "eval-simu-vicbf-scaling-u100000-productive.eps",
"xlabel": "Step",
"ylabel": "Size (MB)",
"title": "Median Size of Serialized VI-CBF for different FPRs over Time (100k initial Users, Non-Orphans only)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 5 lt 6 lc rgb "green" lw ' + LINEWIDTH,
'set style line 6 lt 1 lc rgb "green" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u100000/vicbf-scaling-fpr0.1-retronly.csv",
"x": "1",
"y": "($5 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 5",
]
},
{
"input": "u100000/vicbf-scaling-fpr0.01-retronly.csv",
"x": "1",
"y": "($5 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 1",
]
},
{
"input": "u100000/vicbf-scaling-fpr0.001-retronly.csv",
"x": "1",
"y": "($5 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 3"
]
},
{
"input": "u100000/vicbf-scaling-fpr0.1-retronly.csv",
"x": "1",
"y": "($6 / 1024 / 1024)",
"title": "Compressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 6"
]
},
{
"input": "u100000/vicbf-scaling-fpr0.01-retronly.csv",
"x": "1",
"y": "($6 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u100000/vicbf-scaling-fpr0.001-retronly.csv",
"x": "1",
"y": "($6 / 1024 / 1024)",
"title": "Compressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 4"
]
},
]
}, # End of VICBF FPR scaling graph for u=100000 for productive KVs only
{ # Graph to display the VICBF scaling depending on FPR, productive KVs only, u=1000
"output": "eval-simu-vicbf-scaling-u1000-productive.eps",
"xlabel": "Step",
"ylabel": "Size (MB)",
"title": "Median Size of Serialized VI-CBF for different FPRs over Time (1k initial Users, Non-Orphans only)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 5 lt 6 lc rgb "green" lw ' + LINEWIDTH,
'set style line 6 lt 1 lc rgb "green" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u1000/vicbf-scaling-fpr0.1-retronly.csv",
"x": "1",
"y": "($5 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 5",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-retronly.csv",
"x": "1",
"y": "($5 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 1",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.001-retronly.csv",
"x": "1",
"y": "($5 / 1024 / 1024)",
"title": "Uncompressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 3"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.1-retronly.csv",
"x": "1",
"y": "($6 / 1024 / 1024)",
"title": "Compressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 6"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-retronly.csv",
"x": "1",
"y": "($6 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.001-retronly.csv",
"x": "1",
"y": "($6 / 1024 / 1024)",
"title": "Compressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 4"
]
},
]
}, # End of VICBF FPR scaling graph for u=100000 for productive KVs only
{ # Graph to display the VICBF scaling depending on FPR, productive KVs only, u=1000, only compressed
"output": "eval-simu-vicbf-scaling-u1000-productive-compressed.eps",
"xlabel": "Step",
"ylabel": "Size (MB)",
"title": "Median Compressed VI-CBF Size for different FPRs over Time (u=1k, Non-Orphans only)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 5 lt 6 lc rgb "green" lw ' + LINEWIDTH,
'set style line 6 lt 1 lc rgb "green" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u1000/vicbf-scaling-fpr0.1-retronly.csv",
"x": "1",
"y": "($6 / 1024 / 1024)",
"title": "Compressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 6"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-retronly.csv",
"x": "1",
"y": "($6 / 1024 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.001-retronly.csv",
"x": "1",
"y": "($6 / 1024 / 1024)",
"title": "Compressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 4"
]
},
]
    }, # End of VICBF FPR scaling graph for u=1000 for productive KVs only, compressed only
{ # Graph to display the VICBF scaling depending on FPR, productive KVs only, u=1000, static network
"output": "eval-simu-vicbf-scaling-u1000-productive-static.eps",
"xlabel": "Step",
"ylabel": "Size (KB)",
"title": "Median Size of Serialized VI-CBF for different FPRs over Time (1k initial Users, static network)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 5 lt 6 lc rgb "green" lw ' + LINEWIDTH,
'set style line 6 lt 1 lc rgb "green" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u1000/vicbf-scaling-fpr0.1-retronly-static.csv",
"x": "1",
"y": "($5 / 1024)",
"title": "Uncompressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 5",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-retronly-static.csv",
"x": "1",
"y": "($5 / 1024)",
"title": "Uncompressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 1",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.001-retronly-static.csv",
"x": "1",
"y": "($5 / 1024)",
"title": "Uncompressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 3"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.1-retronly-static.csv",
"x": "1",
"y": "($6 / 1024)",
"title": "Compressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 6"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-retronly-static.csv",
"x": "1",
"y": "($6 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.001-retronly-static.csv",
"x": "1",
"y": "($6 / 1024)",
"title": "Compressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 4"
]
},
]
    }, # End of VICBF FPR scaling graph for u=1000 for productive KVs only, static network
{ # Graph to display the VICBF scaling depending on FPR, productive KVs only, u=1000, static network, compressed only
"output": "eval-simu-vicbf-scaling-u1000-productive-static-compressed.eps",
"xlabel": "Step",
"ylabel": "Size (KB)",
"title": "Median Compressed VI-CBF Size for different FPRs over Time (u=1k, static network)",
"options":
[
"set encoding iso_8859_1",
"set termoption dash",
"set key top left",
"set for [i=1:5] linetype i lt i",
'set style line 1 lt 6 lc rgb "red" lw ' + LINEWIDTH,
'set style line 2 lt 1 lc rgb "red" lw ' + LINEWIDTH,
'set style line 3 lt 6 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 4 lt 1 lc rgb "blue" lw ' + LINEWIDTH,
'set style line 5 lt 6 lc rgb "green" lw ' + LINEWIDTH,
'set style line 6 lt 1 lc rgb "green" lw ' + LINEWIDTH,
],
"plot":
[
{
"input": "u1000/vicbf-scaling-fpr0.1-retronly-static.csv",
"x": "1",
"y": "($6 / 1024)",
"title": "Compressed (FPR=0.1 \261 0.001)",
"type": "lines",
"options":
[
"ls 6"
]
},
{
"input": "u1000/vicbf-scaling-fpr0.01-retronly-static.csv",
"x": "1",
"y": "($6 / 1024)",
"title": "Compressed (FPR=0.01 \261 0.0001)",
"type": "lines",
"options":
[
"ls 2",
]
},
{
"input": "u1000/vicbf-scaling-fpr0.001-retronly-static.csv",
"x": "1",
"y": "($6 / 1024)",
"title": "Compressed (FPR=0.001 \261 0.0001)",
"type": "lines",
"options":
[
"ls 4"
]
},
]
    }, # End of VICBF FPR scaling graph for u=1000 for productive KVs only, static network, compressed only
{ # Graph to display the median number of Type I and II orphaned records over time
"output": "eval-simu-orphaned-records.eps",
"xlabel": "Step",
"ylabel": "Number of Pairs",
"title": "Median Number of Orphaned Key-Value-Pairs over Time (100k initial Users)",
"options":
[
"set encoding iso_8859_1",
"set key top left",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "22",
"title": "Type I",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "($32 - $22)",
"title": "Type II",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "32",
"title": "Type I + II",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
]
}, # End of orphaned records over time graph
{ # Type I + II Orphan boxplot
"output": "eval-simu-orphaned-records-1+2-boxplot.eps",
"xlabel": "Step",
"ylabel": "Number of Pairs",
"title": "Number of Orphaned Key-Value-Pairs over Time (100k initial Users)",
"options":
[
"set encoding iso_8859_1",
"set xrange[0:205]",
"set key top left",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "33:35:36:34:52",
"title": "Type I + II",
"type": "candlesticks",
"options":
[
"lw " + LINEWIDTH,
],
"options_post":
[
"whiskerbars"
]
},
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "32:32:32:32:52",
"title": None,
"type": "candlesticks",
"options":
[
"lc rgb '#000000'",
"lt -1",
"notitle"
]
},
]
}, # End of boxplot of Type I + II records
    { # Non-orphaned (productive) records boxplot
"output": "eval-simu-productive-records-boxplot.eps",
"xlabel": "Step",
"ylabel": "Number of Pairs",
"title": "Number of Non-Orphaned Key-Value-Pairs over Time (100k initial Users)",
"options":
[
"set encoding iso_8859_1",
"set xrange[0:205]",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "38:40:41:39:52",
"title": "Quartiles / Min / Max",
"type": "candlesticks",
"options":
[
"lw " + LINEWIDTH,
],
"options_post":
[
"whiskerbars"
]
},
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "37:37:37:37:52",
"title": None,
"type": "candlesticks",
"options":
[
"lc rgb '#000000'",
"lt -1",
"notitle"
]
},
]
    }, # End of boxplot of non-orphaned (productive) records
    { # Non-orphaned (productive) records boxplot, static network
"output": "eval-simu-productive-records-static-boxplot.eps",
"xlabel": "Step",
"ylabel": "Number of Pairs",
"title": "Number of Non-Orphaned Key-Value-Pairs over Time (1k Users, Static Network)",
"options":
[
"set encoding iso_8859_1",
"set xrange[0:205]",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u1000/simulation-rounds-static.csv",
"x": "1",
"y": "38:40:41:39:52",
"title": "Quartiles / Min / Max",
"type": "candlesticks",
"options":
[
"lw " + LINEWIDTH,
],
"options_post":
[
"whiskerbars"
]
},
{
"input": "u1000/simulation-rounds-static.csv",
"x": "1",
"y": "37:37:37:37:52",
"title": None,
"type": "candlesticks",
"options":
[
"lc rgb '#000000'",
"lt -1",
"notitle"
]
},
]
    }, # End of boxplot of non-orphaned (productive) records, static network
{ # Graph to display the number of Type II orphans
"output": "eval-simu-orphans-type2.eps",
"xlabel": "Step",
"ylabel": "Number of Pairs",
"title": "Number of Type II Orphans over Time (100k initial Users)",
"options":
[
"set encoding iso_8859_1",
"set xrange[0:205]",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "($33 - $23):($35 - $25):($36 - $26):($34 - $24):52",
"title": "Quartiles / Min / Max",
"type": "candlesticks",
"options":
[
"lw " + LINEWIDTH,
],
"options_post":
[
"whiskerbars"
]
},
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "($32 - $22):($32 - $22):($32 - $22):($32 - $22):52",
"title": None,
"type": "candlesticks",
"options":
[
"lc rgb '#000000'",
"lt -1",
"notitle"
]
}
]
}, # End of Type II Orphans
{ # Graph to display the number of Type I orphans
"output": "eval-simu-orphans-type1.eps",
"xlabel": "Step",
"ylabel": "Number of Pairs",
"title": "Number of Type I Orphans over Time (100k initial Users)",
"options":
[
"set encoding iso_8859_1",
"set xrange[0:205]",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "23:25:26:24:52",
"title": "Quartiles / Min / Max",
"type": "candlesticks",
"options":
[
"lw " + LINEWIDTH,
],
"options_post":
[
"whiskerbars"
]
},
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "22:22:22:22:52",
"title": None,
"type": "candlesticks",
"options":
[
"lc rgb '#000000'",
"lt -1",
"notitle"
]
}
]
    }, # End of Type I Orphans
{ # Graph to display difference between median orphaned records
"output": "eval-simu-orphaned-records-compare.eps",
"xlabel": "Step",
"ylabel": "Number of Pairs",
"title": "Median Number of Orphaned Key-Value-Pairs over Time (Comparison, 100k initial Users)",
"options":
[
"set encoding iso_8859_1",
"set xrange[0:205]",
"set key top left",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "32",
"title": "Median (Protocol 1)",
"type": "lines",
"options":
[
"lw " + LINEWIDTH,
]
},
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "($32 - $22)",
"title": "Median (Protocol 2)",
"type": "lines",
"options":
[
"lw " + LINEWIDTH,
]
}
]
}, # End of graph to display difference between median orphaned records
{ # Graph to display the development of the user count over time for 100k initial
"output": "impl-simu-user-count-development.eps",
"xlabel": "Step",
"ylabel": "Number of Users",
"title": "Median Number of Active Users over Time (100k initial Users)",
"options":
[
"set encoding iso_8859_1",
"set key top left",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "2",
"title": "Active and Inactive Users",
"type": "lines",
"options":
[
"lw " + LINEWIDTH,
]
},
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "7",
"title": "Active Users",
"type": "lines",
"options":
[
"lw " + LINEWIDTH,
]
},
{
"input": "u100000/simulation-rounds.csv",
"x": "1",
"y": "12",
"title": "Inactive Users",
"type": "lines",
"options":
[
"lw " + LINEWIDTH,
]
}
]
}, # End of graph to display development of user count over time for 100k initial
{ # Graph to display the development of the user count over time for 1k initial
"output": "impl-simu-user-count-development-u1000.eps",
"xlabel": "Step",
"ylabel": "Number of Users",
"title": "Median Number of Active Users over Time (1k initial Users)",
"options":
[
"set encoding iso_8859_1",
"set key top left",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u1000/simulation-rounds.csv",
"x": "1",
"y": "2",
"title": "Active and Inactive Users",
"type": "lines",
"options":
[
"lw " + LINEWIDTH,
]
},
{
"input": "u1000/simulation-rounds.csv",
"x": "1",
"y": "7",
"title": "Active Users",
"type": "lines",
"options":
[
"lw " + LINEWIDTH,
]
},
{
"input": "u1000/simulation-rounds.csv",
"x": "1",
"y": "12",
"title": "Inactive Users",
"type": "lines",
"options":
[
"lw " + LINEWIDTH,
]
}
]
}, # End of graph to display development of user count over time
{ # Graph to display the median number of Type I and II orphaned records over time (u1000)
"output": "eval-simu-orphaned-records-u1000.eps",
"xlabel": "Step",
"ylabel": "Number of Pairs",
"title": "Median Number of Orphaned Key-Value-Pairs over Time (1k initial Users)",
"options":
[
"set encoding iso_8859_1",
"set key top left",
"set format y '%.0s %c'"
],
"plot":
[
{
"input": "u1000/simulation-rounds.csv",
"x": "1",
"y": "22",
"title": "Type I",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "u1000/simulation-rounds.csv",
"x": "1",
"y": "($32 - $22)",
"title": "Type II",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "u1000/simulation-rounds.csv",
"x": "1",
"y": "32",
"title": "Type I + II",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
]
}, # End of orphaned records over time graph (u1000)
{ # Graph to display the median performance of VICBF inserts
"output": "eval-comp-vicbf-inserts.eps",
"xlabel": "Slots",
"ylabel": "ms",
"title": "Median Performance of VI-CBF Inserts",
"options":
[
"set encoding iso_8859_1",
"set format x '%.0s %c'",
"set key bottom left"
],
"plot":
[
{
"input": "< awk '$1 ~ /^insert$/ && $2 ~ /^2$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=2",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^insert$/ && $2 ~ /^3$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=3",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^insert$/ && $2 ~ /^4$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=4",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^insert$/ && $2 ~ /^5$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=5",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
]
}, # End of median VICBF insert performance
{ # Graph to display the median performance of true positive VICBF queries
"output": "eval-comp-vicbf-queryp.eps",
"xlabel": "Slots",
"ylabel": "ms",
"title": "Median Performance of VI-CBF Queries (TP)",
"options":
[
"set encoding iso_8859_1",
"set format x '%.0s %c'",
"set key bottom left"
],
"plot":
[
{
"input": "< awk '$1 ~ /^queryp$/ && $2 ~ /^2$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=2",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^queryp$/ && $2 ~ /^3$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=3",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^queryp$/ && $2 ~ /^4$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=4",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^queryp$/ && $2 ~ /^5$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=5",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
]
}, # End of true positive VICBF query performance
{ # Graph to display the median performance of true negative / false positive VICBF queries
"output": "eval-comp-vicbf-queryn.eps",
"xlabel": "Slots",
"ylabel": "ms",
"title": "Median Performance of VI-CBF Queries (TN / FP)",
"options":
[
"set encoding iso_8859_1",
"set format x '%.0s %c'"
],
"plot":
[
{
"input": "< awk '$1 ~ /^queryn$/ && $2 ~ /^2$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=2",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^queryn$/ && $2 ~ /^3$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=3",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^queryn$/ && $2 ~ /^4$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=4",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^queryn$/ && $2 ~ /^5$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4 / 1000000)",
"title": "k=5",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
]
}, # End of TN / FP VICBF Query performance
{ # Graph to display the median performance of the VICBF serialization
"output": "eval-comp-vicbf-serial.eps",
"xlabel": "Slots",
"ylabel": "ms",
"title": "Median Performance of VI-CBF Serialization (10k entries)",
"options":
[
"set encoding iso_8859_1",
"set format x '%.0s %c'",
"set key top left"
],
"plot":
[
{
"input": "< awk '$1 ~ /^serialize$/ && $2 ~ /^2$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4)",
"title": "k=2",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^serialize$/ && $2 ~ /^3$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4)",
"title": "k=3",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^serialize$/ && $2 ~ /^4$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4)",
"title": "k=4",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^serialize$/ && $2 ~ /^5$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($4)",
"title": "k=5",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
]
    }, # End of VICBF serialization performance
{ # Graph to display the median processing time increase when adding compression to serialization
"output": "eval-comp-vicbf-serial-comp-increase.eps",
"xlabel": "Slots",
"ylabel": "ms",
"title": "Median Compression Time for Serialized Data (10k entries)",
"options":
[
"set encoding iso_8859_1",
"set format x '%.0s %c'",
"set key top left"
],
"plot":
[
{
"input": "< awk '$1 ~ /^serialize$/ && $2 ~ /^2$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($9 - $4)",
"title": "k=2",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^serialize$/ && $2 ~ /^3$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($9 - $4)",
"title": "k=3",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^serialize$/ && $2 ~ /^4$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($9 - $4)",
"title": "k=4",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "< awk '$1 ~ /^serialize$/ && $2 ~ /^5$/' benchmark/vicbf-benchmark.txt",
"x": "3",
"y": "($9 - $4)",
"title": "k=5",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
]
    }, # End of VICBF serialization compression time
{ # Graph to display the VICBF serialization size with different strategies
"output": "app-vicbfser-smart.eps",
"xlabel": "Number of entries",
"ylabel": "Size (bytes)",
"title": "Size of Serialized VI-CBF with Smart Serialization Strategy",
"options":
[
"set encoding iso_8859_1",
"set key bottom right"
],
"plot":
[
{
"input": "serialization/vicbf-serialization-size-smart.txt",
"x": "1",
"y": "2",
"title": "Uncompressed",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "serialization/vicbf-serialization-size-smart.txt",
"x": "1",
"y": "3",
"title": "Compressed",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
]
}, # End of VICBF serialization size with smart strategy
{ # Graph to display the VICBF serialization size with full strategy
"output": "app-vicbfser-full.eps",
"xlabel": "Number of entries",
"ylabel": "Size (bytes)",
"title": "Size of Serialized VI-CBF with Full Serialization Strategy",
"options":
[
"set encoding iso_8859_1",
"set key bottom right"
],
"plot":
[
{
"input": "serialization/vicbf-serialization-size-full.txt",
"x": "1",
"y": "2",
"title": "Uncompressed",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
{
"input": "serialization/vicbf-serialization-size-full.txt",
"x": "1",
"y": "3",
"title": "Compressed",
"type": "lines",
"options":
[
"lw " + LINEWIDTH
]
},
]
}, # End of VICBF serialization size with full strategy
]
for target in TARGET:
print "Generate:", target["title"], "=>", target["output"]
# Fill in the template
output = TEMPLATE.format(TERMINAL,
OUTPUT_PREFIX + target["output"],
target["title"],
FONTSIZE,
target["xlabel"],
target["ylabel"])
# Add additional options like logscale
if target["options"] is not None:
for option in target["options"]:
output += option + "\n"
# Add plot commands
plotcmd = "plot "
for plot in target["plot"]:
target = ""
if "filter" in plot.keys():
target = "< awk '${0} ~ /^{1}$/' {2}".format(plot["filter"]["column"],
plot["filter"]["value"],
plot["input"])
else:
target = plot["input"]
plotcmd += '"' + target + '" u ' + plot["x"] + ":" + plot["y"] + ' '
plotcmd += "w " + plot["type"] + " "
if plot["options"] is not None:
for option in plot["options"]:
plotcmd += option + " "
if plot["title"] is not None:
plotcmd += "t '" + plot["title"] + "' "
if "options_post" in plot.keys():
for option in plot["options_post"]:
plotcmd += option + " "
plotcmd += ", "
# Merge plot commands into output
output += plotcmd[:-2] + "\n"
# Execute command to generate Gnuplot
pobj = Popen(['/usr/bin/gnuplot'], stdin=PIPE)
pobj.communicate(input=output)
pobj.wait()
|
#!/usr/bin/env python
# Copyright (c) 2019, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Utility script to make it easier to update what golem builds.
import gradle
import sys
import utils
import os
import retrace_benchmark
BUILD_TARGETS = ['downloadDeps', 'downloadAndroidCts', 'downloadDx']
def Main():
gradle.RunGradle(BUILD_TARGETS)
utils.DownloadFromX20(
os.path.join(utils.THIRD_PARTY, 'gradle-plugin') + '.tar.gz.sha1')
utils.DownloadFromX20(
os.path.join(
utils.THIRD_PARTY, 'benchmarks', 'android-sdk') + '.tar.gz.sha1')
utils.DownloadFromX20(
os.path.join(utils.THIRD_PARTY, 'remapper') + '.tar.gz.sha1')
utils.DownloadFromGoogleCloudStorage(utils.SAMPLE_LIBRARIES_SHA_FILE)
utils.DownloadFromGoogleCloudStorage(utils.ANDROID_SDK + '.tar.gz.sha1',
bucket='r8-deps-internal',
auth=True)
retrace_benchmark.download_benchmarks()
if __name__ == '__main__':
sys.exit(Main())
|
import numpy as np
import pandas as pd
import copy
class Correlationer:
def __init__(self, method="pearson", critical=0.7, movingAverageWindow=2, movingWeightMax=2):
self._method = method
self._critical = critical
self._movingAverageWindow = movingAverageWindow
self._movingWeightMax = movingWeightMax
self._corrListPositive = None
self._corrListNegative = None
def fit(self, dataframe, targetColumns, combine=False, removeCombineColumn=False, removeSubCombineColumn=True, removeOriginColumn=False):
self._targetColumns = targetColumns
self._removeCombineColumn = removeCombineColumn
self._removeSubCombineColumn = removeSubCombineColumn
self._removeOriginColumn = removeOriginColumn
corr = dataframe[targetColumns].corr(method=self._method).to_numpy()
corrColumB, corrRowB = np.where((corr > self._critical) & (corr < 1))
corrColumS, corrRowS = np.where((corr < -self._critical) & (corr > -1))
self._corrListPositive = []
self._corrListNegative = []
self._combinedColumns = set()
for position in zip(corrColumB, corrRowB):
if position[0] < position[1]:
name1 = targetColumns[position[0]]
name2 = targetColumns[position[1]]
self._corrListPositive.append([name1, name2])
self._combinedColumns.add(name1)
self._combinedColumns.add(name2)
for position in zip(corrColumS, corrRowS):
if position[0] < position[1]:
name1 = targetColumns[position[0]]
name2 = targetColumns[position[1]]
self._corrListNegative.append([name1, name2])
self._combinedColumns.add(name1)
self._combinedColumns.add(name2)
self._combinedColumns = list(self._combinedColumns)
if combine:
self._corrListPositive = self._combine(self._corrListPositive)
self._corrListNegative = self._combine(self._corrListNegative)
return self._corrListPositive, self._corrListNegative
def generate(self, dataframe):
self._generatedColumns = []
for item in self._corrListPositive:
vals = []
for name in item:
vals.append(dataframe[name].astype(float).values)
colName = "_".join(item)
dataframe[colName] = self._all_diff(vals)
self._generatedColumns.append(colName)
for item in self._corrListNegative:
vals = []
for name in item:
vals.append(dataframe[name].astype(float).values)
colName = "_".join(item)
dataframe[colName] = self._all_diff(vals)
self._generatedColumns.append(colName)
if self._removeOriginColumn:
dataframe.drop(self._targetColumns, axis=1, inplace=True)
if self._removeCombineColumn:
dataframe.drop(self._combinedColumns, axis=1, inplace=True)
def fit_generate(self, dataframe, targetColumns, combine=False, removeCombineColumn=False, removeSubCombineColumn=True, removeOriginColumn=False):
self.fit(dataframe, targetColumns, combine, removeCombineColumn, removeSubCombineColumn, removeOriginColumn)
self.generate(dataframe)
def getColumnsTarget(self):
return self._targetColumns
def getColumnsGenerated(self):
return self._generatedColumns
def getColumns(self):
result = set(self._targetColumns.tolist() + self._generatedColumns)
if self._removeOriginColumn:
result -= set(self._targetColumns)
if self._removeCombineColumn:
result -= set(self._combinedColumns)
return list(result)
def _combine(self, corrList):
res = []
if len(corrList) == 0:
return res
for item in corrList:
isNew = True
item = copy.copy(item)
for x in res:
if item[0] == x[1]:
x_copy = copy.deepcopy(x)
res.append(x_copy)
x[0].append(item[1])
x[1] = item[1]
isNew = False
break
if isNew:
res.append([item, item[1]])
elif self._removeSubCombineColumn == False:
res.append([item, item[1]])
return sorted(np.array(res)[:,0].tolist())
def _all_diff(self, vals):
my = vals[0]
other = vals[1:]
diff = 0
if len(other) > 1:
diff += self._all_diff(other)
myMA = self._moving_average(my, self._movingAverageWindow)
for item in other:
diff += self._moving_average(item, self._movingAverageWindow) - myMA
return diff
def transMovingDiffAverage(self, dataframe, targetColumns, windows=3):
for colum in targetColumns:
dataframe[colum] = self._moving_average(dataframe[colum].values, windows)
def _moving_average(self, vals, windows=5):
        # Build the weights for the weighted moving average
weights = np.ones(windows)
weightMax = windows if windows < self._movingWeightMax else self._movingWeightMax
for idx in range(weightMax):
weights[idx] = weightMax-idx
        # Compute the weighted moving average
stepVals = []
stepVals.append(vals)
temp = vals
for idx in range(windows):
temp = np.insert(temp[:-1], 0, temp[0])
stepVals.append(temp)
result = np.zeros([len(vals)])
for idx, weight in zip(range(windows), weights):
result += (stepVals[idx]*weight) - (stepVals[idx+1]*(1 if weight == 1 else weight-1))
return result/weights.sum()
def getCorrelationList(self):
return self._corrListPositive, self._corrListNegative
def __repr__(self):
return f'{self._method}[critical: {self._critical}, positive: {self._corrListPositive}, negative: {self._corrListNegative}]'
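# Hedged usage sketch (not part of the original module): apply Correlationer to
# a tiny DataFrame. The column names and values below are illustrative only;
# 'b' is constructed to correlate positively and 'c' negatively with 'a'.
if __name__ == '__main__':
    df = pd.DataFrame({
        'a': [1.0, 2.0, 3.0, 4.0, 5.0],
        'b': [2.0, 4.1, 6.2, 8.1, 10.0],   # strongly positive correlation with 'a'
        'c': [5.0, 3.9, 3.1, 2.0, 1.1],    # strongly negative correlation with 'a'
    })
    correlationer = Correlationer(method="pearson", critical=0.7)
    positive, negative = correlationer.fit(df, ['a', 'b', 'c'])
    correlationer.generate(df)  # adds the combined moving-difference columns in place
    print(positive, negative)
    print(correlationer.getColumnsGenerated())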
|
# Definition for a binary tree node (the class LeetCode normally provides;
# defined here as well so the module can run standalone).
from typing import List


class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
def findLeaves(self, root: TreeNode) -> List[List[int]]:
heights = {}
self.getHeight(root, heights)
heights = {key:val for key,val in sorted(heights.items(), key = lambda x: x[0])}
allLeaves = []
for height in heights:
allLeaves.append(heights[height])
return allLeaves
def getHeight(self, root, heights) -> int:
if not root:
return -1
leftHeight = self.getHeight(root.left, heights)
rightHeight = self.getHeight(root.right, heights)
currHeight = max(leftHeight, rightHeight) + 1
if currHeight in heights:
heights[currHeight].append(root.val)
else:
heights[currHeight] = [root.val]
return currHeight
# def findLeaves(self, root: TreeNode) -> List[List[int]]:
# all_leaves = []
# while True:
# leaves = []
# if not self.deleteLeaves(root, None, leaves):
# all_leaves.append(leaves)
# break
# all_leaves.append(leaves)
# return all_leaves
# def deleteLeaves(self, root, parent, leaves):
# if not root.left and not root.right:
# # this is a leaf
# leaves.append(root.val)
# if parent and parent.right == root:
# parent.right = None
# elif parent and parent.left == root:
# parent.left = None
# else:
# return False
# else:
# # has at least one child
# if root.left:
# self.deleteLeaves(root.left, root, leaves)
# if root.right:
# self.deleteLeaves(root.right, root, leaves)
# return True
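# Hedged local check (not part of the LeetCode submission): exercise the
# solution on the tree [1, 2, 3, 4, 5] and print the leaves removed per round.
if __name__ == '__main__':
    root = TreeNode(1, TreeNode(2, TreeNode(4), TreeNode(5)), TreeNode(3))
    print(Solution().findLeaves(root))  # expected: [[4, 5, 3], [2], [1]]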
|
import tensorflow as tf
from keras import optimizers
from keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation, BatchNormalization
from keras.models import Sequential
from keras_layers import ConvGHD, FCGHD, CustomRelu
def categorical_crossentropy(y_true, y_pred):
return tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_true,
logits=y_pred)
def ghd_mnist_model(learning_rate, double_threshold, per_pixel, alpha, relu=False):
with tf.variable_scope('ghn'):
model = Sequential()
model.add(ConvGHD(filters=16,
kernel_size=[5, 5],
double_threshold=double_threshold,
per_pixel=per_pixel,
alpha=alpha,
input_shape=(28, 28, 1),
name='conv1'))
if relu:
model.add(CustomRelu())
model.add(MaxPooling2D(pool_size=[2, 2],
strides=[2, 2]))
model.add(ConvGHD(filters=64,
kernel_size=[5, 5],
double_threshold=double_threshold,
per_pixel=per_pixel,
alpha=alpha,
name='conv2'))
if relu:
model.add(CustomRelu())
model.add(MaxPooling2D(pool_size=[2, 2],
strides=[2, 2]))
model.add(Flatten())
model.add(FCGHD(units=1024,
double_threshold=double_threshold,
per_pixel=per_pixel,
alpha=alpha,
name='fc3'))
if relu:
model.add(CustomRelu())
model.add(Dropout(0.5))
model.add(FCGHD(units=10,
double_threshold=double_threshold,
per_pixel=per_pixel,
alpha=alpha,
name='fc4'))
model.compile(optimizer=optimizers.Adam(learning_rate),
loss=categorical_crossentropy,
metrics=['accuracy'])
return model
def naive_mnist_model(learning_rate):
with tf.variable_scope('naive'):
model = Sequential()
model.add(Conv2D(filters=16,
kernel_size=[5, 5],
input_shape=(28, 28, 1),
name='conv1'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=[2, 2],
strides=[2, 2]))
model.add(Conv2D(filters=64,
kernel_size=[5, 5],
name='conv2'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=[2, 2],
strides=[2, 2]))
model.add(Flatten())
model.add(Dense(units=1024,
name='fc3'))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(units=10,
name='fc4'))
model.compile(optimizer=optimizers.Adam(learning_rate),
loss=categorical_crossentropy,
metrics=['accuracy'])
return model
def bn_mnist_model(learning_rate):
with tf.variable_scope('bn'):
model = Sequential()
model.add(Conv2D(filters=16,
kernel_size=[5, 5],
input_shape=(28, 28, 1),
name='conv1'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=[2, 2],
strides=[2, 2]))
model.add(Conv2D(filters=64,
kernel_size=[5, 5],
name='conv2'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=[2, 2],
strides=[2, 2]))
model.add(Flatten())
model.add(Dense(units=1024,
name='fc3'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(units=10,
name='fc4'))
model.add(BatchNormalization())
model.compile(optimizer=optimizers.Adam(learning_rate),
loss=categorical_crossentropy,
metrics=['accuracy'])
return model
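# Hedged usage sketch (not part of the original module): build one of the
# models above and print its layer summary. This assumes the TF1-era
# standalone `keras` package and the local `keras_layers` module import
# cleanly; the learning rate is illustrative only.
if __name__ == '__main__':
    demo_model = naive_mnist_model(learning_rate=1e-3)
    demo_model.summary()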
|
"""Internal Accretion constants."""
ARTIFACTS_PREFIX = "accretion/artifacts/"
ARTIFACT_MANIFESTS_PREFIX = "accretion/manifests/"
SOURCE_PREFIX = "accretion/source/"
LAYER_MANIFESTS_PREFIX = "accretion/layers/"
|
from django.shortcuts import render, get_object_or_404
from django.urls import reverse_lazy
from django.views.generic import ListView, DetailView
from django.views.generic.edit import UpdateView, CreateView, DeleteView
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.conf import settings
from django.core.mail import send_mail
from django.db.models import Count
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.postgres.search import (
SearchVector,
SearchQuery,
SearchRank,
TrigramSimilarity
)
from taggit.models import Tag
from .models import Post
from .forms import (
EmailPostForm,
CommentForm,
SearchForm
)
def post_list_view(request, *args, **kwargs):
posts = Post.published.all()
tag_slug = kwargs.get('slug')
tag = None
tag_list = None
if tag_slug:
tag_slug = kwargs.get('slug').split("-")
tag = list(Tag.objects.filter(slug__in=tag_slug))
tag_list = list(Tag.objects.values('id').filter(slug__in=tag_slug))
posts = posts.filter(tags__in=[t['id'] for t in tag_list]).distinct()
if request.GET.get('search'):
request_search = request.GET.get('search')
posts = posts.filter(body__search=request_search)
paginator = Paginator(posts, settings.PAGINATION_SIZE)
requested_page_number = request.GET.get('page')
try:
posts = paginator.get_page(requested_page_number)
except PageNotAnInteger:
posts = paginator.get_page(1)
except EmptyPage:
posts = paginator.get_page(paginator.num_pages)
return render(request, 'blogs/posts/list.html', {
"posts": posts,
"tag": tag
})
def post_details_view(request, *args, **kwargs):
slug = kwargs.get('slug')
year = kwargs.get('year')
month = kwargs.get('month')
day = kwargs.get('day')
post = get_object_or_404(Post,
publish__year=year,
publish__month=month,
publish__day=day,
slug=slug,
status='published')
comments = post.comments.filter(active=True)
comments_paginator = Paginator(comments, 2)
requested_page_number = request.GET.get('page')
try:
comments = comments_paginator.get_page(requested_page_number)
except PageNotAnInteger:
comments = comments_paginator.get_page(1)
except EmptyPage:
comments = comments_paginator.get_page(comments_paginator.num_pages)
new_comment = None
if (request.method == 'POST'):
comment_form = CommentForm(data=request.POST)
if comment_form.is_valid():
new_comment = comment_form.save(commit=False)
new_comment.post = post
new_comment.save()
else:
comment_form = CommentForm()
post_tags_ids = post.tags.values_list('id', flat=True)
similar_posts = Post.published.filter(tags__in=post_tags_ids)
similar_posts = similar_posts.annotate(
same_tags=Count('tags')
).order_by(
'-same_tags',
'-publish'
)[:4]
return render(request, 'blogs/posts/details.html', {
"post": post,
"comments": comments,
"new_comment": new_comment,
"comment_form": comment_form,
"similar_posts": similar_posts
})
class PostListView(ListView):
model = Post
context_object_name = 'posts'
paginate_by = settings.PAGINATION_SIZE
template_name = 'blogs/posts/list.html'
def get_queryset(self):
return Post.objects.filter(status="published")
class PostCreateView(LoginRequiredMixin, CreateView):
    login_url = reverse_lazy('login')
template_name = 'blogs/posts/create.html'
model = Post
fields = ('title', 'body', 'author', 'status',)
class PostUpdateView(LoginRequiredMixin, UpdateView):
    login_url = reverse_lazy('login')
template_name = 'blogs/posts/update.html'
model = Post
fields = ('title', 'body', 'author', 'status',)
class PostDeleteView(LoginRequiredMixin, DeleteView):
    login_url = reverse_lazy('login')
template_name = 'blogs/posts/delete.html'
model = Post
success_url = reverse_lazy('blogs:posts-list')
def post_share(request, *args, **kwargs):
post_id = kwargs.get('post_id')
post = get_object_or_404(Post, id=post_id, status="published")
sent = False
if request.method == "POST":
form = EmailPostForm(request.POST)
if form.is_valid():
cleaned_form_data = form.cleaned_data
post_url = request.build_absolute_uri(post.get_absolute_url())
            subject = "{} ({}) recommends you read {}".format(
                cleaned_form_data['name'],
                cleaned_form_data['email'],
                post.title,
            )
            message = "Read '{}' at {}\n\n{}'s comments: {}".format(
post.title,
post_url,
cleaned_form_data['name'],
cleaned_form_data['comments'],
)
send_mail(subject, message, 'admin@test.com', [
cleaned_form_data['to']
])
sent = True
else:
form = EmailPostForm()
return render(request, 'blogs/email/share.html', {
"post": post,
"form": form,
"sent": sent
})
def post_search(request, *args, **kwargs):
form = SearchForm()
query = None
results = []
if 'query' in request.GET:
form = SearchForm(request.GET)
if form.is_valid():
query = form.cleaned_data['query']
# search_vector = SearchVector('title', weight="A") + SearchVector('body', weight="B")
# search_query = SearchQuery(query)
# results = Post.objects.annotate(
# search=SearchVector('title', 'body'),
# ).filter(search=query)
# results = Post.objects.annotate(
# search=SearchVector('title', 'body'),
# rank=SearchRank(SearchVector('title', 'body'), SearchQuery(query))
# ).filter(search=query)
# results = Post.objects.annotate(
# search=search_vector,
# rank=SearchRank(search_vector, search_query)
# ).filter(rank__gte=0.3).order_by('-rank')
results = Post.objects.annotate(
similarity=TrigramSimilarity('title', query)
).filter(similarity__gt=0.3).order_by('-similarity')
print(results)
return render(
request,
'blogs/posts/search.html',
{
"form": form,
"query": query,
"results": results
}
)
|
# -*- coding: utf-8 -*-
# @Time : 2020/7/13 0:38
# @Author : Zhongyi Hua
# @FileName: check.py
# @Usage:
# @Note:
# @E-mail: njbxhzy@hotmail.com
"""
Split the individual check tasks into separate routines so they can be reused repeatedly:
1. presence of conserved (housekeeping) genes
2. non-standard gene names
3. duplicated gene regions
4. renumbering of gene locus IDs
5. CDS validity check
"""
import gffutils
import portion as pt
import pandas as pd
from Bio import SeqIO
import os
get_seq = lambda seq, start, end: seq.seq[start - 1:end]
location = os.path.abspath(os.path.join(__file__, '..'))
tb_cds = pd.read_table(os.path.join(location, 'ref/cds.txt'))
tb_rna = pd.read_table(os.path.join(location, 'ref/rna.txt'))
standard_name_list = [record['name'] for record in tb_cds.to_dict('records')]
standard_name_list += [record['name'] for record in tb_rna.to_dict('records')]
def get_record(record_feature, record_type, attributes):
"""
    Transform a gff feature into a dict
    :param record_feature: a record feature (class gffutils.Feature)
    :param record_type: gene/CDS/tRNA/rRNA...
    :param attributes: a list of strings to be placed in gff3 column 9
    :return: a dict describing the feature
"""
feature_record = {'type': record_type,
'start': record_feature.start,
'end': record_feature.end,
'strand': record_feature.strand,
'phase': '.',
'attributes': ";".join(attributes)}
return feature_record
class CheckCp:
standard_name_list = standard_name_list
def __init__(self, gff_path):
self.gff = gffutils.create_db(gff_path, ':memory:', merge_strategy='create_unique')
def check_hs(self):
"""
Check housekeeping gene: matK, rbcL
:return: a message
"""
gene_name_list = []
for gene in self.gff.features_of_type('gene', order_by='start'):
gene_name_list.append(gene.attributes['Name'][0])
if ('matK' not in gene_name_list) and ('matk' not in gene_name_list):
print('matK loss!')
if ('rbcL' not in gene_name_list) and ('rbcl' not in gene_name_list):
print('rbcL loss!')
def check_name(self):
"""
        Check for non-standard gene names
:return: messages
"""
gene_name_list = []
for gene in self.gff.features_of_type('gene', order_by='start'):
gene_name_list.append(gene.attributes['Name'][0])
for gene_name in gene_name_list:
if not ((gene_name in CheckCp.standard_name_list) or gene_name.startswith('orf')):
print('check ' + gene_name)
def check_region(self):
"""
check duplicated gene region
:return: messages
"""
region_list = []
locus_list = []
for gene in self.gff.features_of_type('gene', order_by='start'):
region_list.append(pt.closed(gene.start, gene.end))
locus_list.append([gene.attributes['Name'][0]])
for i in range(len(region_list) - 1):
if not region_list[i] < region_list[i + 1]:
print(locus_list[i], region_list[i], ' and ', locus_list[i + 1], region_list[i + 1],
' are duplicated')
print('check duplicated region done')
def renumber(self, seq_id, species_pre):
print('Renumber gene id')
feature_list = []
gene_count = 0
for gene in self.gff.features_of_type('gene', order_by='start'):
gene_count += 1
gene_id = species_pre + '%03d' % gene_count
gene_attributes = ['ID=' + gene_id]
gene_type = gene.attributes['gene_biotype'][0]
gene_attributes += [_[0] + '=' + _[1][0] for _ in gene.attributes.items() if not _[0] == 'ID']
feature_list.append(get_record(gene, 'gene', gene_attributes))
child_count = 0
if gene_type == 'protein_coding':
for cds in self.gff.children(gene, featuretype='CDS', order_by='start'):
child_count += 1
cds_attributes = ['ID=' + 'cds_' + gene_id + '_' + str(child_count),
'Parent=' + gene_id,
'product=' + cds.attributes['product'][0]]
cds_record = get_record(cds, 'CDS', cds_attributes)
cds_record.update({'phase': cds.frame})
feature_list.append(cds_record)
else:
for rna in self.gff.children(gene, featuretype=('tRNA', 'rRNA')):
rna_attributes = ['ID=' + 'rna_' + gene_id + '_1',
'Parent=' + gene_id,
'product=' + rna.attributes['product'][0]]
feature_list.append(get_record(gene, gene_type, rna_attributes))
for exon in self.gff.children(gene, featuretype='exon', order_by='start'):
child_count += 1
exon_attributes = ['ID=' + 'exon_' + gene_id + '_' + str(child_count),
'Parent=' 'rna_' + gene_id + '_1'
]
feature_list.append(get_record(exon, 'exon', exon_attributes))
_result_gff = pd.DataFrame(feature_list)
_result_gff['seqid'] = seq_id
_result_gff['score'] = '.'
_result_gff['source'] = 'GeSeq'
_result_gff = _result_gff[["seqid", "source", "type", "start", "end", "score", "strand", "phase", "attributes"]]
print('Renumber done')
return _result_gff
def check_cds(self, seq_path):
geo_seq = SeqIO.read(seq_path, 'fasta')
print('Auto check start')
for gene in self.gff.features_of_type('gene', order_by='start'):
if gene.attributes['gene_biotype'] == ['protein_coding']:
if gene.attributes['Name'] == ['rps12']:
continue
seq_combined = ""
cds_count = 0
for cds in self.gff.children(gene,
featuretype='CDS',
order_by='start',
reverse=False if gene.strand == '+' else True):
if (cds_count == 0) and (not cds.frame == '0'):
print('check ', gene.id, ' frame')
seq = get_seq(geo_seq, cds.start, cds.end)
if cds.strand == '-':
seq_combined += seq.reverse_complement()
else:
seq_combined += seq
cds_count += 1
if seq_combined == '':
continue
elif seq_combined.__len__() <= 33:
print('The CDS length of', gene.id, 'is less than 33 bp')
try:
seq_combined.translate(table=11, cds=True)
except Exception as e:
print(gene.id)
print(e)
print('Auto check done')
def add_rps12(geseq_gff, new_gff, seq_path, species_pre):
"""
:param geseq_gff: raw geseq gff file
:param new_gff: renumbered gff
    :param species_pre: locus ID prefix used for the newly added gene IDs
    :return: the renumbered gff DataFrame with the rps12 features appended
"""
if type(new_gff) == str:
new_gff = pd.read_table(new_gff,
comment='#',
names=["seqid", "source", "type", "start", "end", "score", "strand", "phase", "attributes"])
raw_gff = pd.read_table(geseq_gff,
comment='#',
names=["seqid", "source", "type", "start", "end", "score", "strand", "phase", "attributes"])
seq = SeqIO.read(seq_path, 'fasta')
gene_count = sum(new_gff['type'] == 'gene')
# get rps12
features_list = []
gene_features = raw_gff[(raw_gff['attributes'].str.contains('rps12')) & (raw_gff['type'] == 'gene')]
part1 = gene_features[gene_features.duplicated(subset=['start', 'end'])].iloc[0]
part2 = gene_features.drop_duplicates(keep=False)
err_list = []
for idx, part in part2.iterrows():
gene_count += 1
gene_id = species_pre + '%03d' % gene_count
part_attributes = ['ID=' + gene_id,
'Name=rps12',
'gene_biotype=protein_coding'
]
cds1 = raw_gff[(raw_gff['start'] == part.start) & (raw_gff['type'] == 'exon')].iloc[0]
cds2 = raw_gff[(raw_gff['end'] == part.end) & (raw_gff['type'] == 'exon')].iloc[0]
cds1_attributes = ['ID=' + 'cds_' + gene_id + '_1',
'Parent=' + gene_id,
'product=30S ribosomal protein S12']
cds2_attributes = ['ID=' + 'cds_' + gene_id + '_2',
'Parent=' + gene_id,
'product=30S ribosomal protein S12']
cds3_attributes = ['ID=' + 'cds_' + gene_id + '_3',
'Parent=' + gene_id,
'product=30S ribosomal protein S12']
features_list.append(get_record(part1, 'gene', part_attributes + ['part=1/2']))
features_list.append(get_record(part, 'gene', part_attributes + ['part=2/2']))
features_list.append(get_record(part1, 'CDS', cds1_attributes))
features_list.append(get_record(cds1, 'CDS', cds2_attributes))
features_list.append(get_record(cds2, 'CDS', cds3_attributes))
# check translation
seq_part1 = get_seq(seq, part1.start, part1.end)
if part1.strand == '-':
seq_part1 = seq_part1.reverse_complement()
seq_combined = ''
for feature in [cds1, cds2]:
seq_combined += get_seq(seq, feature.start, feature.end)
if cds1.strand == '-':
            seq_combined = seq_combined.reverse_complement()
seq_combined = seq_part1 + seq_combined
try:
seq_combined.translate(table=11, cds=True)
except Exception as e:
print(gene_id)
print(e)
err_list.append(gene_id)
rps12_df = pd.DataFrame.from_dict({idx: feature for idx, feature in enumerate(features_list)}, 'index')
rps12_df.loc[rps12_df['type'] == 'CDS', 'phase'] = 0
rps12_df['seqid'] = new_gff.seqid.to_list()[0]
rps12_df['source'] = 'GeSeq'
rps12_df['score'] = '.'
for err_id in err_list:
rps12_df.loc[rps12_df['attributes'].str.startswith('ID='+err_id), 'attributes'] += ';pseudo=true'
new_gff = new_gff.append(rps12_df, sort=False)
return new_gff
def add2_rps12(pga_gb, new_gff, species_pre):
def _get_record(location, fea_type, attributes):
return {'type': fea_type,
'start': location.start+1,
'end': location.end,
'strand': '-' if location.strand == -1 else '+',
'phase': '.',
'attributes': ";".join(attributes)}
if type(new_gff) == str:
new_gff = pd.read_table(new_gff,
comment='#',
names=["seqid", "source", "type", "start", "end", "score", "strand", "phase", "attributes"])
genome = SeqIO.read(pga_gb, 'genbank')
features_list = []
err_list = []
gene_count = sum(new_gff['type'] == 'gene')
rps12_list = [ele for ele in genome.features if ele.type == 'CDS' and ele.qualifiers.get('gene') == ['rps12']]
part1 = [part for part in rps12_list if len(part.location.parts) == 1][0]
part2_list = [part for part in rps12_list if len(part.location.parts) > 1]
for part in part2_list:
gene_count += 1
gene_id = species_pre + '%03d' % gene_count
part_attributes = ['ID=' + gene_id,
'Name=rps12',
'gene_biotype=protein_coding'
]
features_list.append(_get_record(part1.location, 'gene', part_attributes + [
'exception=trans-splicing;part=1/2']))
features_list.append(_get_record(part.location, 'gene', part_attributes + [
'exception=trans-splicing;part=2/2']))
cds_count = 1
cds_attributes = ['ID=' + 'cds_' + gene_id + '_' + str(cds_count),
'Parent=' + gene_id,
'product=30S ribosomal protein S12']
features_list.append(_get_record(part1.location, 'CDS', cds_attributes))
seq_part1 = get_seq(genome, part1.location.start+1, part1.location.end)
if part1.location.strand == -1:
seq_part1 = seq_part1.reverse_complement()
seq_combined = ''
for cds in part.location.parts:
cds_count += 1
cds_attributes = ['ID=' + 'cds_' + gene_id + '_' + str(cds_count),
'Parent=' + gene_id,
'product=30S ribosomal protein S12']
features_list.append(_get_record(cds, 'CDS', cds_attributes))
if cds.strand == -1:
seq_combined += get_seq(genome, cds.start+1, cds.end).reverse_complement()
else:
seq_combined += get_seq(genome, cds.start+1, cds.end)
seq_combined = seq_part1 + seq_combined
try:
seq_combined.translate(table=11, cds=True)
except Exception as e:
print(gene_id)
print(e)
err_list.append(gene_id)
rps12_df = pd.DataFrame.from_dict({idx: feature for idx, feature in enumerate(features_list)}, 'index')
rps12_df.loc[rps12_df['type'] == 'CDS', 'phase'] = 0
rps12_df['seqid'] = new_gff.seqid.to_list()[0]
rps12_df['source'] = 'GeSeq'
rps12_df['score'] = '.'
for err_id in err_list:
rps12_df.loc[rps12_df['attributes'].str.startswith('ID='+err_id), 'attributes'] += ';pseudo=true'
new_gff = new_gff.append(rps12_df, sort=False)
return new_gff
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Check your chloroplast genome gff file')
parser.add_argument('-i', '--info_table', required=True,
help='<file_path> meta information table which has five columns: Geseq gff path, seq path, '
'seqid, locus prefix, renumber result path. If you do not need renumber, just provide gff '
'and seq')
parser.add_argument('-c', '--cds', action='store_true', default=False,
help='check cds legitimacy')
parser.add_argument('-s', '--hs', action='store_true', default=False,
help='check house-keeping gene (matK, rbcL)')
parser.add_argument('-n', '--name', action='store_true', default=False,
help='check whether gene names are legal name')
parser.add_argument('-r', '--region', action='store_true', default=False,
help='check whether gene region duplicated')
parser.add_argument('-e', '--renumber', action='store_true', default=False,
help='renumber gene locus suffix')
args = parser.parse_args()
info_table = pd.read_table(args.info_table, names=['raw_gff_path', 'seq_path', 'seq_id', 'prefix', 'result'])
for ind, row in info_table.iterrows():
print(os.path.basename(row['raw_gff_path']))
tmp_check = CheckCp(row['raw_gff_path'])
if args.cds:
tmp_check.check_cds(row['seq_path'])
if args.renumber:
result_gff = tmp_check.renumber(row['seq_id'], row['prefix'])
result_gff.to_csv(row['result'], sep='\t', index=False)
if args.hs:
tmp_check.check_hs()
if args.name:
tmp_check.check_name()
if args.region:
tmp_check.check_region()
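# Hedged example (not part of the original script) of a single --info_table row.
# The table is tab-separated with no header and has the five columns described
# in the argparse help; all paths, the seqid and the locus prefix below are
# illustrative placeholders only:
#   example.gff3 <TAB> example.fasta <TAB> NC_000000.1 <TAB> EXA <TAB> example.renumbered.gff3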
|
"""Support for Roku binary sensors."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from rokuecp.models import Device as RokuDevice
from homeassistant.components.binary_sensor import (
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .entity import RokuEntity
@dataclass
class RokuBinarySensorEntityDescriptionMixin:
"""Mixin for required keys."""
value_fn: Callable[[RokuDevice], bool | None]
@dataclass
class RokuBinarySensorEntityDescription(
BinarySensorEntityDescription, RokuBinarySensorEntityDescriptionMixin
):
"""Describes a Roku binary sensor entity."""
BINARY_SENSORS: tuple[RokuBinarySensorEntityDescription, ...] = (
RokuBinarySensorEntityDescription(
key="headphones_connected",
name="Headphones Connected",
icon="mdi:headphones",
value_fn=lambda device: device.info.headphones_connected,
),
RokuBinarySensorEntityDescription(
key="supports_airplay",
name="Supports AirPlay",
icon="mdi:cast-variant",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.info.supports_airplay,
),
RokuBinarySensorEntityDescription(
key="supports_ethernet",
name="Supports Ethernet",
icon="mdi:ethernet",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.info.ethernet_support,
),
RokuBinarySensorEntityDescription(
key="supports_find_remote",
name="Supports Find Remote",
icon="mdi:remote",
entity_category=EntityCategory.DIAGNOSTIC,
value_fn=lambda device: device.info.supports_find_remote,
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up a Roku binary sensors based on a config entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
unique_id = coordinator.data.info.serial_number
async_add_entities(
RokuBinarySensorEntity(
device_id=unique_id,
coordinator=coordinator,
description=description,
)
for description in BINARY_SENSORS
)
class RokuBinarySensorEntity(RokuEntity, BinarySensorEntity):
"""Defines a Roku binary sensor."""
entity_description: RokuBinarySensorEntityDescription
@property
def is_on(self) -> bool:
"""Return the state of the sensor."""
return bool(self.entity_description.value_fn(self.coordinator.data))
|
"""Definition for aiosql compatible adapters.
Adapters here aren't "real" adapters, that can execute queries
but just dummy classes with compatible interface.
"""
# this drivers won't and shouldn't be used as real drivers.
# they are required only for queries building.
class EdgeQLAsyncAdapter:
"""Dummy adapter for async driver."""
is_aio_driver = True
class EdgeQLSyncAdapter:
"""Dummy adapter for sync driver."""
is_aio_driver = False
|
from socket import socket, AF_INET, SOCK_DGRAM, timeout
import binascii
import json
import os
from math import ceil
FILE_PATH = 'client_storage/innopolis.jpg'
CLIENT_PORT = 12345
SERVER_PORT = 12346
SERVER_ADDR = 'localhost'
CLIENT_BUF_SIZE = 2048
SERVER_BUF_SIZE = -1 # We don't know yet
def get_crc_checksum(file_contents):
file_contents = (binascii.crc32(file_contents) & 0xFFFFFFFF)
return "%08X" % file_contents
def send_file_metadata():
file_info = {}
if not os.path.exists(FILE_PATH):
print('File does not exist')
exit()
with open(FILE_PATH, 'rb') as file:
file_info['name'] = os.path.basename(file.name)
file_info['size'] = os.path.getsize(FILE_PATH)
file_info['checksum'] = get_crc_checksum(file.read())
dmp = json.dumps(file_info)
s.sendto(dmp.encode(), (SERVER_ADDR, SERVER_PORT))
print('File metadata sent successfully')
return file_info
def esc(msg):
print(msg)
s.close()
exit()
if __name__ == '__main__':
s = socket(family=AF_INET, type=SOCK_DGRAM)
s.bind(('localhost', CLIENT_PORT))
file_metadata = send_file_metadata()
# print('Awaiting server response...')
s.settimeout(1)
try:
data, addr = s.recvfrom(CLIENT_BUF_SIZE)
print(f'Server buffer size: {data.decode()}')
SERVER_BUF_SIZE = int(data.decode())
except KeyboardInterrupt:
esc('User has quit')
except timeout:
esc("The server isn't available.")
# upload file
while True:
with open(FILE_PATH, 'rb') as f:
for i in range(ceil(file_metadata['size'] / SERVER_BUF_SIZE)):
data = f.read(SERVER_BUF_SIZE)
s.sendto(data, (SERVER_ADDR, SERVER_PORT))
# print('Awaiting server response...')
try:
data, addr = s.recvfrom(CLIENT_BUF_SIZE)
if data.decode() != 'OK':
print('Upload error, retrying...')
else:
esc('File uploaded successfully.')
except KeyboardInterrupt:
esc('User has quit')
except timeout:
esc("The server isn't available.")
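# --- Hypothetical reference server sketch (not part of the original client) ---
# The function below is an illustrative, assumption-laden counterpart to the
# protocol this client speaks: receive the JSON metadata, answer with the
# server's buffer size, collect the chunks, and answer 'OK' once the announced
# number of bytes has arrived. Names such as storage_dir are invented here.
def _reference_server_sketch(buf_size=1024, storage_dir='server_storage'):
    srv = socket(family=AF_INET, type=SOCK_DGRAM)
    srv.bind((SERVER_ADDR, SERVER_PORT))
    meta, client_addr = srv.recvfrom(buf_size)
    file_info = json.loads(meta.decode())
    srv.sendto(str(buf_size).encode(), client_addr)  # tell the client our buffer size
    received = b''
    while len(received) < file_info['size']:
        chunk, client_addr = srv.recvfrom(buf_size)
        received += chunk
    os.makedirs(storage_dir, exist_ok=True)
    with open(os.path.join(storage_dir, file_info['name']), 'wb') as out:
        out.write(received)
    # acknowledge only if the checksum matches what the client announced
    reply = 'OK' if get_crc_checksum(received) == file_info['checksum'] else 'FAIL'
    srv.sendto(reply.encode(), client_addr)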
|
#!/usr/bin/env python3
# Copyright (C) Alibaba Group Holding Limited.
from models.module_zoo.branches.r2plus1d_branch import R2Plus1DBranch
from models.module_zoo.branches.r2d3d_branch import R2D3DBranch
from models.module_zoo.branches.csn_branch import CSNBranch
from models.module_zoo.branches.slowfast_branch import SlowfastBranch
from models.module_zoo.branches.s3dg_branch import STConv3d
from models.module_zoo.branches.non_local import NonLocal
from models.module_zoo.branches.tada_branch import TAdaConv2d
|
# -*- coding: utf-8 -*-
import json
import uuid
from typing import List, Dict, Tuple, Iterable
from requests import Response
from TM1py.Exceptions.Exceptions import TM1pyRestException
from TM1py.Objects.Process import Process
from TM1py.Services.ObjectService import ObjectService
from TM1py.Services.RestService import RestService
from TM1py.Utils import format_url, require_admin
class ProcessService(ObjectService):
""" Service to handle Object Updates for TI Processes
"""
def __init__(self, rest: RestService):
super().__init__(rest)
def get(self, name_process: str, **kwargs) -> Process:
""" Get a process from TM1 Server
:param name_process:
:return: Instance of the TM1py.Process
"""
url = format_url(
"/api/v1/Processes('{}')?$select=*,UIData,VariablesUIData,"
"DataSource/dataSourceNameForServer,"
"DataSource/dataSourceNameForClient,"
"DataSource/asciiDecimalSeparator,"
"DataSource/asciiDelimiterChar,"
"DataSource/asciiDelimiterType,"
"DataSource/asciiHeaderRecords,"
"DataSource/asciiQuoteCharacter,"
"DataSource/asciiThousandSeparator,"
"DataSource/view,"
"DataSource/query,"
"DataSource/userName,"
"DataSource/password,"
"DataSource/usesUnicode,"
"DataSource/subset", name_process)
response = self._rest.GET(url, **kwargs)
return Process.from_dict(response.json())
def get_all(self, **kwargs) -> List[Process]:
""" Get a processes from TM1 Server
:return: List, instances of the TM1py.Process
"""
url = "/api/v1/Processes?$select=*,UIData,VariablesUIData," \
"DataSource/dataSourceNameForServer," \
"DataSource/dataSourceNameForClient," \
"DataSource/asciiDecimalSeparator," \
"DataSource/asciiDelimiterChar," \
"DataSource/asciiDelimiterType," \
"DataSource/asciiHeaderRecords," \
"DataSource/asciiQuoteCharacter," \
"DataSource/asciiThousandSeparator," \
"DataSource/view," \
"DataSource/query," \
"DataSource/userName," \
"DataSource/password," \
"DataSource/usesUnicode," \
"DataSource/subset"
response = self._rest.GET(url, **kwargs)
response_as_dict = response.json()
return [Process.from_dict(p) for p in response_as_dict['value']]
def get_all_names(self, **kwargs) -> List[str]:
""" Get List with all process names from TM1 Server
:Returns:
List of Strings
"""
response = self._rest.GET('/api/v1/Processes?$select=Name', **kwargs)
processes = list(process['Name'] for process in response.json()['value'])
return processes
def create(self, process: Process, **kwargs) -> Response:
""" Create a new process on TM1 Server
:param process: Instance of TM1py.Process class
:return: Response
"""
url = "/api/v1/Processes"
# Adjust process body if TM1 version is lower than 11 due to change in Process Parameters structure
# https://www.ibm.com/developerworks/community/forums/html/topic?id=9188d139-8905-4895-9229-eaaf0e7fa683
if int(self.version[0:2]) < 11:
process.drop_parameter_types()
response = self._rest.POST(url, process.body, **kwargs)
return response
def update(self, process: Process, **kwargs) -> Response:
""" Update an existing Process on TM1 Server
:param process: Instance of TM1py.Process class
:return: Response
"""
url = format_url("/api/v1/Processes('{}')", process.name)
# Adjust process body if TM1 version is lower than 11 due to change in Process Parameters structure
# https://www.ibm.com/developerworks/community/forums/html/topic?id=9188d139-8905-4895-9229-eaaf0e7fa683
if int(self.version[0:2]) < 11:
process.drop_parameter_types()
response = self._rest.PATCH(url, process.body, **kwargs)
return response
def update_or_create(self, process: Process, **kwargs) -> Response:
""" Update or Create a Process on TM1 Server
:param process: Instance of TM1py.Process class
:return: Response
"""
if self.exists(name=process.name, **kwargs):
return self.update(process=process, **kwargs)
return self.create(process=process, **kwargs)
def delete(self, name: str, **kwargs) -> Response:
""" Delete a process in TM1
:param name:
:return: Response
"""
url = format_url("/api/v1/Processes('{}')", name)
response = self._rest.DELETE(url, **kwargs)
return response
def exists(self, name: str, **kwargs) -> bool:
""" Check if Process exists.
:param name:
:return:
"""
url = format_url("/api/v1/Processes('{}')", name)
return self._exists(url, **kwargs)
def compile(self, name: str, **kwargs) -> List:
""" Compile a Process. Return List of Syntax errors.
:param name:
:return:
"""
url = format_url("/api/v1/Processes('{}')/tm1.Compile", name)
response = self._rest.POST(url, **kwargs)
syntax_errors = response.json()["value"]
return syntax_errors
def compile_process(self, process: Process, **kwargs) -> List:
""" Compile a Process. Return List of Syntax errors.
:param process:
:return:
"""
url = "/api/v1/CompileProcess"
payload = json.loads('{"Process":' + process.body + '}')
response = self._rest.POST(
url=url,
data=json.dumps(payload, ensure_ascii=False),
**kwargs)
syntax_errors = response.json()["value"]
return syntax_errors
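    # Illustrative usage sketch (not part of the original module): compiling an
    # unbound Process definition before persisting it. `tm1` stands for a
    # connected TM1Service instance and is assumed here for illustration only.
    #
    #   new_process = Process(name="TM1py.Sample", prolog_procedure="sGreeting = 'Hello';")
    #   errors = tm1.processes.compile_process(new_process)
    #   if not errors:
    #       tm1.processes.create(new_process)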
def execute(self, process_name: str, parameters: Dict = None, timeout: float = None,
**kwargs) -> Response:
""" Ask TM1 Server to execute a process. Call with parameter names as keyword arguments:
tm1.processes.execute("Bedrock.Server.Wait", pLegalEntity="UK01")
:param process_name:
:param parameters: Deprecated! dictionary, e.g. {"Parameters": [ { "Name": "pLegalEntity", "Value": "UK01" }] }
:param timeout: Number of seconds that the client will wait to receive the first byte.
:return:
"""
url = format_url("/api/v1/Processes('{}')/tm1.Execute", process_name)
if not parameters:
if kwargs:
parameters = {"Parameters": []}
for parameter_name, parameter_value in kwargs.items():
parameters["Parameters"].append({"Name": parameter_name, "Value": parameter_value})
else:
parameters = {}
return self._rest.POST(url=url, data=json.dumps(parameters, ensure_ascii=False), timeout=timeout, **kwargs)
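    # Illustrative usage sketch (not part of the original module): calling
    # `execute` through a connected TM1Service instance. Address, credentials
    # and the process name below are placeholders, not values from this module.
    #
    #   from TM1py import TM1Service
    #   with TM1Service(address="localhost", port=12354, user="admin",
    #                   password="apple", ssl=True) as tm1:
    #       tm1.processes.execute("Bedrock.Server.Wait", pWaitSec=2)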
def execute_process_with_return(self, process: Process, **kwargs) -> Tuple[bool, str, str]:
"""
Run unbound TI code directly
:param process: a TI Process Object
:return: success (boolean), status (String), error_log_file (String)
"""
url = "/api/v1/ExecuteProcessWithReturn?$expand=*"
payload = json.loads("{\"Process\":" + process.body + "}")
response = self._rest.POST(
url=url,
data=json.dumps(payload, ensure_ascii=False),
**kwargs)
execution_summary = response.json()
success = execution_summary["ProcessExecuteStatusCode"] == "CompletedSuccessfully"
status = execution_summary["ProcessExecuteStatusCode"]
error_log_file = None if execution_summary["ErrorLogFile"] is None else execution_summary["ErrorLogFile"][
"Filename"]
return success, status, error_log_file
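    # Illustrative usage sketch (not part of the original module): running
    # unbound TI code via `execute_process_with_return`. The process is never
    # created on the server; `tm1` again stands for a connected TM1Service.
    #
    #   unbound = Process(name="unbound", prolog_procedure="sValue = 'Hello';")
    #   success, status, error_log_file = tm1.processes.execute_process_with_return(unbound)
    #   if not success:
    #       print(status, error_log_file)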
def execute_with_return(self, process_name: str, timeout: float = None, **kwargs) -> Tuple[bool, str, str]:
""" Ask TM1 Server to execute a process.
pass process parameters as keyword arguments to this function. E.g:
self.tm1.processes.execute_with_return(
process_name="Bedrock.Server.Wait",
pWaitSec=2)
:param process_name: name of the TI process
:param timeout: Number of seconds that the client will wait to receive the first byte.
:param kwargs: dictionary of process parameters and values
:return: success (boolean), status (String), error_log_file (String)
"""
url = format_url("/api/v1/Processes('{}')/tm1.ExecuteWithReturn?$expand=*", process_name)
parameters = dict()
if kwargs:
parameters = {"Parameters": []}
for parameter_name, parameter_value in kwargs.items():
parameters["Parameters"].append({"Name": parameter_name, "Value": parameter_value})
response = self._rest.POST(
url=url,
data=json.dumps(parameters, ensure_ascii=False),
timeout=timeout,
**kwargs)
execution_summary = response.json()
success = execution_summary["ProcessExecuteStatusCode"] == "CompletedSuccessfully"
status = execution_summary["ProcessExecuteStatusCode"]
error_log_file = None if execution_summary["ErrorLogFile"] is None else execution_summary["ErrorLogFile"][
"Filename"]
return success, status, error_log_file
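    # Illustrative usage sketch (not part of the original module): combining
    # `execute_with_return` with `get_error_log_file_content` (defined further
    # below) to inspect a failed run. Assumes a connected TM1Service instance `tm1`.
    #
    #   success, status, error_log_file = tm1.processes.execute_with_return(
    #       process_name="Bedrock.Server.Wait", pWaitSec=2)
    #   if not success and error_log_file:
    #       print(tm1.processes.get_error_log_file_content(error_log_file))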
@require_admin
def execute_ti_code(self, lines_prolog: Iterable[str], lines_epilog: Iterable[str] = None, **kwargs) -> Response:
""" Execute lines of code on the TM1 Server
:param lines_prolog: list - where each element is a valid statement of TI code.
:param lines_epilog: list - where each element is a valid statement of TI code.
"""
process_name = "".join(['}TM1py', str(uuid.uuid4())])
p = Process(name=process_name,
prolog_procedure=Process.AUTO_GENERATED_STATEMENTS + '\r\n'.join(lines_prolog),
epilog_procedure=Process.AUTO_GENERATED_STATEMENTS + '\r\n'.join(
lines_epilog) if lines_epilog else '')
self.create(p, **kwargs)
try:
return self.execute(process_name, **kwargs)
except TM1pyRestException as e:
raise e
finally:
self.delete(process_name, **kwargs)
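    # Illustrative usage sketch (not part of the original module): `execute_ti_code`
    # wraps the given statements in a temporary '}TM1py<uuid>' process, executes
    # it and deletes it afterwards. Example call, assuming a connected `tm1`:
    #
    #   tm1.processes.execute_ti_code(
    #       lines_prolog=["sMsg = 'hello from TM1py';", "LogOutput('INFO', sMsg);"],
    #       lines_epilog=["LogOutput('INFO', 'done');"])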
def get_error_log_file_content(self, file_name: str, **kwargs) -> str:
""" Get content of error log file (e.g. TM1ProcessError_20180926213819_65708356_979b248b-232e622c6.log)
:param file_name: name of the error log file in the TM1 log directory
:return: String, content of the file
"""
url = format_url("/api/v1/ErrorLogFiles('{}')/Content", file_name)
response = self._rest.GET(url=url, **kwargs)
return response.text
def get_processerrorlogs(self, process_name: str, **kwargs) -> List:
""" Get all ProcessErrorLog entries for a process
:param process_name: name of the process
:return: list - Collection of ProcessErrorLogs
"""
url = format_url("/api/v1/Processes('{}')/ErrorLogs", process_name)
response = self._rest.GET(url=url, **kwargs)
return response.json()['value']
def get_last_message_from_processerrorlog(self, process_name: str, **kwargs) -> str:
""" Get the latest ProcessErrorLog from a process entity
:param process_name: name of the process
:return: String - the errorlog, e.g.: "Fehler: Prolog Prozedurzeile (9): Zeichenfolge "US772131
kann nicht in eine reelle Zahl umgewandelt werden."
"""
logs_as_list = self.get_processerrorlogs(process_name, **kwargs)
if len(logs_as_list) > 0:
timestamp = logs_as_list[-1]['Timestamp']
url = format_url("/api/v1/Processes('{}')/ErrorLogs('{}')/Content", process_name, timestamp)
            # response is plain text - due to entity type Edm.Stream
            response = self._rest.GET(url=url, **kwargs)
            return response.text
|