content stringlengths 5 1.05M |
|---|
import random,math
def distribution(decay, buckets):
    """Return `buckets` random values normalized to sum to 1, sorted ascending.

    The raw values form a geometric progression: the first is uniform in
    [0, 1) and each subsequent value is the previous one times ``decay``.
    Because every term shares the same random first factor, normalizing by
    the total makes the result independent of the random draw.
    (Fixes the garbled docstring "sum noamrlzes 0..1".)
    """
    raw = [random.random()]
    for _ in range(buckets - 1):
        # Geometric decay from the previous value.
        raw.append(raw[-1] * decay)
    total = sum(raw)
    return sorted(x / total for x in raw)
def run(n=1000, decay=0.99, dimensions=10, buckets=10):
    """Sample `n` products of one random bucket weight per dimension.

    Builds `dimensions` independent normalized distributions, prints them
    for inspection, then returns the `n` sampled products sorted in
    descending order.

    :param n: number of product samples to draw
    :param decay: geometric decay passed to distribution()
    :param dimensions: number of independent distributions
    :param buckets: number of values per distribution
    :return: list of n floats, sorted descending
    """
    ds = [distribution(decay, buckets) for _ in range(dimensions)]
    print()
    # Plain loop instead of `[print(d) for d in ds]`: a comprehension used
    # only for its side effect builds a throwaway list of Nones.
    for d in ds:
        print(d)
    print()
    nums = [math.prod(random.choice(d) for d in ds) for _ in range(n)]
    return sorted(nums, reverse=True)
# Demo entry point: print the 1000 sampled products, largest first.
for x in run(decay=.9,dimensions=6,buckets=10,n=1000): print(x)
|
from datetime import datetime
import pytest
from sqlalchemy import func
from mock_alchemy.mocking import UnifiedAlchemyMagicMock
from quarry.web.models.query import Query
from quarry.web.models.star import Star
from quarry.web.models.queryrevision import QueryRevision
from quarry.web.models.queryrun import QueryRun
from quarry.web.models.user import User
from quarry.web.models.user import UserGroup
# @pytest.mark.usefixtures([mocker, client])
class TestUser:
    """Tests for user-related routes, backed by a mocked SQLAlchemy session.

    UnifiedAlchemyMagicMock stands in for the real database: the autouse
    fixture pre-loads one of each model object and patches the app's
    Connections.session property to return the mock, so route handlers see
    canned query results and the tests can assert on the calls made.
    """

    @pytest.fixture(autouse=True)
    def setup_method(self, mocker, client):
        # mock_alchemy is not great at understanding post data;
        # to work around we use string IDs rather than int IDs
        # so it can match with the string that it gets from a POST
        self.user_id = "myuserid"
        self.user_group_id = "77"
        self.user_name = "Test User With_Underscores"
        self.query_id = "66"
        self.rev_id = "88"
        self.run_id = "44"
        self.star_id = "22"
        self.resultset_id = 1
        self.connection_id = 1
        self.format = "json"
        self.complete_status = 4
        self.complete_status_msg = "complete"
        self.client = client
        # Fake DB handler that anticipates upcoming queries:
        ug = UserGroup(id=self.user_group_id, user_id=self.user_id, group_name="root")
        u = User(id=self.user_id, username=self.user_name, wiki_uid="Test user")
        q = Query(
            id=self.query_id,
            description="fake query entry",
            user=u,
            user_id=self.user_id,
            title="a query with a grand title",
            last_touched=datetime.utcnow(),
        )
        # Revision/run/star are wired to the query above so relationship
        # traversal in the handlers finds consistent objects.
        r = QueryRevision(id=self.rev_id, latest_run_id=self.run_id, query=q)
        qr = QueryRun(
            timestamp=datetime.utcnow(),
            status=self.complete_status,
            id=self.run_id,
            query_rev_id=self.rev_id,
            task_id="task_id",
            extra_info='{ "what_is_this": "extra_info"}',
            rev=r,
        )
        s = Star(
            id=self.star_id,
            user_id=self.user_id,
            timestamp=datetime.utcnow(),
            query_id=self.query_id,
        )
        self.db_session = UnifiedAlchemyMagicMock()
        # One of each type of object we'll be asked for
        self.db_session.add(u)
        self.db_session.add(ug)
        self.db_session.add(qr)
        self.db_session.add(r)
        self.db_session.add(q)
        self.db_session.add(s)
        mocker.patch(
            "quarry.web.connections.Connections.session",
            new_callable=mocker.PropertyMock(return_value=self.db_session),
        )
        # Simulate being logged in and authorized
        with self.client.session_transaction() as flask_sess:
            flask_sess["user_id"] = self.user_id
            flask_sess["preferences"] = {"breakfast": "waffles", "lunch": "tacos"}

    def test_sudo(self, mocker):
        """/sudo/<id> checks sudo group membership then redirects home."""
        response = self.client.get("/sudo/%s" % self.user_id)
        # The handler must have looked up the target user and the sudo group.
        self.db_session.filter.assert_has_calls([mocker.call(User.id == self.user_id)])
        self.db_session.assert_has_calls([mocker.call.query(UserGroup)])
        self.db_session.filter.assert_has_calls(
            [
                mocker.call.get(
                    (UserGroup.user_id == self.user_id),
                    (UserGroup.group_name == "sudo"),
                )
            ]
        )
        assert response.headers["Location"] == "http://localhost/"
        assert response.status_code == 302

    def test_user_page(self, mocker):
        """/<username> looks the user up case-insensitively and renders 200."""
        mocker.patch("quarry.web.user.get_user", return_value=None)
        response = self.client.get("/%s" % self.user_name)
        # The route matches usernames lowercased with underscores as spaces.
        test_user_name = self.user_name.replace("_", " ").lower()
        self.db_session.filter.assert_has_calls(
            [mocker.call(func.lower(User.username) == test_user_name)]
        )
        assert response.status_code == 200
|
#!/bin/python
import datetime
class employee:
    """Simple record of an employee and their employment dates.

    Dates are stored as 'YYYY-MM-DD' strings and compared with
    ``date_diff``.
    """

    # Default field values (all empty until set_all is called).
    first = ""
    last = ""
    department = ""
    start_date = ""
    end_date = ""

    def set_all(self, first, last, depart, start, end):
        """Populate every field at once.

        :param first: first name
        :param last: last name
        :param depart: department name
        :param start: start date, 'YYYY-MM-DD'
        :param end: end date, 'YYYY-MM-DD'
        """
        self.first = first
        self.last = last
        self.department = depart
        self.start_date = start
        self.end_date = end

    def date_diff(self, date1, date2):
        """Return ``date2 - date1`` as a ``datetime.timedelta``.

        Bug fix: the strptime format string must be a quoted string
        literal — the original ``strptime(date1,%Y-%m-%d)`` was a syntax
        error.
        """
        datetime1 = datetime.datetime.strptime(date1, "%Y-%m-%d")
        datetime2 = datetime.datetime.strptime(date2, "%Y-%m-%d")
        return datetime2 - datetime1
# Demo: days remaining from today until the test employee's end date.
# Fixed: removed escaped-quote extraction residue, semicolons, and the
# Python-2 print statement so the script actually parses.
test_employee = employee()
test_employee.set_all("Abel", "Gancsos", "QA-Unix", "2013-07-24", "2013-12-31")
now = str(datetime.datetime.now())
now_date = now[0:10]  # 'YYYY-MM-DD' prefix of the ISO timestamp
print(test_employee.date_diff(now_date, test_employee.end_date))
|
import socket
import time
from menu import *
DEBUG = 0
class Client(object):
    """TCP client that connects to the game server and relays console I/O.

    NOTE(review): this is Python 2 code (print statements, raw_input).
    `sys` is used below but not imported here — presumably it arrives via
    `from menu import *`; confirm, otherwise the error paths will raise
    NameError instead of exiting cleanly.
    """

    def __init__(self,host="localhost",port=9046):
        # Create the socket, then connect; either failure exits the process.
        try:
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            if DEBUG:
                print "Socket Created."
        except socket.error:
            if DEBUG:
                print "Failed to create socket"
            sys.exit()
        self.host = host
        self.port = port
        self.game = None
        try:
            self.s.connect((self.host,self.port))
            print "[+] Connected To Server Successful."
            # Read and display the server's greeting banner.
            data = self.s.recv(1024)
            print data
        except socket.error:
            print "[-] Failed To Connect To Server."
            sys.exit()

    def receive(self):
        # Single blocking read; 16 KB is assumed to cover one server message.
        return self.s.recv(16384)

    def send(self):
        # Prompt until the user types a non-empty line, then send it.
        # The short sleep spaces out writes to the server.
        while True:
            user_response = raw_input("[Enter Reponse]: ")
            if user_response != "":
                time.sleep(0.1)
                self.s.send(user_response)
                return
            print "[-] It Looks Like You Entered Nothing..."

    def exit(self, message):
        # True when the server message contains the "quit " marker.
        if len(message.split("quit ")) > 1:
            return True
        return False

    def response_required(self, message):
        # True when the server message contains an "@" marker
        # (the server uses "@" to delimit a prompt needing a reply).
        if len(message.split("@")) > 1:
            return True
        return False
if __name__ == "__main__":
    client = Client()
    # Main loop: wait for server data, answer when prompted, quit on request.
    while True:
        data = client.receive()
        if client.response_required(data):
            # "@" delimits the portion of the message to display as a prompt.
            print data.split("@")[1]
            client.send()
        elif client.exit(data):
            print data.split("quit")[1]
            sys.exit()
        else:
            if data != "":
                print data
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author: Jialiang Shi
from sonarqube.utils.rest_client import RestClient
from sonarqube.utils.config import (
API_PROJECTS_BULK_DELETE_ENDPOINT,
API_PROJECTS_SEARCH_ENDPOINT,
API_PROJECTS_CREATE_ENDPOINT,
API_PROJECTS_DELETE_ENDPOINT,
API_PROJECTS_UPDATE_VISIBILITY_ENDPOINT,
API_PROJECTS_UPDATE_KEY_ENDPOINT
)
class SonarQubeProjects(RestClient):
    """
    SonarQube projects Operations
    """

    def __init__(self, **kwargs):
        """
        :param kwargs: passed through to RestClient
        """
        super(SonarQubeProjects, self).__init__(**kwargs)

    def __getitem__(self, key):
        """
        Return the project whose key is exactly `key`.

        NOTE: returns None (rather than raising KeyError) when no project
        matches; existing callers rely on this, so it is kept.

        :param key: project key
        :return: project dict, or None when not found
        """
        result = list(self.search_projects(projects=key))
        for project in result:
            if project['key'] == key:
                return project

    def search_projects(self, analyzedBefore=None, onProvisionedOnly=False, projects=None, q=None, qualifiers="TRK"):
        """
        Search for projects or views to administrate them. Yields one
        component dict per project, transparently following pagination.

        :param analyzedBefore: Filter the projects for which last analysis is older than the given date (exclusive).
          Either a date (server timezone) or datetime can be provided.
        :param onProvisionedOnly: Filter the projects that are provisioned.
          Possible values are: True or False. Default value is False.
        :param projects: Comma-separated list of project keys
        :param q:
          Limit search to:
            * component names that contain the supplied string
            * component keys that contain the supplied string
        :param qualifiers: Comma-separated list of component qualifiers. Filter the results with the specified
          qualifiers. Possible values are:
            * TRK
            * VW
            * APP
          Default value is TRK.
        :return: generator of component dicts
        """
        params = {
            # Conditional expression instead of the fragile `x and 'true' or 'false'` idiom.
            'onProvisionedOnly': 'true' if onProvisionedOnly else 'false',
            'qualifiers': qualifiers.upper()
        }
        # Priming values chosen so the first loop test (1 * 1 < 2) passes;
        # the real paging numbers are read from the first response.
        page_num = 1
        page_size = 1
        total = 2
        if analyzedBefore:
            params.update({'analyzedBefore': analyzedBefore})
        if projects:
            params.update({'projects': projects})
        if q:
            params.update({'q': q})
        while page_num * page_size < total:
            resp = self.get(API_PROJECTS_SEARCH_ENDPOINT, params=params)
            response = resp.json()
            page_num = response['paging']['pageIndex']
            page_size = response['paging']['pageSize']
            total = response['paging']['total']
            # Request the next page on the following iteration.
            params['p'] = page_num + 1
            for component in response['components']:
                yield component

    def create_project(self, project, name, visibility=None):
        """
        Create a project.

        :param project: Key of the project
        :param name: Name of the project. If name is longer than 500, it is abbreviated.
        :param visibility: Whether the created project should be visible to everyone, or only specific user/groups.
          If no visibility is specified, the default project visibility of the organization will be used.
          Possible values are:
            * private
            * public
        :return: request response
        """
        params = {
            'name': name,
            'project': project
        }
        if visibility:
            params.update({'visibility': visibility})
        return self.post(API_PROJECTS_CREATE_ENDPOINT, params=params)

    def delete_project(self, project):
        """
        Delete a project.

        :param project: Project key
        :return:
        """
        params = {
            'project': project
        }
        self.post(API_PROJECTS_DELETE_ENDPOINT, params=params)

    def bulk_delete_projects(self, analyzedBefore=None, onProvisionedOnly=False, projects=None,
                             q=None, qualifiers="TRK"):
        """
        Delete one or several projects.
        At least one parameter is required among analyzedBefore, projects, projectIds (deprecated since 6.4) and q

        :param analyzedBefore: Filter the projects for which last analysis is older than the given date (exclusive).
          Either a date (server timezone) or datetime can be provided.
        :param onProvisionedOnly: Filter the projects that are provisioned.
          Possible values are: True or False. Default value is False.
        :param projects: Comma-separated list of project keys
        :param q:
          Limit to:
            * component names that contain the supplied string
            * component keys that contain the supplied string
        :param qualifiers: Comma-separated list of component qualifiers. Filter the results with the specified
          qualifiers. Possible values are:
            * TRK
            * VW
            * APP
          Default value is TRK.
        :return:
        """
        params = {
            # Same idiom fix as search_projects.
            'onProvisionedOnly': 'true' if onProvisionedOnly else 'false',
            'qualifiers': qualifiers.upper()
        }
        if analyzedBefore:
            params.update({'analyzedBefore': analyzedBefore})
        if projects:
            params.update({'projects': projects})
        if q:
            params.update({'q': q})
        self.post(API_PROJECTS_BULK_DELETE_ENDPOINT, params=params)

    def update_project_key(self, previous_project_key, new_project_key):
        """
        Update a project or module key and all its sub-components keys.

        :param previous_project_key: Project or module key
        :param new_project_key: New component key
        :return:
        """
        params = {
            'from': previous_project_key,
            'to': new_project_key
        }
        self.post(API_PROJECTS_UPDATE_KEY_ENDPOINT, params=params)

    def update_project_visibility(self, project, visibility):
        """
        Updates visibility of a project.

        :param project: Project key
        :param visibility: New visibility
        :return:
        """
        params = {
            'project': project,
            'visibility': visibility
        }
        self.post(API_PROJECTS_UPDATE_VISIBILITY_ENDPOINT, params=params)
|
from django.conf.urls import url
from . import views
# Route any request path beginning with "door/" to the changeState view.
# NOTE(review): the regex has no trailing "$", so "door/<anything>" also
# matches — confirm that is intended.
urlpatterns = [
    url(r'^door/', views.changeState, name='changeState'),
]
from ..en import Provider as PersonProvider
class Provider(PersonProvider):
    """Locale person provider that reuses the English implementation as-is."""
    pass
|
from data import *
def test_abn_lookup(py):
    """Verify search abn"""
    # GIVEN ABN lookup page has loaded
    # WHEN user types search query into Search field
    # AND clicks search button
    # THEN results contain expected results, such as ABN number, Name, Type, Location
    # BASE_URL, the selectors, and expected_results all come from the
    # star-import of `data`.
    py.visit(BASE_URL)
    py.get(SEARCH_INPUT_FIELD).should().be_visible().type(SEARCH_QUERY)
    py.get(SEARCH_BUTTON).should().be_clickable().click()
    # Drop the header row of the results table.
    search_results_rows = py.find(RESULTS_TABLE_ROWS)[1:]
    for expected_result in expected_results:
        # NOTE(review): this length assertion is loop-invariant; it could be
        # hoisted above the loop without changing behavior.
        assert len(search_results_rows) > 4
        assert any((expected_result.name in row.text()) for row in search_results_rows)
        # Locate the row containing the expected name and check its cells.
        found_row = py.contains(expected_result.name).parent()
        assert expected_result.abn_number in found_row.get("a").text()
        assert expected_result.status in found_row.get("span").text()
        assert found_row.contains(expected_result.name)
        assert found_row.contains(expected_result.type)
        assert expected_result.location in found_row.text()
def test_abn_details(py):
    """Verify Current details for ABN page"""
    # GIVEN ABN lookup page with the results has loaded
    # WHEN user clicks ABN link
    # THEN user is redirected to the Current details for ABN page
    # AND ABN details match expected details
    py.visit(BASE_URL)
    py.get(SEARCH_INPUT_FIELD).should().be_visible().type(SEARCH_QUERY)
    py.get(SEARCH_BUTTON).should().be_clickable().click()
    # NOTE(review): `automic_pty_ptd` vs `automic_pty_ltd_details` — both
    # come from the `data` star-import; confirm the first is not a typo
    # for a *_ltd fixture name.
    found_row = py.contains(automic_pty_ptd.name).parent()
    found_row.get("[href]").should().be_clickable().click()
    assert py.url().endswith(automic_pty_ltd_details.abn_number)
    assert ABN_DETAILS_PAGE_H1 in py.get("h1").text()
    assert automic_pty_ltd_details.entity_name in py.get(ENTITY_NAME).text()
    assert automic_pty_ltd_details.abn_status in py.contains("ABN status:").parent().get("td").text()
    assert automic_pty_ltd_details.entity_type in py.contains("Entity type:").parent().get("a").text()
    assert automic_pty_ltd_details.business_location in py.get(LOCATION).text()
    for business_name in automic_pty_ltd_details.business_names:
        assert business_name in py.contains("Business name(s)").parent().text()
|
from kat.harness import Query, EDGE_STACK
from abstract_tests import AmbassadorTest, ServiceType, HTTP
# STILL TO ADD:
# Host referencing a Secret in another namespace?
# Mappings without host attributes (infer via Host resource)
# Host where a TLSContext with the inferred name already exists
class HostCRDSingle(AmbassadorTest):
    """
    HostCRDSingle: a single Host with a manually-configured TLS. Since the Host is handling the
    TLSContext, we expect both OSS and Edge Stack to redirect cleartext from 8080 to 8443 here.
    """
    # Backend service the Mapping routes to.
    target: ServiceType

    def init(self):
        # Disable Edge-Stack-specific cleartext/redirect special-casing so
        # OSS and Edge Stack are exercised the same way in this test.
        self.edge_stack_cleartext_host = False
        self.allow_edge_stack_redirect = False
        self.target = HTTP()

    def manifests(self) -> str:
        # Secret + Host + Mapping: the Host references the Secret for TLS,
        # and the Mapping carries the hostname label the Host selects on.
        return self.format('''
---
apiVersion: v1
kind: Secret
metadata:
  name: {self.name.k8s}-secret
  labels:
    kat-ambassador-id: {self.ambassador_id}
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
---
apiVersion: getambassador.io/v2
kind: Host
metadata:
  name: {self.name.k8s}-host
  labels:
    kat-ambassador-id: {self.ambassador_id}
spec:
  ambassador_id: [ {self.ambassador_id} ]
  hostname: {self.path.fqdn}
  acmeProvider:
    authority: none
  tlsSecret:
    name: {self.name.k8s}-secret
  selector:
    matchLabels:
      hostname: {self.path.fqdn}
---
apiVersion: getambassador.io/v2
kind: Mapping
metadata:
  name: {self.name.k8s}-target-mapping
  labels:
    hostname: {self.path.fqdn}
spec:
  ambassador_id: [ {self.ambassador_id} ]
  prefix: /target/
  service: {self.target.path.fqdn}
''') + super().manifests()

    def scheme(self) -> str:
        return "https"

    def queries(self):
        # HTTPS to the target should succeed (insecure=True skips cert
        # verification for the self-signed cert); cleartext HTTP should be
        # redirected (301) to HTTPS.
        yield Query(self.url("target/"), insecure=True)
        yield Query(self.url("target/", scheme="http"), expected=301)
class HostCRDNo8080(AmbassadorTest):
    """
    HostCRDNo8080: a single Host with manually-configured TLS that explicitly turns off redirection
    from 8080.
    """
    # Backend service the Mapping routes to.
    target: ServiceType

    def init(self):
        # Disable Edge-Stack-specific cleartext/redirect special-casing.
        self.edge_stack_cleartext_host = False
        self.allow_edge_stack_redirect = False
        self.target = HTTP()

    def manifests(self) -> str:
        # Same as HostCRDSingle except the Host sets
        # requestPolicy.insecure.additionalPort: -1, which disables the
        # cleartext 8080 listener entirely.
        return self.format('''
---
apiVersion: v1
kind: Secret
metadata:
  name: {self.name.k8s}-secret
  labels:
    kat-ambassador-id: {self.ambassador_id}
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
---
apiVersion: getambassador.io/v2
kind: Host
metadata:
  name: {self.name.k8s}-host
  labels:
    kat-ambassador-id: {self.ambassador_id}
spec:
  ambassador_id: [ {self.ambassador_id} ]
  hostname: {self.path.fqdn}
  acmeProvider:
    authority: none
  tlsSecret:
    name: {self.name.k8s}-secret
  selector:
    matchLabels:
      hostname: {self.path.fqdn}
  requestPolicy:
    insecure:
      additionalPort: -1
---
apiVersion: getambassador.io/v2
kind: Mapping
metadata:
  name: {self.name.k8s}-target-mapping
  labels:
    hostname: {self.path.fqdn}
spec:
  ambassador_id: [ {self.ambassador_id} ]
  prefix: /target/
  service: {self.target.path.fqdn}
''') + super().manifests()

    def scheme(self) -> str:
        return "https"

    def queries(self):
        # HTTPS still works; cleartext must fail. Edge Stack serves a 404
        # on 8080, while OSS has no listener at all (EOF / refused).
        yield Query(self.url("target/"), insecure=True)
        if EDGE_STACK:
            yield Query(self.url("target/", scheme="http"), expected=404)
        else:
            yield Query(self.url("target/", scheme="http"), error=[ "EOF", "connection refused" ])
class HostCRDManualContext(AmbassadorTest):
    """
    A single Host with a manually-specified TLS secret and a manually-specified TLSContext,
    too. Since the Host is _not_ handling the TLSContext, we do _not_ expect automatic redirection
    on port 8080.
    """
    # Backend service the Mapping routes to.
    target: ServiceType

    def init(self):
        # Disable Edge-Stack-specific cleartext/redirect special-casing.
        self.edge_stack_cleartext_host = False
        self.allow_edge_stack_redirect = False
        self.target = HTTP()

    def manifests(self) -> str:
        # Secret + Host + explicit TLSContext + Mapping. The TLSContext
        # (not the Host) owns TLS here and pins min/max TLS versions to
        # v1.2..v1.3, which queries() below verifies.
        return self.format('''
---
apiVersion: v1
kind: Secret
metadata:
  name: manual-secret
  labels:
    kat-ambassador-id: {self.ambassador_id}
type: kubernetes.io/tls
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
---
apiVersion: getambassador.io/v2
kind: Host
metadata:
  name: manual-host
  labels:
    kat-ambassador-id: {self.ambassador_id}
spec:
  ambassador_id: [ {self.ambassador_id} ]
  hostname: {self.path.fqdn}
  acmeProvider:
    authority: none
  selector:
    matchLabels:
      hostname: manual-hostname
  tlsSecret:
    name: manual-secret
---
apiVersion: getambassador.io/v2
kind: TLSContext
metadata:
  name: manual-host-context
  labels:
    kat-ambassador-id: {self.ambassador_id}
spec:
  ambassador_id: [ {self.ambassador_id} ]
  hosts:
  - {self.path.fqdn}
  secret: manual-secret
  min_tls_version: v1.2
  max_tls_version: v1.3
---
apiVersion: getambassador.io/v2
kind: Mapping
metadata:
  name: manual-target-mapping
  labels:
    hostname: manual-hostname
spec:
  ambassador_id: [ {self.ambassador_id} ]
  prefix: /target/
  service: {self.target.path.fqdn}
''') + super().manifests()

    def scheme(self) -> str:
        return "https"

    def queries(self):
        # A TLS 1.2/1.3 handshake must succeed...
        yield Query(self.url("target/"), insecure=True,
                    minTLSv="v1.2", maxTLSv="v1.3")
        # ...while a TLS 1.0-only client must be rejected; the accepted
        # error strings cover different Go TLS library versions.
        yield Query(self.url("target/"), insecure=True,
                    minTLSv="v1.0", maxTLSv="v1.0",
                    error=["tls: server selected unsupported protocol version 303",
                           "tls: no supported versions satisfy MinVersion and MaxVersion",
                           "tls: protocol version not supported"])
        # No automatic cleartext redirect: Edge Stack 404s, OSS refuses.
        if EDGE_STACK:
            yield Query(self.url("target/", scheme="http"), expected=404)
        else:
            yield Query(self.url("target/", scheme="http"), error=[ "EOF", "connection refused" ])
class HostCRDClearText(AmbassadorTest):
    """
    A single Host specifying cleartext only. Since it's just cleartext, no redirection comes
    into play.
    """
    # Backend service the Mapping routes to.
    target: ServiceType

    def init(self):
        # Disable Edge-Stack-specific cleartext/redirect special-casing.
        self.edge_stack_cleartext_host = False
        self.allow_edge_stack_redirect = False
        self.target = HTTP()

    def manifests(self) -> str:
        # No Secret and no TLS: the Host routes insecure traffic directly
        # (requestPolicy.insecure.action: Route).
        return self.format('''
---
apiVersion: getambassador.io/v2
kind: Host
metadata:
  name: cleartext-host
  labels:
    kat-ambassador-id: {self.ambassador_id}
spec:
  ambassador_id: [ {self.ambassador_id} ]
  hostname: {self.path.fqdn}
  acmeProvider:
    authority: none
  selector:
    matchLabels:
      hostname: host-cleartext
  requestPolicy:
    insecure:
      action: Route
---
apiVersion: getambassador.io/v2
kind: Mapping
metadata:
  name: cleartext-target-mapping
  labels:
    hostname: host-cleartext
spec:
  ambassador_id: [ {self.ambassador_id} ]
  prefix: /target/
  service: {self.target.path.fqdn}
''') + super().manifests()

    def scheme(self) -> str:
        return "http"

    def queries(self):
        # Cleartext HTTP is routed; HTTPS has no listener and must fail.
        yield Query(self.url("target/"), insecure=True)
        yield Query(self.url("target/", scheme="https"),
                    error=[ "EOF", "connection refused" ])
class HostCRDDouble(AmbassadorTest):
    """
    HostCRDDouble: two Hosts with manually-configured TLS secrets, and Mappings specifying host matches.
    Since the Hosts are handling TLSContexts, we expect both OSS and Edge Stack to redirect cleartext
    from 8080 to 8443 here.
    XXX In the future, the hostname matches should be unnecessary.
    """
    target1: ServiceType
    target2: ServiceType

    def init(self):
        # Disable Edge Stack's automatic cleartext Host / redirect handling:
        # the two Hosts defined in manifests() provide all TLS configuration,
        # and each Host gets its own backend so check() can tell them apart.
        self.edge_stack_cleartext_host = False
        self.allow_edge_stack_redirect = False
        self.target1 = HTTP(name="target1")
        self.target2 = HTTP(name="target2")

    def manifests(self) -> str:
        # Three TLS Secrets (only -1 and -2 are referenced by the Hosts below),
        # two Hosts keyed to distinct SNI hostnames, and one Mapping per Host.
        # NOTE(review): each Host spec repeats the `hostname:` key (before and
        # after acmeProvider) — a duplicate YAML mapping key; confirm intentional.
        return self.format('''
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURwakNDQW82Z0F3SUJBZ0lKQUpxa1Z4Y1RtQ1FITUEwR0NTcUdTSWIzRFFFQkN3VUFNR2d4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVJFd0R3WURWUVFLREFoRQpZWFJoZDJseVpURVVNQklHQTFVRUN3d0xSVzVuYVc1bFpYSnBibWN4RWpBUUJnTlZCQU1NQ1d4dlkyRnNhRzl6CmREQWVGdzB4T0RFd01UQXhNREk1TURKYUZ3MHlPREV3TURjeE1ESTVNREphTUdneEN6QUpCZ05WQkFZVEFsVlQKTVFzd0NRWURWUVFJREFKTlFURVBNQTBHQTFVRUJ3d0dRbTl6ZEc5dU1SRXdEd1lEVlFRS0RBaEVZWFJoZDJseQpaVEVVTUJJR0ExVUVDd3dMUlc1bmFXNWxaWEpwYm1jeEVqQVFCZ05WQkFNTUNXeHZZMkZzYUc5emREQ0NBU0l3CkRRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFMcTZtdS9FSzlQc1Q0YkR1WWg0aEZPVnZiblAKekV6MGpQcnVzdXcxT05MQk9jT2htbmNSTnE4c1FyTGxBZ3NicDBuTFZmQ1pSZHQ4UnlOcUFGeUJlR29XS3IvZAprQVEybVBucjBQRHlCTzk0UHo4VHdydDBtZEtEU1dGanNxMjlOYVJaT0JqdStLcGV6RytOZ3pLMk04M0ZtSldUCnFYdTI3ME9pOXlqb2VGQ3lPMjdwUkdvcktkQk9TcmIwd3ozdFdWUGk4NFZMdnFKRWprT0JVZjJYNVF3b25XWngKMktxVUJ6OUFSZVVUMzdwUVJZQkJMSUdvSnM4U042cjF4MSt1dTNLdTVxSkN1QmRlSHlJbHpKb2V0aEp2K3pTMgowN0pFc2ZKWkluMWNpdXhNNzNPbmVRTm1LUkpsL2NEb3BLemswSldRSnRSV1NnbktneFNYWkRrZjJMOENBd0VBCkFhTlRNRkV3SFFZRFZSME9CQllFRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1COEdBMVVkSXdRWU1CYUEKRkJoQzdDeVRpNGFkSFVCd0wvTkZlRTZLdnFIRE1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSFJvb0xjcFdEa1IyMEhENEJ5d1BTUGRLV1hjWnN1U2tXYWZyekhoYUJ5MWJZcktIR1o1CmFodFF3L1gwQmRnMWtidlpZUDJSTzdGTFhBSlNTdXVJT0NHTFVwS0pkVHE1NDREUThNb1daWVZKbTc3UWxxam0KbHNIa2VlTlRNamFOVjdMd0MzalBkMERYelczbGVnWFRoYWpmZ2dtLzBJZXNGRzBVWjFEOTJHNURmc0hLekpSagpNSHZyVDNtVmJGZjkrSGJhRE4yT2g5VjIxUWhWSzF2M0F2dWNXczhUWCswZHZFZ1dtWHBRcndEd2pTMU04QkRYCldoWjVsZTZjVzhNYjhnZmRseG1JckpnQStuVVZzMU9EbkJKS1F3MUY4MVdkc25tWXdweVUrT2xVais4UGt1TVoKSU4rUlhQVnZMSWJ3czBmamJ4UXRzbTArZVBpRnN2d0NsUFk9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQzZ1cHJ2eEN2VDdFK0cKdzdtSWVJUlRsYjI1ejh4TTlJejY3ckxzTlRqU3dUbkRvWnAzRVRhdkxFS3k1UUlMRzZkSnkxWHdtVVhiZkVjagphZ0JjZ1hocUZpcS8zWkFFTnBqNTY5RHc4Z1R2ZUQ4L0U4SzdkSm5TZzBsaFk3S3R2VFdrV1RnWTd2aXFYc3h2CmpZTXl0alBOeFppVms2bDd0dTlEb3ZjbzZIaFFzanR1NlVScUt5blFUa3EyOU1NOTdWbFQ0dk9GUzc2aVJJNUQKZ1ZIOWwrVU1LSjFtY2RpcWxBYy9RRVhsRTkrNlVFV0FRU3lCcUNiUEVqZXE5Y2RmcnJ0eXJ1YWlRcmdYWGg4aQpKY3lhSHJZU2IvczB0dE95UkxIeVdTSjlYSXJzVE85enAza0RaaWtTWmYzQTZLU3M1TkNWa0NiVVZrb0p5b01VCmwyUTVIOWkvQWdNQkFBRUNnZ0VBSVFsZzNpamNCRHViK21Eb2syK1hJZDZ0V1pHZE9NUlBxUm5RU0NCR2RHdEIKV0E1Z2NNNTMyVmhBV0x4UnR6dG1ScFVXR0dKVnpMWlpNN2ZPWm85MWlYZHdpcytkYWxGcWtWVWFlM2FtVHVQOApkS0YvWTRFR3Nnc09VWSs5RGlZYXRvQWVmN0xRQmZ5TnVQTFZrb1JQK0FrTXJQSWFHMHhMV3JFYmYzNVp3eFRuCnd5TTF3YVpQb1oxWjZFdmhHQkxNNzlXYmY2VFY0WXVzSTRNOEVQdU1GcWlYcDNlRmZ4L0tnNHhtYnZtN1JhYzcKOEJ3Z3pnVmljNXlSbkVXYjhpWUh5WGtyazNTL0VCYUNEMlQwUjM5VmlVM1I0VjBmMUtyV3NjRHowVmNiVWNhKwpzeVdyaVhKMHBnR1N0Q3FWK0dRYy9aNmJjOGt4VWpTTWxOUWtudVJRZ1FLQmdRRHpwM1ZaVmFzMTA3NThVT00rCnZUeTFNL0V6azg4cWhGb21kYVFiSFRlbStpeGpCNlg3RU9sRlkya3JwUkwvbURDSEpwR0MzYlJtUHNFaHVGSUwKRHhSQ2hUcEtTVmNsSytaaUNPaWE1ektTVUpxZnBOcW15RnNaQlhJNnRkNW9mWk42aFpJVTlJR2RUaGlYMjBONwppUW01UnZlSUx2UHVwMWZRMmRqd2F6Ykgvd0tCZ1FERU1MN21Mb2RqSjBNTXh6ZnM3MW1FNmZOUFhBMVY2ZEgrCllCVG4xS2txaHJpampRWmFNbXZ6dEZmL1F3Wkhmd3FKQUVuNGx2em5ncUNzZTMvUElZMy8zRERxd1p2NE1vdy8KRGdBeTBLQmpQYVJGNjhYT1B1d0VuSFN1UjhyZFg2UzI3TXQ2cEZIeFZ2YjlRRFJuSXc4a3grSFVreml4U0h5Ugo2NWxESklEdlFRS0JnUURpQTF3ZldoQlBCZk9VYlpQZUJydmhlaVVycXRob29BemYwQkJCOW9CQks1OHczVTloCjdQWDFuNWxYR3ZEY2x0ZXRCbUhEK3RQMFpCSFNyWit0RW5mQW5NVE5VK3E2V0ZhRWFhOGF3WXR2bmNWUWdTTXgKd25oK1pVYm9udnVJQWJSajJyTC9MUzl1TTVzc2dmKy9BQWM5RGs5ZXkrOEtXY0Jqd3pBeEU4TGxFUUtCZ0IzNwoxVEVZcTFoY0I4Tk1MeC9tOUtkN21kUG5IYUtqdVpSRzJ1c1RkVWNxajgxdklDbG95MWJUbVI5Si93dXVQczN4ClhWekF0cVlyTUtNcnZMekxSQWgyZm9OaVU1UDdKYlA5VDhwMFdBN1N2T2h5d0NobE5XeisvRlltWXJxeWcxbngKbHFlSHRYNU03REtJUFhvRndhcTlZYVk3V2M2K1pVdG4xbVNNajZnQkFvR0JBSTgwdU9iTkdhRndQTVYrUWhiZApBelkrSFNGQjBkWWZxRytzcTBmRVdIWTNHTXFmNFh0aVRqUEFjWlg3RmdtT3Q5Uit3TlFQK0dFNjZoV0JpKzBWCmVLV3prV0lXeS9sTVZCSW0zVWtlSlRCT3NudTFVaGhXbm5WVDhFeWhEY1FxcndPSGlhaUo3bFZSZmRoRWFyQysKSnpaU0czOHVZUVlyc0lITnRVZFgySmdPCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
  name: test-tlscontext-secret-0
  labels:
    kat-ambassador-id: hostdouble
type: kubernetes.io/tls
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUpycUl0ekY2MTBpTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB4TUI0WERURTRNVEV3TVRFek5UTXhPRm9YCkRUSTRNVEF5T1RFek5UTXhPRm93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRFd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUM5T2dDOHd4eUlyUHpvCkdYc0xwUEt0NzJERXgyd2p3VzhuWFcyd1dieWEzYzk2bjJuU0NLUEJuODVoYnFzaHpqNWloU1RBTURJb2c5RnYKRzZSS1dVUFhUNEtJa1R2M0NESHFYc0FwSmxKNGxTeW5ReW8yWnYwbytBZjhDTG5nWVpCK3JmenRad3llRGhWcAp3WXpCVjIzNXp6NisycWJWbUNabHZCdVhiVXFUbEVZWXZ1R2xNR3o3cFBmT1dLVXBlWW9kYkcyZmIraEZGcGVvCkN4a1VYclFzT29SNUpkSEc1aldyWnVCTzQ1NVNzcnpCTDhSbGU1VUhvMDVXY0s3YkJiaVF6MTA2cEhDSllaK3AKdmxQSWNOU1g1S2gzNEZnOTZVUHg5bFFpQTN6RFRLQmZ5V2NMUStxMWNabExjV2RnUkZjTkJpckdCLzdyYTFWVApnRUplR2tQekFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFmCkJnTlZIU01FR0RBV2dCUkRWVUtYWWJsRFdNTzE3MUJuWWZhYlkzM0NFVEFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBUE8vRDRUdDUyWHJsQ0NmUzZnVUVkRU5DcnBBV05YRHJvR2M2dApTVGx3aC8rUUxRYk5hZEtlaEtiZjg5clhLaituVXF0cS9OUlpQSXNBSytXVWtHOVpQb1FPOFBRaVY0V1g1clE3CjI5dUtjSmZhQlhrZHpVVzdxTlFoRTRjOEJhc0JySWVzcmtqcFQ5OVF4SktuWFFhTitTdzdvRlBVSUFOMzhHcWEKV2wvS1BNVHRicWt3eWFjS01CbXExVkx6dldKb0g1Q2l6Skp3aG5rWHh0V0tzLzY3clROblBWTXorbWVHdHZTaQpkcVg2V1NTbUdMRkVFcjJoZ1VjQVpqazNWdVFoLzc1aFh1K1UySXRzQys1cXBsaEc3Q1hzb1huS0t5MVhsT0FFCmI4a3IyZFdXRWs2STVZNm5USnpXSWxTVGtXODl4d1hyY3RtTjlzYjlxNFNuaVZsegotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQzlPZ0M4d3h5SXJQem8KR1hzTHBQS3Q3MkRFeDJ3andXOG5YVzJ3V2J5YTNjOTZuMm5TQ0tQQm44NWhicXNoemo1aWhTVEFNRElvZzlGdgpHNlJLV1VQWFQ0S0lrVHYzQ0RIcVhzQXBKbEo0bFN5blF5bzJadjBvK0FmOENMbmdZWkIrcmZ6dFp3eWVEaFZwCndZekJWMjM1eno2KzJxYlZtQ1psdkJ1WGJVcVRsRVlZdnVHbE1HejdwUGZPV0tVcGVZb2RiRzJmYitoRkZwZW8KQ3hrVVhyUXNPb1I1SmRIRzVqV3JadUJPNDU1U3NyekJMOFJsZTVVSG8wNVdjSzdiQmJpUXoxMDZwSENKWVorcAp2bFBJY05TWDVLaDM0Rmc5NlVQeDlsUWlBM3pEVEtCZnlXY0xRK3ExY1psTGNXZGdSRmNOQmlyR0IvN3JhMVZUCmdFSmVHa1B6QWdNQkFBRUNnZ0VBQmFsN3BpcE1hMGFKMXNRVWEzZkhEeTlQZlBQZXAzODlQVGROZGU1cGQxVFYKeFh5SnBSQS9IaWNTL05WYjU0b05VZE5jRXlnZUNCcFJwUHAxd3dmQ3dPbVBKVmo3SzF3aWFqbmxsQldpZUJzMgpsOWFwcDdFVE9DdWJ5WTNWU2dLQldWa0piVzBjOG9uSFdEL0RYM0duUjhkTXdGYzRrTUdadkllUlo4bU1acmdHCjZPdDNKOHI2eVZsZWI2OGF1WmtneXMwR2VGc3pNdVRubHJCOEw5djI1UUtjVGtESjIvRWx1Y1p5aER0eGF0OEIKTzZOUnNubmNyOHhwUVdPci9sV3M5VVFuZEdCdHFzbXMrdGNUN1ZUNU9UanQ4WHY5NVhNSHB5Z29pTHk3czhvYwpJMGprNDJabzRKZW5JT3c2Rm0weUFEZ0E3eWlXcks0bEkzWGhqaTVSb1FLQmdRRGRqaWNkTUpYVUZWc28rNTJkCkUwT2EwcEpVMFNSaC9JQmdvRzdNakhrVWxiaXlpR1pNanA5MEo5VHFaL1ErM1pWZVdqMmxPSWF0OG5nUzB6MDAKVzA3T1ZxYXprMVNYaFZlY2tGNWFEcm5PRDNhU2VWMSthV3JUdDFXRWdqOVFxYnJZYVA5emd4UkpkRzV3WENCUApGNDNFeXE5ZEhXOWF6SSt3UHlJQ0JqNnZBd0tCZ1FEYXBTelhPR2ViMi9SMWhlWXdWV240czNGZEtYVjgzemtTCnFSWDd6d1pLdkk5OGMybDU1Y1ZNUzBoTGM0bTVPMXZCaUd5SG80eTB2SVAvR0k0Rzl4T1FhMXdpVnNmUVBiSU4KLzJPSDFnNXJLSFdCWVJUaHZGcERqdHJRU2xyRHVjWUNSRExCd1hUcDFrbVBkL09mY2FybG42MjZEamthZllieAp3dWUydlhCTVVRS0JnQm4vTmlPOHNiZ0RFWUZMbFFEN1k3RmxCL3FmMTg4UG05aTZ1b1dSN2hzMlBrZmtyV3hLClIvZVBQUEtNWkNLRVNhU2FuaVVtN3RhMlh0U0dxT1hkMk85cFI0Skd4V1JLSnkrZDJSUmtLZlU5NTBIa3I4M0gKZk50KzVhLzR3SWtzZ1ZvblorSWIvV05wSUJSYkd3ZHMwaHZIVkxCdVpjU1h3RHlFQysrRTRCSVZBb0dCQUoxUQp6eXlqWnRqYnI4NkhZeEpQd29teEF0WVhLSE9LWVJRdUdLVXZWY1djV2xrZTZUdE51V0dsb1FTNHd0VkdBa1VECmxhTWFaL2o2MHJaT3dwSDhZRlUvQ2ZHakl1MlFGbmEvMUtzOXR1NGZGRHpjenh1RVhDWFR1Vmk0eHdtZ3R2bVcKZkRhd3JTQTZrSDdydlp4eE9wY3hCdHloc3pCK05RUHFTckpQSjJlaEFvR0FkdFJKam9vU0lpYURVU25lZUcyZgpUTml1T01uazJkeFV3RVF2S1E4eWNuUnpyN0QwaEtZVWIycThHKzE2bThQUjNCcFMzZDFLbkpMVnI3TUhaWHpSCitzZHNaWGtTMWVEcEZhV0RFREFEWWI0ckRCb2RBdk8xYm03ZXdTMzhSbk1UaTlhdFZzNVNTODNpZG5HbFZiSmsKYkZKWG0rWWxJNHFkaXowTFdjWGJyREE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
kind: Secret
metadata:
  name: test-tlscontext-secret-1
  labels:
    kat-ambassador-id: hostdouble
type: kubernetes.io/tls
---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURnRENDQW1pZ0F3SUJBZ0lKQUlIWTY3cFNoZ3NyTUEwR0NTcUdTSWIzRFFFQkN3VUFNRlV4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlEQUpOUVRFUE1BMEdBMVVFQnd3R1FtOXpkRzl1TVFzd0NRWURWUVFLREFKRQpWekViTUJrR0ExVUVBd3dTZEd4ekxXTnZiblJsZUhRdGFHOXpkQzB5TUI0WERURTRNVEV3TVRFME1EUXhObG9YCkRUSTRNVEF5T1RFME1EUXhObG93VlRFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ01BazFCTVE4d0RRWUQKVlFRSERBWkNiM04wYjI0eEN6QUpCZ05WQkFvTUFrUlhNUnN3R1FZRFZRUUREQkowYkhNdFkyOXVkR1Y0ZEMxbwpiM04wTFRJd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUURjQThZdGgvUFdhT0dTCm9ObXZFSFoyNGpRN1BLTitENG93TEhXZWl1UmRtaEEwWU92VTN3cUczVnFZNFpwbFpBVjBQS2xELysyWlNGMTQKejh3MWVGNFFUelphWXh3eTkrd2ZITmtUREVwTWpQOEpNMk9FYnlrVVJ4VVJ2VzQrN0QzMEUyRXo1T1BseG1jMApNWU0vL0pINUVEUWhjaURybFlxZTFTUk1SQUxaZVZta2FBeXU2TkhKVEJ1ajBTSVB1ZExUY2grOTBxK3Jkd255CmZrVDF4M09UYW5iV2pub21FSmU3TXZ5NG12dnFxSUh1NDhTOUM4WmQxQkdWUGJ1OFYvVURyU1dROXpZQ1g0U0cKT2FzbDhDMFhtSDZrZW1oUERsRC9UdjB4dnlINXE1TVVjSGk0bUp0Titnem9iNTREd3pWR0VqZWY1TGVTMVY1RgowVEFQMGQrWEFnTUJBQUdqVXpCUk1CMEdBMVVkRGdRV0JCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFmCkJnTlZIU01FR0RBV2dCUWRGMEdRSGRxbHRoZG5RWXFWaXVtRXJsUk9mREFQQmdOVkhSTUJBZjhFQlRBREFRSC8KTUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFBbUFLYkNsdUhFZS9JRmJ1QWJneDBNenV6aTkwd2xtQVBiOGdtTwpxdmJwMjl1T1ZzVlNtUUFkZFBuZEZhTVhWcDFaaG1UVjVDU1F0ZFgyQ1ZNVyswVzQ3Qy9DT0Jkb1NFUTl5akJmCmlGRGNseG04QU4yUG1hR1FhK3hvT1hnWkxYZXJDaE5LV0JTWlIrWktYTEpTTTlVYUVTbEhmNXVuQkxFcENqK2oKZEJpSXFGY2E3eElGUGtyKzBSRW9BVmMveFBubnNhS2pMMlV5Z0dqUWZGTnhjT042Y3VjYjZMS0pYT1pFSVRiNQpINjhKdWFSQ0tyZWZZK0l5aFFWVk5taWk3dE1wY1UyS2pXNXBrVktxVTNkS0l0RXEyVmtTZHpNVUtqTnhZd3FGCll6YnozNFQ1MENXbm9HbU5SQVdKc0xlVmlPWVUyNmR3YkFXZDlVYitWMDFRam43OAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktrd2dnU2xBZ0VBQW9JQkFRRGNBOFl0aC9QV2FPR1MKb05tdkVIWjI0alE3UEtOK0Q0b3dMSFdlaXVSZG1oQTBZT3ZVM3dxRzNWcVk0WnBsWkFWMFBLbEQvKzJaU0YxNAp6OHcxZUY0UVR6WmFZeHd5OSt3ZkhOa1RERXBNalA4Sk0yT0VieWtVUnhVUnZXNCs3RDMwRTJFejVPUGx4bWMwCk1ZTS8vSkg1RURRaGNpRHJsWXFlMVNSTVJBTFplVm1rYUF5dTZOSEpUQnVqMFNJUHVkTFRjaCs5MHErcmR3bnkKZmtUMXgzT1RhbmJXam5vbUVKZTdNdnk0bXZ2cXFJSHU0OFM5QzhaZDFCR1ZQYnU4Vi9VRHJTV1E5ellDWDRTRwpPYXNsOEMwWG1INmtlbWhQRGxEL1R2MHh2eUg1cTVNVWNIaTRtSnROK2d6b2I1NER3elZHRWplZjVMZVMxVjVGCjBUQVAwZCtYQWdNQkFBRUNnZ0VCQUk2U3I0anYwZForanJhN0gzVnZ3S1RYZnl0bjV6YVlrVjhZWUh3RjIyakEKbm9HaTBSQllIUFU2V2l3NS9oaDRFWVM2anFHdkptUXZYY3NkTldMdEJsK2hSVUtiZVRtYUtWd2NFSnRrV24xeQozUTQwUytnVk5OU2NINDRvYUZuRU0zMklWWFFRZnBKMjJJZ2RFY1dVUVcvWnpUNWpPK3dPTXc4c1plSTZMSEtLCkdoOENsVDkrRGUvdXFqbjNCRnQwelZ3cnFLbllKSU1DSWFrb2lDRmtIcGhVTURFNVkyU1NLaGFGWndxMWtLd0sKdHFvWFpKQnlzYXhnUTFRa21mS1RnRkx5WlpXT01mRzVzb1VrU1RTeURFRzFsYnVYcHpUbTlVSTlKU2lsK01yaAp1LzVTeXBLOHBCSHhBdFg5VXdiTjFiRGw3Sng1SWJyMnNoM0F1UDF4OUpFQ2dZRUE4dGNTM09URXNOUFpQZlptCk9jaUduOW9STTdHVmVGdjMrL05iL3JodHp1L1RQUWJBSzhWZ3FrS0dPazNGN1krY2txS1NTWjFnUkF2SHBsZEIKaTY0Y0daT1dpK01jMWZVcEdVV2sxdnZXbG1nTUlQVjVtbFpvOHowMlNTdXhLZTI1Y2VNb09oenFlay9vRmFtdgoyTmxFeTh0dEhOMUxMS3grZllhMkpGcWVycThDZ1lFQTUvQUxHSXVrU3J0K0dkektJLzV5cjdSREpTVzIzUTJ4CkM5ZklUTUFSL1Q4dzNsWGhyUnRXcmlHL3l0QkVPNXdTMVIwdDkydW1nVkhIRTA5eFFXbzZ0Tm16QVBNb1RSekMKd08yYnJqQktBdUJkQ0RISjZsMlFnOEhPQWovUncrK2x4bEN0VEI2YS8xWEZIZnNHUGhqMEQrWlJiWVZzaE00UgpnSVVmdmpmQ1Y1a0NnWUVBMzdzL2FieHJhdThEaTQ3a0NBQ3o1N3FsZHBiNk92V2d0OFF5MGE5aG0vSmhFQ3lVCkNML0VtNWpHeWhpMWJuV05yNXVRWTdwVzR0cG5pdDJCU2d1VFlBMFYrck8zOFhmNThZcTBvRTFPR3l5cFlBUkoKa09SanRSYUVXVTJqNEJsaGJZZjNtL0xnSk9oUnp3T1RPNXFSUTZHY1dhZVlod1ExVmJrelByTXUxNGtDZ1lCbwp4dEhjWnNqelVidm5wd3hTTWxKUStaZ1RvZlAzN0lWOG1pQk1POEJrclRWQVczKzFtZElRbkFKdWRxTThZb2RICmF3VW03cVNyYXV3SjF5dU1wNWFadUhiYkNQMjl5QzVheFh3OHRtZlk0TTVtTTBmSjdqYW9ydGFId1pqYmNObHMKdTJsdUo2MVJoOGVpZ1pJU1gyZHgvMVB0ckFhWUFCZDcvYWVYWU0wVWtRS0JnUUNVbkFIdmRQUGhIVnJDWU1rTgpOOFBEK0t0YmhPRks2S3MvdlgyUkcyRnFmQkJPQWV3bEo1d0xWeFBLT1RpdytKS2FSeHhYMkcvREZVNzduOEQvCkR5V2RjM2ZCQWQ0a1lJamZVaGRGa1hHNEFMUDZBNVFIZVN4NzNScTFLNWxMVWhPbEZqc3VPZ0NKS28wVlFmRC8KT05paDB6SzN5Wmc3aDVQamZ1TUdGb09OQWc9PQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg==
kind: Secret
metadata:
  name: test-tlscontext-secret-2
  labels:
    kat-ambassador-id: hostdouble
type: kubernetes.io/tls
---
apiVersion: getambassador.io/v2
kind: Host
metadata:
  name: host-1
  labels:
    kat-ambassador-id: hostdouble
spec:
  ambassador_id: [ {self.ambassador_id} ]
  hostname: tls-context-host-1
  acmeProvider:
    authority: none
  hostname: tls-context-host-1
  selector:
    matchLabels:
      hostname: tls-context-host-1
  tlsSecret:
    name: test-tlscontext-secret-1
---
apiVersion: getambassador.io/v2
kind: Host
metadata:
  name: host-2
  labels:
    kat-ambassador-id: hostdouble
spec:
  ambassador_id: [ {self.ambassador_id} ]
  hostname: tls-context-host-2
  acmeProvider:
    authority: none
  hostname: tls-context-host-2
  selector:
    matchLabels:
      hostname: tls-context-host-2
  tlsSecret:
    name: test-tlscontext-secret-2
---
apiVersion: getambassador.io/v2
kind: Mapping
metadata:
  name: host-1-mapping
  labels:
    hostname: tls-context-host-1
spec:
  ambassador_id: [ {self.ambassador_id} ]
  host: "tls-context-host-1"
  prefix: /target/
  service: {self.target1.path.fqdn}
---
apiVersion: getambassador.io/v2
kind: Mapping
metadata:
  name: host-2-mapping
  labels:
    hostname: tls-context-host-2
spec:
  ambassador_id: [ {self.ambassador_id} ]
  host: "tls-context-host-2"
  prefix: /target/
  service: {self.target2.path.fqdn}
''') + super().manifests()

    def scheme(self) -> str:
        # Base scheme for test queries: HTTPS, since both Hosts terminate TLS.
        return "https"

    def queries(self):
        # 0: diag endpoint filtered to errors — check() asserts this comes back empty.
        yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"),
                    headers={"Host": "tls-context-host-1"},
                    insecure=True,
                    sni=True)

        # 1 - Correct host #1: SNI + Host header select host-1's cert and Mapping.
        yield Query(self.url("target/"),
                    headers={"Host": "tls-context-host-1"},
                    expected=200,
                    insecure=True,
                    sni=True)

        # 2 - Correct host #2
        yield Query(self.url("target/"),
                    headers={"Host": "tls-context-host-2"},
                    expected=200,
                    insecure=True,
                    sni=True)

        # Cleartext requests must be 301-redirected to HTTPS for both hosts.
        # Setting the Host header really shouldn't be necessary here.
        yield Query(self.url("target/", scheme="http"),
                    headers={ "Host": "tls-context-host-1" },
                    expected=301)
        yield Query(self.url("target/", scheme="http"),
                    headers={ "Host": "tls-context-host-2" },
                    expected=301)

    def check(self):
        # XXX Ew. If self.results[0].json is empty, the harness won't convert it to a response.
        # First result is the diag query from queries(); it must report zero errors.
        errors = self.results[0].json or []
        num_errors = len(errors)
        assert num_errors == 0, "expected 0 errors, got {} -\n{}".format(num_errors, errors)

        idx = 0

        # For every successful HTTPS result, the served certificate's CN must
        # match the Host header — i.e. SNI picked the right Host's secret.
        for result in self.results:
            if result.status == 200 and result.query.headers:
                host_header = result.query.headers['Host']
                tls_common_name = result.tls[0]['Issuer']['CommonName']

                assert host_header == tls_common_name, "test %d wanted CN %s, but got %s" % (idx, host_header, tls_common_name)

            idx += 1

    def requirements(self):
        # We're replacing super()'s requirements deliberately here. Without a Host header they can't work.
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-1"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True))
        yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"Host": "tls-context-host-2"}, insecure=True, sni=True))
|
from django.apps import apps
from django.conf import settings
def UserCls():
    """Return the project's configured user model class.

    Resolves ``settings.AUTH_USER_MODEL`` (an ``"app_label.ModelName"``
    string) lazily through the app registry, so it works even when the
    model cannot be imported at module-load time.
    """
    # Pass the full dotted path instead of hardcoding the 'cauth' app label
    # and splitting out only the model name: apps.get_model accepts
    # "app_label.ModelName" directly, so the lookup stays correct even if
    # AUTH_USER_MODEL ever points at a different app.
    return apps.get_model(settings.AUTH_USER_MODEL)
|
#!/usr/bin/env python3
import random
from typing import List
class D6s:
    """Rolls of standard six-sided dice."""

    @staticmethod
    def roll(count: int = 1) -> List[int]:
        """Roll ``count`` six-sided dice.

        Args:
            count: Number of dice to roll; 0 yields an empty list.

        Returns:
            A list of ``count`` integers, each uniformly drawn from 1..6.
        """
        # Comprehension instead of a manual append loop; the RNG is called
        # once per die in the same order as before.
        return [random.randint(1, 6) for _ in range(count)]
|
#
# PySNMP MIB module DNOS-METRO-DOT1AG-PRIVATE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DNOS-METRO-DOT1AG-PRIVATE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:36:55 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): `mibBuilder` is not defined in this module. pysmi-generated MIB
# modules appear to be executed by the pysnmp MIB loader, which supplies it in
# the execution namespace — confirm before running standalone.

# --- Import ASN.1 / SMI building blocks from already-loaded base MIB modules.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
dnOS, = mibBuilder.importSymbols("DELL-REF-MIB", "dnOS")
IANAifType, = mibBuilder.importSymbols("IANAifType-MIB", "IANAifType")
InterfaceIndex, ifIndex = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex", "ifIndex")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Unsigned32, NotificationType, iso, Integer32, Counter64, TimeTicks, ObjectIdentity, ModuleIdentity, Bits, IpAddress, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "NotificationType", "iso", "Integer32", "Counter64", "TimeTicks", "ObjectIdentity", "ModuleIdentity", "Bits", "IpAddress", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Gauge32")
StorageType, RowStatus, RowPointer, TruthValue, DisplayString, TextualConvention, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "StorageType", "RowStatus", "RowPointer", "TruthValue", "DisplayString", "TextualConvention", "MacAddress")

# --- Module identity for DNOS-METRO-DOT1AG-PRIVATE-MIB (rooted under the Dell OID arc).
fastPathDot1agPrivateMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45))
fastPathDot1agPrivateMIB.setRevisions(('2011-01-26 00:00', '2008-05-27 00:00',))
if mibBuilder.loadTexts: fastPathDot1agPrivateMIB.setLastUpdated('201101260000Z')
if mibBuilder.loadTexts: fastPathDot1agPrivateMIB.setOrganization('Dell, Inc.')

# --- Top-level config groups: global CFM config, MIP config, remote-MEP config.
dot1agGlobalConfigGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 1))
dot1agMipConfigGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 2))
dot1agRMepConfigGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3))
agentDot1agGlobalConfigGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 1, 1))

# --- Global scalars: CFM enable/disable, archive hold time, clear actions.
agentDot1agCfmStatus = MibScalar((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentDot1agCfmStatus.setStatus('current')
agentDot1agCfmArchieveHoldTime = MibScalar((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentDot1agCfmArchieveHoldTime.setStatus('current')
agentDot1agCfmClearRemoteMEPs = MibScalar((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentDot1agCfmClearRemoteMEPs.setStatus('current')
agentDot1agCfmClearTraceRouteCache = MibScalar((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentDot1agCfmClearTraceRouteCache.setStatus('current')

# --- MIP table, indexed by (MD index, ifIndex).
agentDot1agMipConfigGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 2, 1))
agentDot1agMipTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 2, 1, 1), )
if mibBuilder.loadTexts: agentDot1agMipTable.setStatus('current')
agentDot1agMipEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 2, 1, 1, 1), ).setIndexNames((0, "DNOS-METRO-DOT1AG-PRIVATE-MIB", "agentDot1agMipMdIndex"), (0, "DNOS-METRO-DOT1AG-PRIVATE-MIB", "agentDot1agMipIfIndex"))
if mibBuilder.loadTexts: agentDot1agMipEntry.setStatus('current')
agentDot1agMipMdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 2, 1, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: agentDot1agMipMdIndex.setStatus('current')
agentDot1agMipIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 2, 1, 1, 1, 2), InterfaceIndex())
if mibBuilder.loadTexts: agentDot1agMipIfIndex.setStatus('current')
agentDot1agMipMode = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 2, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentDot1agMipMode.setStatus('current')

# --- Remote-MEP table, indexed by (MD, MA, MEP-id, remote MEP identifier).
agentDot1agRMepConfigGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1))
agentDot1agRMepTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1, 1), )
if mibBuilder.loadTexts: agentDot1agRMepTable.setStatus('current')
agentDot1agRMepEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1, 1, 1), ).setIndexNames((0, "DNOS-METRO-DOT1AG-PRIVATE-MIB", "agentDot1agRMepMdIndex"), (0, "DNOS-METRO-DOT1AG-PRIVATE-MIB", "agentDot1agRMepMaIndex"), (0, "DNOS-METRO-DOT1AG-PRIVATE-MIB", "agentDot1agRMepMepIdIndex"), (0, "DNOS-METRO-DOT1AG-PRIVATE-MIB", "agentDot1agRMepIdentifier"))
if mibBuilder.loadTexts: agentDot1agRMepEntry.setStatus('current')
agentDot1agRMepMdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: agentDot1agRMepMdIndex.setStatus('current')
agentDot1agRMepMaIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1, 1, 1, 2), Unsigned32())
if mibBuilder.loadTexts: agentDot1agRMepMaIndex.setStatus('current')
agentDot1agRMepMepIdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 8191), )))
if mibBuilder.loadTexts: agentDot1agRMepMepIdIndex.setStatus('current')
agentDot1agRMepIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 8191), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentDot1agRMepIdentifier.setStatus('current')
agentDot1agRMepIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1, 1, 1, 5), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentDot1agRMepIfIndex.setStatus('current')
agentDot1agRMepMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1, 1, 1, 6), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentDot1agRMepMacAddress.setStatus('current')
agentDot1agRMepRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10895, 5000, 2, 6132, 1, 1, 45, 3, 1, 1, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: agentDot1agRMepRowStatus.setStatus('current')

# --- Export every symbol so other MIB modules can import from this one.
mibBuilder.exportSymbols("DNOS-METRO-DOT1AG-PRIVATE-MIB", dot1agMipConfigGroup=dot1agMipConfigGroup, agentDot1agGlobalConfigGroup=agentDot1agGlobalConfigGroup, agentDot1agRMepMepIdIndex=agentDot1agRMepMepIdIndex, dot1agRMepConfigGroup=dot1agRMepConfigGroup, agentDot1agRMepRowStatus=agentDot1agRMepRowStatus, fastPathDot1agPrivateMIB=fastPathDot1agPrivateMIB, agentDot1agRMepEntry=agentDot1agRMepEntry, agentDot1agRMepMaIndex=agentDot1agRMepMaIndex, agentDot1agCfmClearTraceRouteCache=agentDot1agCfmClearTraceRouteCache, dot1agGlobalConfigGroup=dot1agGlobalConfigGroup, agentDot1agRMepTable=agentDot1agRMepTable, agentDot1agRMepMdIndex=agentDot1agRMepMdIndex, agentDot1agMipTable=agentDot1agMipTable, agentDot1agMipEntry=agentDot1agMipEntry, agentDot1agRMepMacAddress=agentDot1agRMepMacAddress, agentDot1agMipMdIndex=agentDot1agMipMdIndex, agentDot1agRMepConfigGroup=agentDot1agRMepConfigGroup, agentDot1agMipMode=agentDot1agMipMode, agentDot1agCfmArchieveHoldTime=agentDot1agCfmArchieveHoldTime, agentDot1agRMepIfIndex=agentDot1agRMepIfIndex, agentDot1agMipConfigGroup=agentDot1agMipConfigGroup, agentDot1agMipIfIndex=agentDot1agMipIfIndex, PYSNMP_MODULE_ID=fastPathDot1agPrivateMIB, agentDot1agCfmStatus=agentDot1agCfmStatus, agentDot1agRMepIdentifier=agentDot1agRMepIdentifier, agentDot1agCfmClearRemoteMEPs=agentDot1agCfmClearRemoteMEPs)
|
import numpy as np
from FUNCS import FNS, RK4
from PTSS import PtssJoint as ptssjnt
from PTSS import PtssSpatial as ptssspt
# variable class for Leg Module
# variable class for Leg Module
class LegVar:
    """State container for the Leg Module: one sub-object per modeled region.

    Most arrays are shaped (2, 3, 2, 2) — presumably
    (side, joint, agonist/antagonist pair, component); TODO confirm against FNS.
    """

    def __init__(self, num_deg):
        # num_deg: resolution (cells per axis) of the PTSS spatial maps.
        self.num_deg = num_deg
        self.ppc = self.Parietal(num_deg)
        self.mtc = self.Motor()
        self.bs = self.Brainstem()
        self.spc = self.SpinalCord()
        self.cbm = self.Cerebellum()

    class Parietal:
        """Parietal-cortex state: target/present vectors and learned spatial<->motor maps."""

        def __init__(self, num_deg):
            self.num_deg = num_deg
            # Spatial target and present foot positions (initialized by FNS.foot_init()).
            self.spt_targ = FNS().foot_init()
            self.spt_pres = FNS().foot_init()
            # Target and present position vectors, initialized at midpoint 1/2.
            self.tpv = 1 / 2 * np.ones((2, 3, 2, 2))
            self.ppv = 1 / 2 * np.ones((2, 3, 2, 2))
            # Difference vector issued by the endogenous random generator.
            self.dv_erg = np.zeros((2, 3, 2, 2))
            # Tracking progress/mode flags: -1 means "not started yet".
            self.trk_prog = -1
            self.trk_mode = -1
            self.ltm_mode = 0
            self.trk_plot = 0
            # Long-term-memory map motor->spatial, its difference/sample vectors,
            # and the spatial-map activity used for sampling.
            self.ltm_mot_spt = np.zeros((2, 3, num_deg, num_deg, 3, 2))
            self.dv_mot_spt = np.zeros((2, 3, 3, 2))
            self.sv_mot_spt = FNS().foot_init()
            self.sptmp_mot_spt = np.zeros((2, 3, num_deg, num_deg))
            # Long-term-memory map spatial->motor and its associated vectors.
            self.ltm_spt_mot = np.zeros((2, 3, num_deg, num_deg, num_deg, 2, 2))
            self.dv_spt_mot = np.zeros((2, 3, 2, 2))
            self.sv_spt_mot = np.zeros((2, 3, 2, 2))
            self.sptmp_spt_mot = np.zeros((2, 3, num_deg, num_deg, num_deg))
            # Learning-error estimates for the two maps (see ParietalMot/ParietalSpat).
            self.mot_spt_est = np.zeros((2, 3, 3, 2))
            self.spt_mot_est = np.zeros((2, 3, 2, 2))

    class Motor:
        """Motor-cortex state: outflow command vectors and stepping masks."""

        def __init__(self):
            # Outflow position vector, static/inertial force vectors, and the
            # combined outflow force-position vector.
            self.opv = 1 / 2 * np.ones((2, 3, 2, 2))
            self.sfv = 1 / 2 * np.ones((2, 3, 2, 2))
            self.ifv = 1 / 2 * np.ones((2, 3, 2, 2))
            self.ofpv = np.zeros((2, 3, 2, 2))
            # Binary masks selecting the left/right limb's leading joint.
            ones = np.ones(2)
            zeros = np.zeros(2)
            one_zero = np.array((ones, zeros))
            zero_zero = np.array((zeros, zeros))
            self.left = np.array([(one_zero, zero_zero, zero_zero), (zero_zero, zero_zero, zero_zero)])
            self.right = np.array([(zero_zero, zero_zero, zero_zero), (one_zero, zero_zero, zero_zero)])
            # GO-signal gain scaling movement speed.
            self.speed_mod = 1.0

    class Brainstem:
        """Brainstem state: rhythm-generator cells and their coupling coefficients."""

        def __init__(self):
            self.limb = np.zeros((2))
            self.inter = np.zeros((2))
            # Cross-inhibition coefficients between the two limb oscillators.
            self.coeff = np.array([(0.9, 0.55), (0.45, 0.9)])

    class SpinalCord:
        """Spinal-cord state: motoneuron pools, spindle signals, and skeletal dynamics."""

        def __init__(self):
            # Alpha/gamma motoneurons, Renshaw cells, Ia interneurons.
            self.alpha_moto = np.zeros((2, 3, 2, 2))
            self.stat_gamma = np.zeros((2, 3, 2, 2))
            self.dynm_gamma = np.zeros((2, 3, 2, 2))
            self.renshaw = np.zeros((2, 3, 2, 2))
            self.ia_int = np.zeros((2, 3, 2, 2))
            # Primary/secondary spindle afferents; extrafusal and intrafusal fibers.
            self.prim_spin = np.zeros((2, 3, 2, 2))
            self.seco_spin = np.zeros((2, 3, 2, 2))
            self.extra_mus = np.zeros((2, 3, 2, 2))
            self.stat_intra = np.zeros((2, 3, 2, 2))
            self.dynm_intra = np.zeros((2, 3, 2, 2))
            # Resting (equilibrium) and current muscle lengths per joint
            # — presumably centimeters; TODO confirm units.
            self.equi_leng = np.array([(((8.83, 8.83), (3.74, 3.74)), ((22.65, 22.65), (0, 0)),
                                        ((17.03, 17.26), (0, 0))),
                                       (((8.83, 8.83), (3.74, 3.74)), ((22.65, 22.65), (0, 0)),
                                        ((17.03, 17.26), (0, 0)))])
            self.mus_leng = np.array([(((8.83, 8.83), (3.74, 3.74)), ((22.65, 22.65), (0, 0)),
                                       ((17.03, 17.26), (0, 0))),
                                      (((8.83, 8.83), (3.74, 3.74)), ((22.65, 22.65), (0, 0)),
                                       ((17.03, 17.26), (0, 0)))])
            # Muscle length derivative, inertia, forces, and insertion points.
            self.mus_derv = np.zeros((2, 3, 2, 2))
            self.mus_inert = 1 * np.ones((2, 3, 2))
            self.mus_forc = np.zeros((2, 3, 2, 2))
            self.ext_forc = np.zeros((2, 2, 3, 2))
            self.mus_insert = np.zeros((2, 3, 2, 2, 3))
            # Joint angular position/velocity (plus a copy kept for plotting).
            self.ang_posn = np.zeros((2, 3, 2))
            self.ang_velo = np.zeros((2, 3, 2))
            self.ang_plot = np.zeros((2, 3, 2))

    class Cerebellum:
        """Cerebellar state: one array per cell type, plus Purkinje long-term memory."""

        def __init__(self):
            self.granule = np.zeros((2, 3, 2, 2))
            self.golgi = np.zeros((2, 3, 2, 2))
            self.purkinje = np.ones((2, 3, 2, 2))
            self.olivary = np.zeros((2, 3, 2, 2))
            self.climb = np.zeros((2, 3, 2, 2))
            self.basket = np.zeros((2, 3, 2, 2))
            self.nuclear = np.zeros((2, 3, 2, 2))
            self.rubral = np.zeros((2, 3, 2, 2))
            # Adaptive weights of the Purkinje layer (starts fully potentiated).
            self.mtm_purkj = np.ones((2, 3, 2, 2))
# method class for Leg Module
class LegFun:
    def __init__(self, LegVar):
        """Bind a LegVar state object and set up integrator and PTSS map helpers.

        Note: the parameter name `LegVar` shadows the LegVar class — it is an
        *instance* of that class, not the class itself.
        """
        self.Leg = LegVar
        self.FNS = FNS()
        self.RK4 = RK4()
        # Fixed Runge-Kutta integration step size.
        self.ste_size = 0.01
        self.num_deg = self.Leg.num_deg
        # PTSS helpers for joint-space (2D) and spatial (3D) map addressing.
        self.ptssjnt = ptssjnt(self.num_deg, self.num_deg)
        self.ptssspt = ptssspt(self.num_deg, self.num_deg, self.num_deg)
# model ParietalCortex for learning present representation
    # model ParietalCortex for learning present representation
    def ParietalMot(self):
        """Update the motor->spatial map: sample the present limb posture into
        the PTSS map and re-estimate the spatial present position from it.

        Mutates self.Leg.ppc (ltm_mot_spt, sptmp_mot_spt, sv_mot_spt, mot_spt_est).
        """
        num = self.num_deg
        step = self.ste_size
        ptss = self.ptssjnt
        FNS = self.FNS
        RK4 = self.RK4
        spt_pres = self.Leg.ppc.spt_pres
        ltm = self.Leg.ppc.ltm_mot_spt
        # Move the map-cell axes to the front so per-cell sums below are easy.
        tran_ltm = np.transpose(ltm, (0, 1, 4, 5, 2, 3))
        sv_spt = self.Leg.ppc.sv_mot_spt
        leg = FNS.thresh_fn(self.Leg.ppc.ppv, 0)
        parse = FNS.parse_append(leg, num)
        # reset non-dynamic maps
        self.Leg.ppc.sptmp_mot_spt = np.zeros((2, 3, num, num))
        # sample present representation of limbs
        for s in range(2):
            for l in range(3):
                b_max, a_max = parse[s][l]
                bound = ptss.ptssjnt_bound(b_max, a_max, num, num)
                for b in bound[0]:
                    for a in bound[1]:
                        # NOTE(review): the learning term is multiplied by 0,
                        # so ltm_mot_spt is effectively frozen here — confirm
                        # whether the 0 gain is intentional.
                        self.Leg.ppc.ltm_mot_spt[s][l][b][a] = \
                            RK4.rk4(ltm[s][l][b][a], 0 * ptss.ptssjnt_gradient(b, a, b_max, a_max) *
                                    (-0.1 * ltm[s][l][b][a] + spt_pres[s][l]), step)
                        self.Leg.ppc.sptmp_mot_spt[s][l][b][a] = ptss.ptssjnt_gradient(b, a, b_max, a_max)

        sptmp = self.Leg.ppc.sptmp_mot_spt
        # Read the spatial estimate out of the map as a gradient-weighted sum.
        pres_est = np.array([[[[np.sum(sptmp[s][l] * tran_ltm[s][l][k][n])
                                for n in range(2)] for k in range(3)] for l in range(3)] for s in range(2)])
        rev_est = np.array([[FNS.rev_mus(pres_est[s][l], 'spt') for l in range(3)] for s in range(2)])
        # Shunting normalization of the estimate (0.01 avoids division by zero).
        self.Leg.ppc.sv_mot_spt = pres_est / (pres_est + rev_est + 0.01)
        # check learning of present representation
        self.Leg.ppc.mot_spt_est = sv_spt - spt_pres
# model ParietalCortex for learning motor command
    # model ParietalCortex for learning motor command
    def ParietalSpat(self):
        """Drive the limb via an endogenous random generator (ERG) of stepping
        commands, and integrate the present position vector ppv.

        The original spatial->motor map-sampling code is retained below inside
        a triple-quoted string (a disabled block); only the ERG path runs.
        Mutates self.Leg.ppc (sv_spt_mot, ppv, trk_mode, trk_prog, dv_erg, tpv).
        """
        step = self.ste_size
        num = self.num_deg
        ptts = self.ptssspt
        RK4 = self.RK4
        FNS = self.FNS
        leg = self.Leg.ppc.ppv
        opv = FNS.thresh_fn(self.Leg.mtc.opv, 0)
        rev_opv = FNS.rev_mus(opv, 'jnt')
        net_prim = FNS.thresh_fn(FNS.diff_mus(self.Leg.spc.prim_spin, 'append'), 0)
        rev_prim = FNS.rev_mus(net_prim, 'jnt')
        # NOTE(review): several locals below (spt_targ, dv_mot, parse_spt,
        # ltm_mot, tran_ltm_mot, ...) are only used by the disabled block.
        spt_targ = self.Leg.ppc.spt_targ
        dv_mot = self.Leg.ppc.dv_spt_mot
        sv_mot = self.Leg.ppc.sv_spt_mot
        dv_spt = self.Leg.ppc.dv_mot_spt
        spt_pres = self.Leg.ppc.sv_mot_spt
        parse_spt = FNS.parse_targ(dv_spt, num, 'jnt')
        ltm_mot = self.Leg.ppc.ltm_spt_mot
        tran_ltm_mot = np.transpose(ltm_mot, (0, 1, 5, 6, 2, 3, 4))
        targ = self.Leg.ppc.tpv
        pres = leg
        old_prog = self.Leg.ppc.trk_prog
        old_mode = self.Leg.ppc.trk_mode
        erg = self.Leg.ppc.dv_erg
        # reset non-dynamic maps to zero
        self.Leg.ppc.sptmp_spt_mot = np.zeros((2, 3, num, num, num))
        # ____________________________________________________________________________________________________________
        # sample motor command
        """
        self.Leg.ppc.dv_mot_spt = 1 * (spt_targ - spt_pres)
        for s in range(2):
            for l in range(3):
                a_max, b_max, r_max = parse_spt[s][l]
                bound = ptts.ptssspt_bound(a_max, b_max, r_max, num, num, num)
                for a in bound[0]:
                    for b in bound[1]:
                        for r in bound[2]:
                            self.Leg.ppc.ltm_spt_mot[s][l][a][b][r] = \
                                RK4.rk4(ltm_mot[s][l][a][b][r], -0 * dv_mot[s][l] *
                                        (-0.0 * ltm_mot[s][l][a][b][r] +
                                         ptts.ptssspt_gradient(a, b, r, a_max, b_max, r_max)), step)
                            self.Leg.ppc.sptmp_spt_mot[s][l][a][b][r] = \
                                ptts.ptssspt_gradient(a, b, r, a_max, b_max, r_max)
        sptmp = self.Leg.ppc.sptmp_spt_mot
        leg_est = np.array([[[[np.sum(sptmp[s][l] * tran_ltm_mot[s][l][m][n])
                               for n in range(2)] for m in range(2)] for l in range(3)] for s in range(2)])
        self.Leg.ppc.dv_spt_mot = 1 * leg_est + targ - leg
        # check learning of motor command
        self.Leg.ppc.spt_mot_est = leg_est + dv_mot
        # for coordination only
        self.Leg.ppc.tpv = (1 /2 + -1 * leg_est)
        self.Leg.ppc.sv_spt_mot = targ - leg
        self.Leg.ppc.ppv = RK4.rk4(leg,
                                   (1 - leg) * (1 * opv + 1 * rev_prim) - leg * (1 * rev_opv + 1 * net_prim), step)
        """
        # ____________________________________________________________________________________________________________
        # learn limb movement using endogenous random generator
        # Difference vector to the target, and shunting update of ppv from the
        # outflow command plus spindle feedback.
        self.Leg.ppc.sv_spt_mot = targ - leg
        self.Leg.ppc.ppv = RK4.rk4(leg,
                                   (1 - leg) * (1 * opv + 1 * rev_prim) - leg * (1 * rev_opv + 1 * net_prim), step)
        # Error of the currently-leading limb only (masked by left/right).
        dv_err = (targ - pres) * \
                 (self.Leg.mtc.left * FNS.delta_fn(old_mode, 0) + self.Leg.mtc.right * FNS.delta_fn(old_mode, 1))
        cond = FNS.cond_fn(dv_err, 0.1)
        # check stepping phase
        if FNS.delta_fn(cond, 1) == 1 and (old_prog == 0 or old_prog == -1):
            # Error small enough: hand leadership to the other limb.
            new_mode = (old_mode + 1) % 2
            self.Leg.ppc.trk_mode = new_mode
        # check motor program within stepping phase
        if FNS.delta_fn(cond, 1) == 1:
            new_prog = (old_prog + 1) % 2
            self.Leg.ppc.trk_prog = new_prog
            if new_prog == 0:
                # Start of a step: draw a fresh random command for the leading
                # limb and its reversal for the trailing limb.
                mode = self.Leg.ppc.trk_mode
                next_cmd = FNS.rand_prog('step')[mode]
                agn_cmd = next_cmd
                ant_cmd = FNS.rand_fix(agn_cmd, 'rev')
                self.Leg.ppc.dv_erg[mode] = agn_cmd
                self.Leg.ppc.dv_erg[(mode + 1) % 2] = ant_cmd
            else:
                # Second half of the program: reverse the previous command.
                self.Leg.ppc.dv_erg = FNS.rev_mus(erg, 'jnt')
        # NOTE(review): `next` shadows the builtin within this method.
        next = self.Leg.ppc.dv_erg
        # New target position vector around the midpoint 1/2.
        self.Leg.ppc.tpv = 1 / 2 + next
# model MotorCortex
def Motor(self):
    """Motor cortex stage: integrate the difference-vector command and
    spindle feedback into outflow position/force commands (opv/ofpv).

    Reads from self.Leg.{spc, ppc, mtc, cbm} and writes the four
    self.Leg.mtc.{opv, sfv, ifv, ofpv} state arrays via one RK4 step each.
    NOTE(review): the locals snapshot the *previous* state before any
    update, so the four RK4 integrations below are effectively simultaneous
    — keep the read-then-write ordering intact.
    """
    FNS = self.FNS          # shared nonlinearities (threshold, reversal, ...)
    RK4 = self.RK4          # fixed-step Runge-Kutta integrator
    step = self.ste_size
    # Rectified spindle afferents and their antagonist ('jnt') reversals.
    prim_spin = FNS.thresh_fn(self.Leg.spc.prim_spin, 0)
    rev_spin = FNS.rev_mus(prim_spin, 'jnt')
    seco_spin = FNS.thresh_fn(self.Leg.spc.seco_spin, 0)
    net_spin = FNS.thresh_fn(prim_spin - seco_spin, 0)
    rev_net = FNS.rev_mus(net_spin, 'jnt')
    # Perceived limb position and the desired-movement vector from PPC.
    leg = self.Leg.ppc.ppv
    rev_leg = FNS.rev_mus(leg, 'jnt')
    sv_mot = FNS.thresh_fn(self.Leg.ppc.sv_spt_mot, 0)
    rev_sv = FNS.rev_mus(sv_mot, 'jnt')
    # Current motor-cortex state (rectified).
    opv = FNS.thresh_fn(self.Leg.mtc.opv, 0)
    ofpv = FNS.thresh_fn(self.Leg.mtc.ofpv, 0)
    sfv = FNS.thresh_fn(self.Leg.mtc.sfv, 0)
    ifv = FNS.thresh_fn(self.Leg.mtc.ifv, 0)
    GO = self.Leg.mtc.speed_mod     # volitional speed (GO) signal
    # Cerebellar nuclear output feeding the inertial-force channel.
    nucle = FNS.thresh_fn(self.Leg.cbm.nuclear, 0)
    rev_nucle = FNS.rev_mus(nucle, 'jnt')
    # compute movement command
    self.Leg.mtc.opv = RK4.rk4(opv,
        (1 - opv) * (GO * sv_mot + 1 * leg) - opv * (GO * rev_sv + 1 * rev_leg), step)
    self.Leg.mtc.sfv = RK4.rk4(sfv, (1 - sfv) * (1 * prim_spin) - sfv * (1 * rev_spin), step)
    self.Leg.mtc.ifv = RK4.rk4(ifv, (1 - ifv) * (1 * net_spin + nucle) - ifv * (1 * rev_net + rev_nucle), step)
    # Outflow force+position vector: leaky sum of the three channels above.
    self.Leg.mtc.ofpv = RK4.rk4(ofpv, -2 * ofpv + (opv + sfv + ifv), step)
# model Brainstem
def Brainstem(self):
    """Brainstem CPG stage: shunting excitation/inhibition between the limb
    oscillator and its inhibitory interneurons.

    Reads spindle afferents and the GO signal; writes self.Leg.bs.limb and
    self.Leg.bs.inter via one RK4 step each.
    """
    step = self.ste_size
    FNS = self.FNS
    RK4 = self.RK4
    mus_spin = FNS.thresh_fn(self.Leg.spc.prim_spin, 0)
    spin_input = FNS.extract_spin(mus_spin)
    limb = FNS.thresh_fn(self.Leg.bs.limb, 0)
    inter = FNS.thresh_fn(self.Leg.bs.inter, 0)
    coeff = self.Leg.bs.coeff       # inhibitory coupling matrix between units
    cos_input = 0.1                 # constant tonic drive scaled by GO below
    GO = np.max(self.Leg.mtc.speed_mod)
    # compute rhythmic output
    excit_input = 5 * FNS.sigmoid_fn(limb, 0.5, 2.0) + 0.55 * (1 * spin_input + cos_input * GO)
    inhib_input = 3 * np.dot(coeff, FNS.sigmoid_fn(inter, 0.5, 2))
    # Shunting oscillator: excitation saturates at 1.0, inhibition floors at -2.
    self.Leg.bs.limb = RK4.rk4(limb, -1 * limb + (1.0 - limb) * excit_input - (2 + limb) * inhib_input, step)
    self.Leg.bs.inter = RK4.rk4(inter, (1 - inter) * limb - inter, step)
# model SpinalCord for musculoskeletal variables
def SpinalSkel(self):
    """Musculoskeletal plant: integrate joint dynamics from muscle and
    external forces, then update muscle lengths/velocities and forces.

    Writes self.Leg.spc.{ang_velo, ang_posn, ang_plot, mus_leng, mus_derv,
    mus_forc} and the drawing-mode flag self.Leg.ppc.trk_plot.
    """
    step = self.ste_size
    RK4 = self.RK4
    FNS = self.FNS
    left_insert, right_insert = self.Leg.spc.mus_insert
    ang_velo = self.Leg.spc.ang_velo
    ang_posn = self.Leg.spc.ang_posn
    old_left_len, old_right_len = self.Leg.spc.mus_leng
    equi_leng = self.Leg.spc.equi_leng
    mus_forc = self.Leg.spc.mus_forc
    # Net torque from antagonist muscle-force difference ('append' pairing).
    diff_forc = FNS.cutoff_fn(FNS.diff_force(mus_forc, 'append'), 0.0)
    extra_mus = FNS.thresh_fn(self.Leg.spc.extra_mus, 0)
    inert = self.Leg.spc.mus_inert
    ext_forc = 1 * self.Leg.spc.ext_forc
    diff_ext = FNS.cutoff_fn(ext_forc[0] - ext_forc[1], 0.0)
    # compute joint angles
    # Second-order dynamics with viscous damping coefficient 5.
    self.Leg.spc.ang_velo = RK4.rk4(ang_velo, (1 / inert) * (diff_forc + 1 * diff_ext - 5 * ang_velo), step)
    self.Leg.spc.ang_posn = RK4.rk4(ang_posn, 1 * ang_velo, step)
    self.Leg.spc.ang_plot = np.array([FNS.angle_bound(FNS.bound_fn(1 * ang_posn[s], 1), 'leg') for s in range(2)])
    # Recompute each side's muscle length and length-derivative from the
    # (unchanged) insertion geometry and the new joint state.
    new_left_pos = left_insert
    new_left_len = FNS.mus_len(new_left_pos, 'append')
    new_left_derv = FNS.mus_derv(old_left_len, new_left_len, step, 'append')
    self.Leg.spc.mus_leng[0] = new_left_len
    self.Leg.spc.mus_derv[0] = new_left_derv
    new_right_pos = right_insert
    new_right_len = FNS.mus_len(new_right_pos, 'append')
    new_right_derv = FNS.mus_derv(old_right_len, new_right_len, step, 'append')
    self.Leg.spc.mus_leng[1] = new_right_len
    self.Leg.spc.mus_derv[1] = new_right_derv
    new_len = np.array((new_left_len, new_right_len))
    # Total force: neural drive plus spring-like stretch beyond rest length.
    self.Leg.spc.mus_forc = FNS.thresh_fn(extra_mus + FNS.leg_mus(new_len - equi_leng), 0)
    # ____________________________________________________________________________________________________________
    # update drawing mode based on state of change of muscle length in response to command
    mus_derv = self.Leg.spc.mus_derv
    derv_input = 1 * FNS.extract_spin(mus_derv)
    # 1 while the tracked muscle pair is lengthening, else 0 (second
    # heaviside term is deliberately zero-weighted).
    self.Leg.ppc.trk_plot = int(1 * np.heaviside(derv_input[0] - derv_input[1], 0) +
                                0 * np.heaviside(derv_input[1] - derv_input[0], 0))
# model SpinalCord for neural variables
def SpinalCore(self):
    """Spinal cord neural circuitry: Ia interneurons, alpha motoneurons,
    Renshaw cells, gamma loops, intrafusal fibers and spindle afferents.

    The triple-quoted blocks below are disabled gain configurations for
    other simulation drivers (PAN03/PAN05 options); only the "cortical and
    subcortical cpgs" variant is active.  They are kept byte-identical as
    documentation of those configurations.
    """
    """
    # for option "locomotion with subcortical cpg" in PAN03.py
    FNS = self.FNS
    cpg = FNS.thresh_fn(self.Leg.bs.limb, 0)
    rythm_input = 5 * FNS.extract_rythm(cpg)
    ofpv = 0 * FNS.thresh_fn(self.Leg.mtc.ofpv, 0)
    """
    # ____________________________________________________________________________________________________________
    """
    # for option "locomotion with cortical cpg" in PAN03.py
    FNS = self.FNS
    cpg = FNS.thresh_fn(self.Leg.bs.limb, 0)
    rythm_input = 0 * FNS.extract_rythm(cpg)
    ofpv = 1.0 * FNS.thresh_fn(self.Leg.mtc.ofpv, 0)
    """
    # ____________________________________________________________________________________________________________
    # for option "locomotion with cortical and subcortical cpgs" in PAN03.py
    FNS = self.FNS
    cpg = FNS.thresh_fn(self.Leg.bs.limb, 0)
    rythm_input = 1.2 * FNS.extract_rythm(cpg)
    ofpv = 1.2 * FNS.thresh_fn(self.Leg.mtc.ofpv, 0)
    # ____________________________________________________________________________________________________________
    """
    # for PAN05.py
    FNS = self.FNS
    cpg = FNS.thresh_fn(self.Leg.bs.limb, 0)
    rythm_input = 1.3 * FNS.extract_rythm(cpg)
    ofpv = 1.3 * FNS.thresh_fn(self.Leg.mtc.ofpv, 0)
    """
    # ____________________________________________________________________________________________________________
    step = self.ste_size
    FNS = self.FNS
    RK4 = self.RK4
    GO = self.Leg.mtc.speed_mod
    # Snapshot every pool's rectified state before any update so all RK4
    # integrations below act on the same time slice.
    sv_mot = FNS.thresh_fn(self.Leg.ppc.sv_spt_mot, 0)
    rev_sv = FNS.rev_mus(sv_mot, 'jnt')
    opv = self.Leg.mtc.opv
    ia_int = FNS.thresh_fn(self.Leg.spc.ia_int, 0)
    rev_int = FNS.rev_mus(ia_int, 'jnt')
    alpha_moto = FNS.thresh_fn(self.Leg.spc.alpha_moto, 0)
    renshaw = FNS.thresh_fn(self.Leg.spc.renshaw, 0)
    rev_renshaw = FNS.rev_mus(renshaw, 'jnt')
    prim_spin = FNS.thresh_fn(self.Leg.spc.prim_spin, 0)
    seco_spin = FNS.thresh_fn(self.Leg.spc.seco_spin, 0)
    rubral = FNS.thresh_fn(self.Leg.cbm.rubral, 0)
    mus_leng = self.Leg.spc.mus_leng
    mus_derv = self.Leg.spc.mus_derv
    mus_forc = FNS.thresh_fn(self.Leg.spc.mus_forc, 0)
    stat_gamma = FNS.thresh_fn(self.Leg.spc.stat_gamma, 0)
    dynm_gamma = FNS.thresh_fn(self.Leg.spc.dynm_gamma, 0)
    stat_intra = FNS.thresh_fn(self.Leg.spc.stat_intra, 0)
    dynm_intra = FNS.thresh_fn(self.Leg.spc.dynm_intra, 0)
    equi_leng = self.Leg.spc.equi_leng
    # Spindle drive: static channel from stretch, dynamic from velocity.
    stat_input = FNS.thresh_fn(stat_intra + FNS.leg_mus(mus_leng - equi_leng), 0)
    dynm_input = FNS.thresh_fn(FNS.diff_mus(dynm_intra, 'append') + FNS.leg_mus(mus_derv), 0)
    extra_mus = FNS.thresh_fn(self.Leg.spc.extra_mus, 0)
    jnt_fdbk = 1 * FNS.thresh_fn(FNS.jnt_recept(self.Leg.spc.ang_plot, 'leg'), 0)
    rev_jnt = FNS.rev_mus(jnt_fdbk, 'jnt')
    # Size-principle recruitment gains driven by descending command + rhythm.
    big_size = 1 + 5 * (ofpv + 1 * rythm_input)
    med_size = 0.1 + 0.5 * alpha_moto
    sma_size = 0.01 + 0.05 * (ofpv + 1 * rythm_input)
    # process movement command
    self.Leg.spc.ia_int = \
        RK4.rk4(ia_int, (10 - ia_int) * (ofpv + prim_spin) -
                ia_int * (1 + renshaw + rev_int), step)
    self.Leg.spc.alpha_moto = \
        RK4.rk4(alpha_moto, (5 * big_size - alpha_moto) * (ofpv + rubral + prim_spin + rev_jnt + 1 * rythm_input) -
                (alpha_moto + 1) * (0.5 + renshaw + rev_int), step)
    self.Leg.spc.renshaw = \
        RK4.rk4(renshaw, (5 * big_size - renshaw) * (med_size * alpha_moto) -
                renshaw * (1 + rev_renshaw + 5 * rubral), step)
    self.Leg.spc.extra_mus = \
        RK4.rk4(extra_mus, (big_size - extra_mus) * (sma_size * alpha_moto) -
                sma_size * extra_mus - mus_forc, step)
    self.Leg.spc.stat_gamma = \
        RK4.rk4(stat_gamma, (2 - stat_gamma) * (1 * opv) -
                (1 + stat_gamma) * (0.1 + 0.2 * FNS.sigmoid_fn(renshaw, 0.2, 1)), step)
    self.Leg.spc.stat_intra = RK4.rk4(stat_intra, -1 * stat_intra + (2 - stat_intra) * stat_gamma, step)
    self.Leg.spc.dynm_gamma = \
        RK4.rk4(dynm_gamma, (5 - dynm_gamma) * (1 * GO * sv_mot) -
                (2 + dynm_gamma) * (0.1 + GO * rev_sv + 0.5 * FNS.sigmoid_fn(renshaw, 0.2, 1)), step)
    self.Leg.spc.dynm_intra = RK4.rk4(dynm_intra, -5 * dynm_intra + (2 - dynm_intra) * dynm_gamma, step)
    self.Leg.spc.prim_spin = RK4.rk4(prim_spin, -2 * prim_spin + (1 - prim_spin) * (stat_input + dynm_input), step)
    self.Leg.spc.seco_spin = RK4.rk4(seco_spin, -2 * seco_spin + (1 - seco_spin) * stat_input, step)
# model Cerebellum
def Cerebellum(self):
    """Cerebellar circuit: granule/Golgi/basket layer, Purkinje cells with a
    learned gain (mtm_purkj), climbing-fiber error loop, deep nuclei and red
    nucleus output.

    Writes self.Leg.cbm.* via one RK4 step per population.
    """
    step = self.ste_size
    FNS = self.FNS
    RK4 = self.RK4
    GO = self.Leg.mtc.speed_mod
    sv_mot = FNS.thresh_fn(self.Leg.ppc.sv_spt_mot, 0)
    granule = FNS.thresh_fn(self.Leg.cbm.granule, 0)
    golgi = FNS.thresh_fn(self.Leg.cbm.golgi, 0)
    basket = FNS.thresh_fn(self.Leg.cbm.basket, 0)
    prim_spin = FNS.thresh_fn(self.Leg.spc.prim_spin, 0)
    seco_spin = FNS.thresh_fn(self.Leg.spc.seco_spin, 0)
    net_spin = FNS.thresh_fn(prim_spin - seco_spin, 0)
    climb = FNS.thresh_fn(self.Leg.cbm.climb, 0)
    olive = FNS.thresh_fn(self.Leg.cbm.olivary, 0)
    purkj = FNS.thresh_fn(self.Leg.cbm.purkinje, 0)
    rev_purkj = FNS.rev_mus(purkj, 'jnt')
    nuclear = FNS.thresh_fn(self.Leg.cbm.nuclear, 0)
    rubral = FNS.thresh_fn(self.Leg.cbm.rubral, 0)
    mtm = self.Leg.cbm.mtm_purkj    # slow parallel-fiber->Purkinje weight
    # compute adaptive gains for dynamic force
    self.Leg.cbm.granule = \
        RK4.rk4(granule, -2 * granule + (1 - granule) * (0.1 + 1 * GO * sv_mot) - (0.5 + granule) * golgi, step)
    self.Leg.cbm.golgi = RK4.rk4(golgi, -1 * golgi + (2 - golgi) * (1 * GO * sv_mot * granule), step)
    self.Leg.cbm.basket = RK4.rk4(basket, -1 * basket + (2 - basket) * granule, step)
    # Slow weight change gated by granule activity; climbing fiber depresses.
    self.Leg.cbm.mtm_purkj = RK4.rk4(mtm, 0.01 * granule * ((1 - mtm) - 10 * climb * mtm), step)
    self.Leg.cbm.purkinje = \
        RK4.rk4(purkj, -2 * purkj +
                (1 - purkj) * (10 * granule * mtm + climb + FNS.sigmoid_fn(purkj, 0.2, 2) + 0.5) -
                (0.5 + purkj) * (0.5 * rev_purkj + basket), step)
    # Climbing fiber excited by spindle error, reset through the olive.
    self.Leg.cbm.climb = \
        RK4.rk4(climb, -climb + (1 - climb) * (10 * climb + net_spin) - (0.5 + climb) * (10 * olive), step)
    self.Leg.cbm.olivary = RK4.rk4(olive, -0.1 * olive + climb, step)
    self.Leg.cbm.nuclear = \
        RK4.rk4(nuclear, -2 * nuclear + (1 - nuclear) * (0.1 + 10 * net_spin) - (0.5 + nuclear) * 2 * purkj, step)
    self.Leg.cbm.rubral = RK4.rk4(rubral, -0.1 * rubral + nuclear, step)
|
#import spacy
#nlp = spacy.load('en')
def spacy_extract(text_string):
    """Group the named entities found in *text_string* by entity label.

    Requires the module-level ``nlp`` spaCy pipeline to be loaded (the
    ``spacy.load`` call at the top of this file is currently commented out,
    so calling this raises NameError until it is restored).

    Returns:
        dict mapping entity label (e.g. 'PERSON') to a list of entity texts,
        in document order.
    """
    out = dict()
    doc = nlp(text_string)
    for ent in doc.ents:
        # setdefault replaces the original membership-test-then-init pattern.
        out.setdefault(ent.label_, []).append(ent.text)
    return out
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-17 10:50
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.core.fields
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.5): switch ``Event.body`` to a Wagtail
    RichTextField and make ``Event.embargo_until`` optional."""

    dependencies = [
        ('events', '0013_event_parent'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='body',
            field=wagtail.core.fields.RichTextField(blank=True, default=''),
        ),
        migrations.AlterField(
            model_name='event',
            name='embargo_until',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# Best-effort conversion of README.md to reST for PyPI's long_description;
# falls back to an empty description when pypandoc/pandoc is unavailable.
try:
    import pypandoc
    try:
        # Side effect only: writes README.rst next to README.md.
        pypandoc.convert_file("README.md", "rst", outputfile="README.rst")
    except (IOError, ImportError, RuntimeError):
        pass
    # NOTE(review): the document is converted a second time to obtain the
    # string; if the call above failed, this one likely raises too and is
    # caught by the outer handler, yielding an empty long_description.
    long_description = pypandoc.convert_file("README.md", "rst")
except (IOError, ImportError, RuntimeError):
    long_description = ""
setup(name="UCCA",
      version="1.0.11",
      install_requires=["spacy", "requests"],
      extras_require={"visualize": ["matplotlib", "networkx"]},
      description="Universal Conceptual Cognitive Annotation",
      long_description=long_description,
      author="Daniel Hershcovich",
      author_email="danielh@cs.huji.ac.il",
      url="https://github.com/huji-nlp/ucca",
      classifiers=[
          "Development Status :: 4 - Beta",
          "Intended Audience :: Science/Research",
          "Programming Language :: Python :: 3.6",
          "Topic :: Text Processing :: Linguistic",
          "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
      ],
      packages=find_packages(),
      )
|
from django.test import TestCase
from dwitter.templatetags.insert_magic_links import insert_magic_links
class DweetTestCase(TestCase):
    """Tests for the ``insert_magic_links`` template helper, which turns
    ``u/<name>`` and ``d/<id>`` tokens into anchor tags."""

    def test_insert_magic_links_bypasses_html(self):
        self.assertEqual(
            'prefix <h1>content</h1> suffix',
            insert_magic_links('prefix <h1>content</h1> suffix')
        )

    # user
    def test_insert_magic_links_replaces_user_with_valid_characters(self):
        self.assertEqual(
            '<a href="/u/a1_.@+-">u/a1_.@+-</a>',
            insert_magic_links('u/a1_.@+-')
        )

    def test_insert_magic_links_bypasses_user_with_invalid_characters(self):
        # BUG FIX: the original compared the literal with itself and never
        # exercised insert_magic_links.
        self.assertEqual(
            'u/a1$',
            insert_magic_links('u/a1$')
        )

    def test_insert_magic_links_replaces_standalone_user(self):
        self.assertEqual(
            '<a href="/u/a">u/a</a>',
            insert_magic_links('u/a')
        )

    def test_insert_magic_links_replaces_user_at_start_of_string(self):
        self.assertEqual(
            '<a href="/u/a">u/a</a> suffix',
            insert_magic_links('u/a suffix')
        )

    def test_insert_magic_links_replaces_user_at_end_of_string(self):
        self.assertEqual(
            'prefix <a href="/u/a">u/a</a>',
            insert_magic_links('prefix u/a')
        )

    def test_insert_magic_links_replaces_user_at_middle_of_string(self):
        self.assertEqual(
            'prefix <a href="/u/a">u/a</a> suffix',
            insert_magic_links('prefix u/a suffix')
        )

    def test_insert_magic_links_bypasses_user_prefixed_by_non_space(self):
        self.assertEqual(
            'prefixu/a suffix',
            insert_magic_links('prefixu/a suffix')
        )

    def test_insert_magic_links_bypasses_user_suffixed_by_non_space(self):
        self.assertEqual(
            'prefix u/a/suffix',
            insert_magic_links('prefix u/a/suffix')
        )

    def test_insert_magic_links_replaces_user_suffixed_by_slash(self):
        self.assertEqual(
            'prefix <a href="/u/a">u/a</a> prefix/u/a',
            insert_magic_links('prefix /u/a prefix/u/a')
        )

    def test_insert_magic_links_replaces_user_inside_parenthases(self):
        self.assertEqual(
            '(<a href="/u/a">u/a</a>)',
            insert_magic_links('(u/a)')
        )

    # dweet
    def test_insert_magic_links_replaces_dweet_with_valid_characters(self):
        self.assertEqual(
            '<a href="/d/1234567890">d/1234567890</a>',
            insert_magic_links('d/1234567890')
        )

    def test_insert_magic_links_bypasses_dweet_with_invalid_characters(self):
        # BUG FIX: same tautology as the user variant — the function was
        # never called.
        self.assertEqual(
            'd/1a',
            insert_magic_links('d/1a')
        )

    def test_insert_magic_links_replaces_standalone_dweet(self):
        self.assertEqual(
            '<a href="/d/1">d/1</a>',
            insert_magic_links('d/1')
        )

    def test_insert_magic_links_replaces_dweet_at_start_of_string(self):
        self.assertEqual(
            '<a href="/d/1">d/1</a> suffix',
            insert_magic_links('d/1 suffix')
        )

    def test_insert_magic_links_replaces_dweet_at_end_of_string(self):
        self.assertEqual(
            'prefix <a href="/d/1">d/1</a>',
            insert_magic_links('prefix d/1')
        )

    def test_insert_magic_links_replaces_dweet_at_middle_of_string(self):
        self.assertEqual(
            'prefix <a href="/d/1">d/1</a> suffix',
            insert_magic_links('prefix d/1 suffix')
        )

    def test_insert_magic_links_bypasses_dweet_prefixed_by_non_space(self):
        self.assertEqual(
            'prefixd/1 suffix',
            insert_magic_links('prefixd/1 suffix')
        )

    def test_insert_magic_links_bypasses_dweet_suffixed_by_non_space(self):
        self.assertEqual(
            'prefix d/1/suffix',
            insert_magic_links('prefix d/1/suffix')
        )

    def test_insert_magic_links_replaces_dweet_suffixed_by_slash(self):
        self.assertEqual(
            'prefix <a href="/d/1">d/1</a> prefix/d/1',
            insert_magic_links('prefix /d/1 prefix/d/1')
        )

    def test_insert_magic_links_replaces_dweet_in_parenthases(self):
        self.assertEqual(
            '(<a href="/d/1">d/1</a>)',
            insert_magic_links('(d/1)')
        )

    # mixed
    def test_insert_magic_links_mixed(self):
        self.assertEqual(
            '<a href="/u/john">u/john</a> remixed '
            '<a href="/d/123">d/123</a> by '
            '<a href="/u/jane">u/jane</a>',
            insert_magic_links('u/john remixed d/123 by /u/jane')
        )
|
# Create a variable n (where n is the number of elements) and an empty
# array arr = [].  Read numbers from the keyboard and append each number
# squared to the array.  At the end print the array itself, i.e. print(arr).
print("Enter number of elements in array")
n = int(input())
arr = []
for i in range(n):
    print("Enter element ", i + 1, ": ")
    # Append the square of the entered value; append replaces the original
    # presize-then-index-assign pattern.
    arr.append(pow(float(input()), 2))
print(arr)
|
import os
import re
from distutils.core import setup
# Packages needed only to run the test suite.
tests_require = [
    'pytest',
    'pytest-runner',
    'python-coveralls',
    'pytest-pep8'
]
# Runtime dependencies.
# NOTE(review): 'pathlib' is stdlib since Python 3.4 and setup() below
# declares python_requires='>=3.3' — this pin may be unnecessary; confirm.
install_requires = [
    'pathlib'
]
def sanitize_string(str):
    """Return *str* with all double- and single-quote characters removed."""
    return str.replace('"', '').replace("'", '')
def parse_version_file():
    """Parse the __version__.py file.

    Scans ``opath/__version__.py`` (next to this script) for lines of the
    form ``__name__ = value`` and returns a dict mapping ``name`` to the
    quote-stripped ``value``.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    ver_dict = {}
    # Raw string: '\w' / '\s' in a plain literal are invalid escape
    # sequences (DeprecationWarning on modern Python).  Compiled once,
    # outside the per-line loop.
    pattern = re.compile(r'__(\w+)__\s*=\s*(.+)')
    with open(os.path.join(here, 'opath', '__version__.py'), 'r') as f:
        # Iterate the file directly instead of materializing readlines().
        for line in f:
            m = pattern.match(line)
            if m:
                ver_dict[m.group(1)] = sanitize_string(m.group(2))
    return ver_dict
def read(fname):
    """Read a file located next to this script and return its contents.

    Fix: the original leaked the file handle; a context manager guarantees
    it is closed even if ``read()`` raises.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Metadata (title/version/package/license/author) comes from
# opath/__version__.py so it is defined in exactly one place.
ver = parse_version_file()
# setup
setup(
    name=ver['title'],
    version=ver['version'],
    packages=[ver['package']],
    url='https://github.com/jvrana/opath',
    license=ver['license'],
    author=ver['author'],
    author_email='justin.vrana@gmail.com',
    keywords='directory python tree path',
    description='intuitive python directory tree management for all',
    long_description=read("README"),
    install_requires=install_requires,
    python_requires='>=3.3',
    tests_require=tests_require,
)
|
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save signal handler: issue a DRF auth token for each new user."""
    if created:
        Token.objects.create(user=instance)
# Model representing a student.
class Stewdent(models.Model):
    """A student profile: identity, university enrolment, contact details
    and terms agreement."""

    # Australian states/territories for the ``state`` field.
    STATES = (
        ('QLD', 'QLD'),
        ('NSW', 'NSW'),
        ('VIC', 'VIC'),
        ('TAS', 'TAS'),
        ('ACT', 'ACT'),
        ('SA', 'SA'),
        ('WA', 'WA'),
        ('NT', 'NT'),
    )
    # Australian universities for the ``university`` field.
    UNI = (
        ('Australian Catholic University', 'Australian Catholic University'),
        ('Australian National University', 'Australian National University'),
        ('Bond University', 'Bond University'),
        ('Central Queensland University', 'Central Queensland University'),
        ('Charles Darwin University', 'Charles Darwin University'),
        ('Charles Sturt University', 'Charles Sturt University'),
        ('Curtin University', 'Curtin University'),
        ('Deakin University', 'Deakin University'),
        ('Edith Cowan University', 'Edith Cowan University'),
        ('Federation University', 'Federation University'),
        ('Flinders University', 'Flinders University'),
        ('Griffith University', 'Griffith University'),
        ('James Cook University', 'James Cook University'),
        ('La Trobe University', 'La Trobe University'),
        ('Macquarie University', 'Macquarie University'),
        ('Monash University', 'Monash University'),
        ('Murdoch University', 'Murdoch University'),
        ('Queensland University of Technology', 'Queensland University of Technology'),
        ('RMIT University', 'RMIT University'),
        ('Southern Cross University', 'Southern Cross University'),
        ('Swinburne University of Technology', 'Swinburne University of Technology'),
        ('University of Adelaide', 'University of Adelaide'),
        ('University of Canberra', 'University of Canberra'),
        ('University of Melbourne', 'University of Melbourne'),
        ('University of New England', 'University of New England'),
        ('University of New South Wales', 'University of New South Wales'),
        ('University of Newcastle', 'University of Newcastle'),
        ('University of Notre Dame', 'University of Notre Dame'),
        ('University of Queensland', 'University of Queensland'),
        ('University of South Australia', 'University of South Australia'),
        ('University of Southern Queensland', 'University of Southern Queensland'),
        ('University of Sydney', 'University of Sydney'),
        ('University of Tasmania', 'University of Tasmania'),
        ('University of Technology Sydney', 'University of Technology Sydney'),
        ('University of the Sunshine Coast', 'University of the Sunshine Coast'),
        ('University of Western Australia', 'University of Western Australia'),
        ('University of Western Sydney', 'University of Western Sydney'),
        ('University of Wollongong', 'University of Wollongong'),
        ('Victoria University', 'Victoria University'),
    )
    GENDERS = (
        ('Male', 'Male'),
        ('Female', 'Female'),
        ('Not specified', 'Not specified')
    )
    # NOTE(review): no on_delete argument — this model targets Django < 2.0.
    user = models.OneToOneField(User, null=True)
    # NOTE(review): auto_now refreshes this timestamp on EVERY save; a
    # creation timestamp is normally auto_now_add=True — confirm intent.
    created = models.DateTimeField(auto_now=True)
    first_name = models.CharField(max_length=100, blank=False)
    last_name = models.CharField(max_length=100, blank=False)
    gender = models.CharField(max_length=30, choices=GENDERS, null=True)
    dob = models.DateField(blank=False)
    university = models.CharField(max_length=300, choices=UNI, blank=False)
    student_num = models.CharField(max_length=50, blank=True, null=True)
    degree = models.CharField(max_length=500, blank=True, null=True)
    # start_year = models.DateField(blank=False)
    # end_year = models.DateField(blank=False)
    # Enrolment years stored as 4-character strings rather than dates.
    start_year = models.CharField(max_length=4, blank=False)
    end_year = models.CharField(max_length=4, blank=False)
    occupation = models.CharField(max_length=100, blank=True)
    phone_num = models.CharField(max_length=50, blank=True, null=True)
    email = models.EmailField(unique=True, blank=False, null=True)
    address = models.CharField(max_length=200, blank=True, null=True)
    city = models.CharField(max_length=100, blank=False)
    state = models.CharField(max_length=3, choices=STATES, blank=False)
    post_code = models.CharField(max_length=5, blank=True, null=True)
    country = models.CharField(max_length=100, blank=False)
    agreed = models.BooleanField(default=True)

    def __unicode__(self):
        # Python 2-style representation; Django on Python 3 uses __str__ —
        # confirm the target Python version before porting.
        return "%s %s" % (self.first_name, self.last_name)

    class Meta:
        ordering = ('created', )
class Skill(models.Model):
    """Free-text skill descriptions for one student, grouped by category
    (creative design, IT, marketing, ...), with a Skill/Soft(ware) pair per
    category."""

    # Superseded field set, kept for reference:
    # stewdent = models.OneToOneField('stewdent')
    # software_skills = models.CharField(max_length=3000, blank=False)
    # computer_based = models.CharField(max_length=3000, blank=False)
    # personal = models.CharField(max_length=3000, blank=True, null=True)
    # languages_coding = models.CharField(max_length=3000, blank=True, null=True)
    # languages_spoken = models.CharField(max_length=300, blank=False)
    # smartphone = models.CharField(max_length=100, blank=True, null=True)
    # tablet = models.CharField(max_length=100, blank=True, null=True)
    stewdent = models.OneToOneField('stewdent')
    creativeDesignSkill = models.TextField(max_length=1000, blank=True, null=True)
    creativeDesignSoft = models.TextField(max_length=1000, blank=True, null=True)
    techDesignSkill = models.TextField(max_length=1000, blank=True, null=True)
    techDesignSoft = models.TextField(max_length=1000, blank=True, null=True)
    itSkill = models.TextField(max_length=1000, blank=True, null=True)
    itSoft = models.TextField(max_length=1000, blank=True, null=True)
    marketSkill = models.TextField(max_length=1000, blank=True, null=True)
    marketSoft = models.TextField(max_length=1000, blank=True, null=True)
    writingSkill = models.TextField(max_length=1000, blank=True, null=True)
    writingSoft = models.TextField(max_length=1000, blank=True, null=True)
    mediaSkill = models.TextField(max_length=1000, blank=True, null=True)
    mediaSoft = models.TextField(max_length=1000, blank=True, null=True)
    financeSkill = models.TextField(max_length=1000, blank=True, null=True)
    financeSoft = models.TextField(max_length=1000, blank=True, null=True)
    researchSkill = models.TextField(max_length=1000, blank=True, null=True)
    researchSoft = models.TextField(max_length=1000, blank=True, null=True)
    personalSkill = models.TextField(max_length=1000, blank=True, null=True)
    personalSoft = models.TextField(max_length=1000, blank=True, null=True)
    otherSkill = models.TextField(max_length=1000, blank=True, null=True)
    otherSoft = models.TextField(max_length=1000, blank=True, null=True)
    languages_coding = models.TextField(max_length=1000, blank=True, null=True)
    languages_spoken = models.TextField(max_length=1000, blank=True, null=True)

    class Meta:
        ordering = ('stewdent', )
class Work(models.Model):
    """
    Represents a work relation from the database. The work table holds
    information on the stewdents industry goals and experience.
    """
    stewdent = models.OneToOneField('stewdent')
    # Goals: target industries/companies the student wants to work in.
    industry_one = models.CharField(max_length=100, null=True, blank=True)
    industry_two = models.CharField(max_length=100, null=True, blank=True)
    company_one = models.CharField(max_length=100, null=True, blank=True)
    company_two = models.CharField(max_length=100, null=True, blank=True)
    other_goals = models.CharField(max_length=1000, null=True, blank=True)
    # Up to three past work experiences (industry/company/duration/learning).
    exp_industry_one = models.CharField(null=True, blank=True, max_length=100)
    exp_company_one = models.CharField(null=True, blank=True, max_length=100)
    exp_duration_one = models.CharField(null=True, blank=True, max_length=100)
    exp_learning_one = models.CharField(null=True, blank=True, max_length=1000)
    exp_industry_two = models.CharField(null=True, blank=True, max_length=100)
    exp_company_two = models.CharField(null=True, blank=True, max_length=100)
    exp_duration_two = models.CharField(null=True, blank=True, max_length=100)
    exp_learning_two = models.CharField(null=True, blank=True, max_length=1000)
    exp_industry_three = models.CharField(null=True, blank=True, max_length=100)
    exp_company_three = models.CharField(null=True, blank=True, max_length=100)
    exp_duration_three = models.CharField(null=True, blank=True, max_length=100)
    exp_learning_three = models.CharField(null=True, blank=True, max_length=1000)
|
import requests
from bs4 import BeautifulSoup
URL = "https://www.amazon.in/Wear-Your-Opinion-Printed-T-Shirt/dp/B09BZFM6SY/ref=sr_1_4_sspa?crid=3SRLZDG0M0VNR&dchild=1&keywords=code&qid=1635232842&s=apparel&sprefix=cod%2Cfashion%2C254&sr=1-4-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUEyUkJSMEVXVzZUMzBKJmVuY3J5cHRlZElkPUEwODQxOTMwMzMxVUJVT0ZJRVc5SyZlbmNyeXB0ZWRBZElkPUEwMTc4MzAwMTJIVEJGSjhFODdXQiZ3aWRnZXROYW1lPXNwX2F0ZiZhY3Rpb249Y2xpY2tSZWRpcmVjdCZkb05vdExvZ0NsaWNrPXRydWU="
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
# print(soup)
# BUG FIX: soup.find returns None when no element has id="Blog1" (likely on
# an Amazon product page — "Blog1" is a Blogger-template id; confirm the
# intended target), which made the original crash with AttributeError.
results = soup.find(id="Blog1")
if results is None:
    print("No element with id='Blog1' found on the page")
else:
    job_elements = results.find_all("h3", class_="post-title entry-title item-title")
    print(job_elements)
|
import random
import discord
from jb2.bot import bot
from jb2.embed import error_embed
def przondling(text):
przondling_factor = 0.3
letter_dict = {
'q': 'qwas',
'w': 'qweasd',
'e': 'wresfd',
'r': 'rtefdg',
't': 'yrtghf',
'y': 'uythg',
'u': 'iuyjkh',
'i': 'ioukjl',
'o': 'ipokl',
'p': 'pol',
'a': 'qasz',
's': 'wsadx',
'd': 'erdfsxc',
'f': 'rtdfgcv',
'g': 'tygfhbv',
'h': 'uyjghnb',
'j': 'uikjhmn',
'k': 'iojklm,',
'l': 'pokl',
'z': 'aszx',
'x': 'sdxzc',
'c': 'xcv',
'v': 'bvc',
'b': 'vbn',
'n': 'bmn',
'm': 'mnjkl'
}
out = ""
for l in list(text):
if l not in letter_dict:
out += l
continue
r = random.uniform(0.0, 1.0)
if r > 0.05:
new_text = l
else:
new_text = ""
while True:
r = random.uniform(0.0, 1.0)
if r < przondling_factor:
r2 = random.choice([1, 2])
char = random.choice(list(letter_dict[l]))
if r2 == 1:
new_text += char
else:
new_text = char + new_text
else:
break
out += new_text
return out
@bot.command()
async def przondlo(ctx, *args):
    """Bot command: reply with the "przondled" (keyboard-smudged) version of
    the given text, or with an error embed when no text was supplied."""
    if len(args) < 1:
        # Polish: "Provide text to przondle."
        emb = error_embed(ctx.author.mention, "Podaj tekst do przondłowania.")
    else:
        emoji = ':bee:'
        answer = przondling(' '.join(args))
        text = '{} {}: {}'.format(emoji, ctx.author.mention, answer)
        emb = discord.Embed(description=text, color=0xff7777)
    await ctx.send(embed=emb)
import torch
import torchvision
import torchvision.transforms as T
import random
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
import matplotlib.pyplot as plt
from cs231n.image_utils import SQUEEZENET_MEAN, SQUEEZENET_STD
from PIL import Image
def preprocess(img, size=224):
    """Resize a PIL image, normalize with SqueezeNet ImageNet statistics,
    and add a leading batch dimension.

    Returns a float tensor of shape (1, 3, H, W).
    """
    transform = T.Compose([
        T.Resize(size),
        T.ToTensor(),
        T.Normalize(mean=SQUEEZENET_MEAN.tolist(),
                    std=SQUEEZENET_STD.tolist()),
        T.Lambda(lambda x: x[None]),  # add batch dimension
    ])
    return transform(img)
def deprocess(img, should_rescale=True):
    """Invert ``preprocess``: drop the batch dimension, undo the SqueezeNet
    normalization, optionally rescale values into [0, 1], and return a PIL
    image."""
    transform = T.Compose([
        T.Lambda(lambda x: x[0]),  # drop batch dimension
        # Undo normalization in two steps: divide out std, add back mean.
        T.Normalize(mean=[0, 0, 0], std=(1.0 / SQUEEZENET_STD).tolist()),
        T.Normalize(mean=(-SQUEEZENET_MEAN).tolist(), std=[1, 1, 1]),
        T.Lambda(rescale) if should_rescale else T.Lambda(lambda x: x),
        T.ToPILImage(),
    ])
    return transform(img)
def rescale(x):
    """Linearly map the values of *x* onto the interval [0, 1]."""
    lo = x.min()
    return (x - lo) / (x.max() - lo)
def blur_image(X, sigma=1):
    """Gaussian-blur the batch X (N, C, H, W) along its two spatial axes.

    Mutates X in place (via copy_) and returns it.
    """
    blurred = X.cpu().clone().numpy()
    for axis in (2, 3):
        blurred = gaussian_filter1d(blurred, sigma, axis=axis)
    X.copy_(torch.Tensor(blurred).type_as(X))
    return X
# Download and load the pretrained SqueezeNet model.
model = torchvision.models.squeezenet1_1(pretrained=True)
# We don't want to train the model, so tell PyTorch not to compute gradients
# with respect to model parameters.
for param in model.parameters():
    param.requires_grad = False
# you may see warning regarding initialization deprecated, that's fine, please continue to next steps
from cs231n.data_utils import load_imagenet_val
# Load 5 sample ImageNet validation images: X (images), y (labels),
# class_names (label index -> human-readable name).
X, y, class_names = load_imagenet_val(num=5)
# Optional preview of the loaded images:
# plt.figure(figsize=(12, 6))
# for i in range(5):
#     plt.subplot(1, 5, i + 1)
#     plt.imshow(X[i])
#     plt.title(class_names[y[i]])
#     plt.axis('off')
# plt.gcf().tight_layout()
# plt.show()
# Example of using gather to select one entry from each row in PyTorch
def gather_example():
    """Print a random score matrix, a label vector, and the one score per
    row that ``gather`` selects using those labels."""
    num_rows, num_classes = 4, 5
    scores = torch.randn(num_rows, num_classes)
    labels = torch.LongTensor([1, 2, 1, 3])
    print(scores)
    print(labels)
    # gather(1, idx) picks scores[i, labels[i]] for every row i.
    print(scores.gather(1, labels.view(-1, 1)).squeeze())
gather_example()
def compute_saliency_maps(X, y, model):
    """
    Compute a class saliency map using the model for images X and labels y.

    Input:
    - X: Input images; Tensor of shape (N, 3, H, W)
    - y: Labels for X; LongTensor of shape (N,)
    - model: A pretrained CNN that will be used to compute the saliency map.

    Returns:
    - saliency: A Tensor of shape (N, H, W) giving the saliency maps for the
      input images.
    """
    # Inference mode for the network; gradients will flow into X itself.
    model.eval()
    X.requires_grad_()
    # Score of the correct class for every image in the batch.
    correct_class_scores = model(X).gather(1, y.view(-1, 1)).squeeze()
    # Summing combines the per-image terms; backward fills X.grad.
    correct_class_scores.sum().backward()
    # Saliency = channel-wise maximum of the absolute input gradient.
    return X.grad.data.abs().max(dim=1)[0]
def show_saliency_maps(X, y):
    """Plot each image in X (top row) above its saliency map (bottom row).

    Uses the module-level ``model`` and ``class_names``.
    """
    # Convert X and y from numpy arrays to Torch Tensors
    X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
    y_tensor = torch.LongTensor(y)
    # Compute saliency maps for images in X
    saliency = compute_saliency_maps(X_tensor, y_tensor, model)
    # Convert the saliency map from Torch Tensor to numpy array and show images
    # and saliency maps together.
    saliency = saliency.numpy()
    N = X.shape[0]
    for i in range(N):
        plt.subplot(2, N, i + 1)          # top row: original image
        plt.imshow(X[i])
        plt.axis('off')
        plt.title(class_names[y[i]])
        plt.subplot(2, N, N + i + 1)      # bottom row: saliency heat map
        plt.imshow(saliency[i], cmap=plt.cm.hot)
        plt.axis('off')
        plt.gcf().set_size_inches(12, 5)
    plt.show()
# show_saliency_maps(X, y)
def make_fooling_image(X, target_y, model):
    """
    Generate a fooling image that is close to X, but that the model classifies
    as target_y.

    Inputs:
    - X: Input image; Tensor of shape (1, 3, 224, 224)
    - target_y: An integer in the range [0, 1000)
    - model: A pretrained CNN

    Returns:
    - X_fooling: An image that is close to X, but that is classifed as target_y
      by the model.
    """
    # Start from a copy of the input and track gradients on the pixels.
    X_fooling = X.clone().requires_grad_()
    learning_rate = 1

    # Gradient ascent on the target-class score; stop once the model is fooled
    # (most examples need far fewer than 100 steps).
    for _ in range(100):
        scores = model(X_fooling)
        if torch.argmax(scores).item() == target_y:
            break
        target_score = scores[0, target_y]
        target_score.backward()
        # Normalized step: dX = learning_rate * g / ||g||_2
        grad = X_fooling.grad
        with torch.no_grad():
            X_fooling += learning_rate * grad / torch.norm(grad, p=2)
        # Clear accumulated gradients before the next iteration.
        model.zero_grad()
        X_fooling.grad.zero_()
    return X_fooling
# Pick one image from the batch and a target class to fool the model into.
idx = 0
target_y = 6
# Preprocess all images into a single (N, 3, 224, 224) batch.
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
# Run gradient ascent until the model (mis)classifies the image as target_y.
X_fooling = make_fooling_image(X_tensor[idx:idx+1], target_y, model)
scores = model(X_fooling)
# assert target_y == scores.data.max(1)[1][0].item(), 'The model is not fooled!'
# Undo the preprocessing to get a displayable uint8 numpy image.
X_fooling_np = deprocess(X_fooling.clone())
X_fooling_np = np.asarray(X_fooling_np).astype(np.uint8)
def jitter(X, ox, oy):
    """
    Helper function to randomly jitter an image.

    Inputs
    - X: PyTorch Tensor of shape (N, C, H, W)
    - ox, oy: Integers giving number of pixels to jitter along W and H axes

    Returns: A new PyTorch Tensor of shape (N, C, H, W)
    """
    shifted = X
    if ox != 0:
        # Cyclically rotate the width axis by ox pixels.
        shifted = torch.cat((shifted[:, :, :, -ox:], shifted[:, :, :, :-ox]), dim=3)
    if oy != 0:
        # Cyclically rotate the height axis by oy pixels.
        shifted = torch.cat((shifted[:, :, -oy:], shifted[:, :, :-oy]), dim=2)
    return shifted
def create_class_visualization(target_y, model, dtype, **kwargs):
    """
    Generate an image to maximize the score of target_y under a pretrained model.

    Inputs:
    - target_y: Integer in the range [0, 1000) giving the index of the class
    - model: A pretrained CNN that will be used to generate the image
    - dtype: Torch datatype to use for computations

    Keyword arguments:
    - l2_reg: Strength of L2 regularization on the image
    - learning_rate: How big of a step to take
    - num_iterations: How many iterations to use
    - blur_every: How often to blur the image as an implicit regularizer
    - max_jitter: How much to jitter the image as an implicit regularizer
    - show_every: How often to show the intermediate result
    """
    model.type(dtype)
    # Hyperparameters, all overridable through kwargs.
    l2_reg = kwargs.pop('l2_reg', 1e-3)
    learning_rate = kwargs.pop('learning_rate', 25)
    num_iterations = kwargs.pop('num_iterations', 100)
    blur_every = kwargs.pop('blur_every', 10)
    max_jitter = kwargs.pop('max_jitter', 16)
    show_every = kwargs.pop('show_every', 25)
    # Randomly initialize the image as a PyTorch Tensor, and make it requires gradient.
    img = torch.randn(1, 3, 224, 224).mul_(1.0).type(dtype).requires_grad_()
    for t in range(num_iterations):
        # Randomly jitter the image a bit; this gives slightly nicer results
        ox, oy = random.randint(0, max_jitter), random.randint(0, max_jitter)
        img.data.copy_(jitter(img.data, ox, oy))
        ########################################################################
        # TODO: Use the model to compute the gradient of the score for the     #
        # class target_y with respect to the pixels of the image, and make a   #
        # gradient step on the image using the learning rate. Don't forget the #
        # L2 regularization term!                                              #
        # Be very careful about the signs of elements in your code.            #
        ########################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Gradient ascent step on the L2-regularized class score.
        score = model(img)
        correct_score = score[0, target_y]
        # Objective: class score minus an L2 penalty on the image's norm.
        not_image_i_star = correct_score - (l2_reg * torch.norm(img, p=2))
        # i_star = correct_score - l2_reg * img / torch.norm(img, p=2) # Isn't this the way they say to do it in the equation provided?
        not_image_i_star.backward()
        # Step in the direction of the normalized image gradient.
        dX = learning_rate * img.grad / torch.norm(img.grad, p=2)
        img.data += dX
        model.zero_grad()
        img.grad.data.zero_()
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        ########################################################################
        #                             END OF YOUR CODE                         #
        ########################################################################
        # Undo the random jitter
        img.data.copy_(jitter(img.data, -ox, -oy))
        # As regularizer, clamp and periodically blur the image
        for c in range(3):
            lo = float(-SQUEEZENET_MEAN[c] / SQUEEZENET_STD[c])
            hi = float((1.0 - SQUEEZENET_MEAN[c]) / SQUEEZENET_STD[c])
            img.data[:, c].clamp_(min=lo, max=hi)
        if t % blur_every == 0:
            blur_image(img.data, sigma=0.5)
        # Periodically show the image
        if t == 0 or (t + 1) % show_every == 0 or t == num_iterations - 1:
            plt.imshow(deprocess(img.data.clone().cpu()))
            class_name = class_names[target_y]
            plt.title('%s\nIteration %d / %d' % (class_name, t + 1, num_iterations))
            plt.gcf().set_size_inches(4, 4)
            plt.axis('off')
            plt.show()
    return deprocess(img.data.cpu())
dtype = torch.FloatTensor
# dtype = torch.cuda.FloatTensor # Uncomment this to use GPU
model.type(dtype)
# Class indices into the model's 1000 output categories.
target_y = 76 # Tarantula
# target_y = 78 # Tick
# target_y = 187 # Yorkshire Terrier
# target_y = 683 # Oboe
# target_y = 366 # Gorilla
# target_y = 604 # Hourglass
out = create_class_visualization(target_y, model, dtype)
# target_y = 78 # Tick
# target_y = 187 # Yorkshire Terrier
# target_y = 683 # Oboe
# target_y = 366 # Gorilla
# target_y = 604 # Hourglass
# Repeat the visualization for a randomly chosen class.
target_y = np.random.randint(1000)
print(class_names[target_y])
# NOTE(review): this rebinds X (previously the image batch) to the generated
# visualization - anything below that expects the original X will break.
X = create_class_visualization(target_y, model, dtype)
print('stall')
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : sim_app
# @Time : 2021/9/1 下午2:45
# @Author : yuanjie
# @WeChat : 313303303
# @Software : PyCharm
# @Description : https://blog.csdn.net/Datawhale/article/details/107053926
# http://cw.hubwiz.com/card/c/streamlit-manual/1/6/50/
import wget
import zipfile
import streamlit as st
from meutils.pipe import *
from meutils.log_utils import logger4wecom
from bertzoo.simbert2vec import Simbert2vec
from gensim.models import KeyedVectors
# Static file server that hosts the precomputed word vectors (vecs.txt).
data_server = 'http://101.34.187.143:8000'
def get_model():
    """Download (if necessary) the SimBERT checkpoint and word vectors, then load both.

    Returns:
        A tuple ``(s2v, model)``: the SimBERT sentence encoder and the
        KeyedVectors word-vector index used for ANN lookup.
    """
    # The two artefacts are independent: fetch each one only if it is missing.
    # (Previously vecs.txt was only downloaded when the zip was missing, so a
    # deleted vecs.txt was never restored.)
    if not Path('vecs.txt').exists():
        wget.download(f"{data_server}/vecs.txt")
    if not Path('chinese_roformer-sim-char-ft_L-6_H-384_A-6.zip').exists():
        wget.download(
            "https://raw.githubusercontent.com/Jie-Yuan/AppZoo/master/appzoo/apps_streamlit/simbert/chinese_roformer-sim-char-ft_L-6_H-384_A-6.zip")
        # Extract with the stdlib only; the original additionally shelled out
        # to `unzip`, unpacking the same archive twice and failing on hosts
        # without the unzip binary.
        with zipfile.ZipFile("chinese_roformer-sim-char-ft_L-6_H-384_A-6.zip", "r") as zf:
            zf.extractall()
    s2v = Simbert2vec('chinese_roformer-sim-char-ft_L-6_H-384_A-6')
    model = KeyedVectors.load_word2vec_format('vecs.txt', no_header=True)
    return s2v, model
# Load the encoder and the vector index when the script (re)runs.
s2v, model = get_model()
@lru_cache()
def text2vec(text='年收入'):
    """Encode a single text into its SimBERT vector (memoized per input)."""
    vectors = s2v.encoder([text], output_dim=None)
    return vectors[0]
# UI
st.markdown(
    """
    # 字段名检索
    实现方式:simbert + ann
    """
)
# Sidebar controls: query text plus the number of neighbours to retrieve.
text = st.sidebar.text_input('字段', value="东北证券") # st.text_area('xx', value="小米\n苹果")
topn = st.sidebar.slider('召回数', value=20, min_value=1, max_value=100)
# Embed the query and look up its nearest neighbours in the vector index.
text2score = model.similar_by_vector(text2vec(text), topn=topn)
df = pd.DataFrame(text2score, columns=['text', 'score'])
# if st.checkbox('是否将查询结果发送到企业微信'):
# logger4wecom(text, f"`{dict(text2score)}`")
if st.checkbox('是否输出json', value=True):
    st.sidebar.json(dict(text2score))
# st.dataframe(df)
st.table(df)
# pandas plotly_chart
import cufflinks as cf
cf.set_config_file(offline=True)
# Render the scores as an interactive bar chart via cufflinks/plotly.
fig = df.iplot('bar', orientation='v', x='text', y='score', sortbars=True, asFigure=True)
st.plotly_chart(fig, use_container_width=True)
# st.sidebar.plotly_chart(fig)
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Silo(AutotoolsPackage):
    """Silo is a library for reading and writing a wide variety of scientific
    data to binary, disk files."""

    homepage = "http://wci.llnl.gov/simulation/computer-codes/silo"
    url = "https://wci.llnl.gov/content/assets/docs/simulation/computer-codes/silo/silo-4.10.2/silo-4.10.2.tar.gz"

    # The "-bsd" tarball is fetched from its own URL (different license build).
    version('4.10.2', sha256='3af87e5f0608a69849c00eb7c73b11f8422fa36903dd14610584506e7f68e638', preferred=True)
    version('4.10.2-bsd', sha256='4b901dfc1eb4656e83419a6fde15a2f6c6a31df84edfad7f1dc296e01b20140e',
            url="https://wci.llnl.gov/content/assets/docs/simulation/computer-codes/silo/silo-4.10.2/silo-4.10.2-bsd.tar.gz")
    version('4.9', sha256='90f3d069963d859c142809cfcb034bc83eb951f61ac02ccb967fc8e8d0409854')
    version('4.8', sha256='c430c1d33fcb9bc136a99ad473d535d6763bd1357b704a915ba7b1081d58fb21')

    variant('fortran', default=True, description='Enable Fortran support')
    variant('shared', default=True, description='Build shared libraries')
    variant('silex', default=False,
            description='Builds Silex, a GUI for viewing Silo files')
    variant('pic', default=True,
            description='Produce position-independent code (for shared libs)')
    variant('mpi', default=True,
            description='Compile with MPI Compatibility')

    # Silo releases up to 4.10.2 are pinned to HDF5 1.10.x or older.
    depends_on('hdf5@:1.10.999', when='@:4.10.2')
    depends_on('hdf5~mpi', when='~mpi')
    depends_on('mpi', when='+mpi')
    depends_on('hdf5+mpi', when='+mpi')
    # The Silex GUI needs Qt4 (non-framework build) and X11.
    depends_on('qt~framework@4.8:4.9', when='+silex')
    depends_on('libx11', when='+silex')
    depends_on('readline')
    depends_on('zlib')

    patch('remove-mpiposix.patch', when='@4.8:4.10.2')

    def flag_handler(self, name, flags):
        """Extend compiler/linker flags: link libdl/readline when HDF5 is
        static, and add PIC flags when the 'pic' variant is on."""
        spec = self.spec
        if name == 'ldflags':
            if spec['hdf5'].satisfies('~shared'):
                flags.append('-ldl')
            flags.append(spec['readline'].libs.search_flags)
        if '+pic' in spec:
            if name == 'cflags':
                flags.append(self.compiler.cc_pic_flag)
            elif name == 'cxxflags':
                flags.append(self.compiler.cxx_pic_flag)
            elif name == 'fcflags':
                flags.append(self.compiler.fc_pic_flag)
        # Return the flags through Spack's flag_handler protocol.
        return (flags, None, None)

    @when('%clang@9:')
    def patch(self):
        """Rename identifiers that collide with Clang 9's <math.h> macros."""
        # Clang 9 and later include macro definitions in <math.h> that conflict
        # with typedefs DOMAIN and RANGE used in Silo plugins.
        # It looks like the upstream fpzip repo has been fixed, but that change
        # hasn't yet made it into silo.
        # https://github.com/LLNL/fpzip/blob/master/src/pcmap.h
        def repl(match):
            # Change macro-like uppercase to title-case.
            return match.group(1).title()
        files_to_filter = [
            "src/fpzip/codec.h",
            "src/fpzip/pcdecoder.inl",
            "src/fpzip/pcencoder.inl",
            "src/fpzip/pcmap.h",
            "src/fpzip/pcmap.inl",
            "src/fpzip/read.cpp",
            "src/fpzip/write.cpp",
            "src/hzip/hzmap.h",
            "src/hzip/hzresidual.h",
        ]
        filter_file(r'\b(DOMAIN|RANGE|UNION)\b', repl, *files_to_filter)

    def configure_args(self):
        """Assemble ./configure arguments from the active variants."""
        spec = self.spec
        config_args = [
            '--with-hdf5=%s,%s' % (spec['hdf5'].prefix.include,
                                   spec['hdf5'].prefix.lib),
            '--with-zlib=%s,%s' % (spec['zlib'].prefix.include,
                                   spec['zlib'].prefix.lib),
            '--enable-install-lite-headers',
            '--enable-fortran' if '+fortran' in spec else '--disable-fortran',
            '--enable-silex' if '+silex' in spec else '--disable-silex',
            '--enable-shared' if '+shared' in spec else '--disable-shared',
        ]
        if '+silex' in spec:
            x = spec['libx11']
            config_args.extend([
                '--with-Qt-dir=' + spec['qt'].prefix,
                '--with-Qt-lib=QtGui -lQtCore',
                '--x-includes=' + x.prefix.include,
                '--x-libraries=' + x.prefix.lib,
            ])
        if '+mpi' in spec:
            # Build with the MPI compiler wrappers.
            config_args.append('CC=%s' % spec['mpi'].mpicc)
            config_args.append('CXX=%s' % spec['mpi'].mpicxx)
            config_args.append('FC=%s' % spec['mpi'].mpifc)
        return config_args
|
from plugin.core.database.manager import DatabaseManager
from stash import Stash, ApswArchive
from threading import Lock, Thread
import logging
import time
# Default stash serializer URI used when callers don't specify one.
DEFAULT_SERIALIZER = 'msgpack:///'
log = logging.getLogger(__name__)
class CacheManager(object):
    """Registry of named `Cache` objects plus a shared background flush thread.

    Caches are created lazily via `get`/`open`; a single daemon thread
    periodically flushes any cache whose deadline has passed.
    """
    # Opened caches, keyed by "<database>.<table>".
    active = {}

    # Guards `active` against concurrent open/get calls.
    _lock = Lock()
    # Seconds between flush scans.
    _process_interval = 10
    _process_running = True
    _process_thread = None

    @classmethod
    def get(cls, key, serializer=DEFAULT_SERIALIZER):
        """Return the cache registered under `key`, opening it if needed."""
        with cls._lock:
            if key in cls.active:
                return cls.active[key]
            # Lock is already held, so open without re-acquiring it.
            return cls.open(
                key,
                serializer=serializer,
                block=False
            )

    @classmethod
    def open(cls, key, serializer=DEFAULT_SERIALIZER, block=True):
        """Construct and register the cache for `key`.

        `block=False` skips acquiring `_lock` (caller must already hold it).
        """
        if block:
            # Open cache in lock
            with cls._lock:
                return cls.open(
                    key,
                    serializer=serializer,
                    block=False
                )
        # Construct cache
        cls.active[key] = Cache(
            key,
            serializer=serializer
        )
        # Ensure process thread has started
        cls._start()
        # Return cache
        log.debug('Opened "%s" cache (serializer: %r)', key, serializer)
        return cls.active[key]

    @classmethod
    def _start(cls):
        """Start the flush thread once (no-op on subsequent calls)."""
        if cls._process_thread is not None:
            return
        cls._process_thread = Thread(name='CacheManager._process', target=cls._process)
        cls._process_thread.daemon = True
        cls._process_thread.start()

    @classmethod
    def _process(cls):
        """Thread entry point - log anything that escapes the flush loop."""
        try:
            cls._process_run()
        except Exception as ex:
            log.error('Exception raised in CacheManager: %s', ex, exc_info=True)

    @classmethod
    def _process_run(cls):
        """Periodically flush caches whose `flush_at` deadline has passed."""
        while cls._process_running:
            # Retrieve current time
            now = time.time()
            # Snapshot the active caches while holding the lock: iterating a
            # live dict view can fail if another thread opens a cache mid-scan.
            with cls._lock:
                caches = list(cls.active.values())
            # Sync caches that have been queued
            for cache in caches:
                if cache.flush_at is None or cache.flush_at > now:
                    continue
                cache.flush()
            time.sleep(cls._process_interval)
class Cache(object):
    """Write-back cache over a stash; flushed on a delay by `CacheManager`."""

    def __init__(self, key, serializer=DEFAULT_SERIALIZER):
        self.key = key
        self.stash = self._construct(key, serializer=serializer)
        # Epoch timestamp of the next queued flush (None when nothing queued).
        self._flush_at = None
        self._flush_lock = Lock()

    @property
    def flush_at(self):
        """Timestamp of the pending flush, or None when none is queued."""
        return self._flush_at

    @staticmethod
    def _construct(key, serializer=DEFAULT_SERIALIZER):
        """Build the backing stash for `key` ("<database>.<table>")."""
        # Parse `key`
        fragments = key.split('.')
        if len(fragments) != 2:
            raise ValueError('Invalid "key" format')
        database, table = tuple(fragments)
        # Construct cache
        return Stash(
            ApswArchive(DatabaseManager.cache(database), table),
            'lru:///?capacity=500&compact_threshold=1500',
            serializer=serializer,
            key_transform=(lambda k: str(k), lambda k: k)
        )

    def get(self, key, default=None):
        return self.stash.get(key, default)

    def prime(self, keys=None, force=False):
        return self.stash.prime(keys, force)

    def __getitem__(self, key):
        return self.stash[key]

    def __setitem__(self, key, value):
        self.stash[key] = value

    def flush(self, force=False):
        """Flush the stash to disk (serialized by `_flush_lock`)."""
        with self._flush_lock:
            self._flush(force=force)

    def _flush(self, force=False):
        if not force and self._flush_at is None:
            # Nothing queued and not forced - skip the write.
            return
        try:
            self.stash.flush()
            log.debug('Flushed "%s" cache', self.key)
        except Exception as ex:
            log.error('Unable to flush "%s" cache: %s', self.key, ex, exc_info=True)
        finally:
            self.flush_clear()

    def flush_queue(self, delay=120):
        """Schedule a flush `delay` seconds from now."""
        log.debug('Queued flush for "%s" cache in %ss', self.key, delay)
        # BUG FIX: honour the `delay` argument (was hard-coded to 120 seconds,
        # making the parameter - and the logged value - a lie).
        self._flush_at = time.time() + delay

    def flush_clear(self):
        """Cancel any queued flush."""
        if self._flush_at is None:
            return
        log.debug('Cleared flush for "%s" cache', self.key)
        self._flush_at = None
|
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torch
from samplers import TripletSampler
from utils import args
class BaseData(Dataset):
    """Dataset wrapper that augments training samples with triplet companions.

    In training mode each item carries its anchor plus one positive and the
    stacked negatives drawn by a `TripletSampler`.
    """

    def __init__(self, data, neg_samples, sampling_method="triplet"):
        self.data = data
        self.sampler = TripletSampler(self.data, neg_samples)
        self.is_train = data.train
        self.data_length = len(data)
        self.n_groundtruths = self.groundtruths_per_class()
        self.is_triplet = self.sampler.is_triplet

    def groundtruths_per_class(self):
        """Map each class id to the number of samples available for it."""
        return {
            class_id: len(class_idxs)
            for class_id, class_idxs in self.sampler.class_idxs.items()
        }

    def __getitem__(self, idx):
        """Return the anchor (plus pos/neg companions when training)."""
        anchor, anchor_target = self.data[idx]
        data_items = {"anchor": anchor, "anchor_target": anchor_target}
        if self.is_train:
            positive, negatives = self.__getitem_triplet(idx, anchor_target)
            data_items["pos"] = positive
            data_items["neg"] = torch.cat(negatives)
        return data_items

    def __getitem_triplet(self, idx, anchor_target):
        """Sample one positive and several negative examples for the anchor."""
        pos_id, neg_ids = self.sampler.sample_data(idx, anchor_target)
        positive = self.data[pos_id][0]
        negatives = [self.data[neg_id][0] for neg_id in neg_ids]
        return positive, negatives

    def __len__(self):
        return self.data_length

    def show_image(self, idx):
        """Display the raw image stored at `idx` via PIL."""
        to_pil = transforms.ToPILImage()
        to_pil(self.data.data[idx]).show()
def get_train_loader(train_partition):
    """Wrap the training partition in a shuffled DataLoader."""
    dataset = BaseData(train_partition, args.neg_samples)
    return DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        shuffle=True,
    )
def get_test_loaders(query_data, gallery_data):
    """Build evaluation DataLoaders for the query and gallery partitions."""
    loaders = []
    for partition in (query_data, gallery_data):
        dataset = BaseData(partition, args.neg_samples)
        loaders.append(DataLoader(
            dataset,
            batch_size=args.batch_size,
            num_workers=args.num_workers,
        ))
    return tuple(loaders)
|
from jina import Executor, requests
import rocketqa
class RocketQADualEncoder(Executor):
    """
    Calculate the `embedding` of the passages and questions with RocketQA Dual-Encoder models.
    """

    def __init__(self, model, use_cuda=False, device_id=0, batch_size=1, *args, **kwargs):
        """
        :param model: A model name returned by `rocketqa.available_models()` or the path of a
            user-specified checkpoint config
        :param use_cuda: Set to `True` (default: `False`) to use GPU
        :param device_id: The GPU device id to load the model. Set to integers starting from 0
            to `N`, where `N` is the number of GPUs minus 1.
        :param batch_size: the batch size during inference.
        """
        super().__init__(*args, **kwargs)
        self.encoder = rocketqa.load_model(model=model, use_cuda=use_cuda, device_id=device_id, batch_size=batch_size)
        # Batch size reused when grouping documents at index time.
        self.b_s = batch_size

    @requests(on='/index')
    def encode_passage(self, docs, **kwargs):
        """Embed passages; each doc must carry both `title` and `para` tags."""
        batch_generator = (
            docs.traverse_flat(
                traversal_paths='r',
                # Skip documents missing either required tag.
                filter_fn=lambda d: d.tags.get('title', None) is not None and d.tags.get('para', None) is not None)
            .batch(batch_size=self.b_s))
        for batch in batch_generator:
            titles, paras = batch.get_attributes('tags__title', 'tags__para')
            para_embs = self.encoder.encode_para(para=paras, title=titles)
            for doc, emb in zip(batch, para_embs):
                doc.embedding = emb.squeeze()

    @requests(on='/search')
    def encode_question(self, docs, **kwargs):
        """Embed each query document's text, one document at a time."""
        for doc in docs:
            query_emb = self.encoder.encode_query(query=[doc.text])
            doc.embedding = query_emb.squeeze()
|
import os
import json
import statistics
import pandas as pd
import numpy as np
class DataSet:
    """Thin wrapper around a pandas DataFrame with simple metadata helpers."""

    def __init__(self, D, name="default"):
        """
        Parameters
        ----------
        D : pandas.DataFrame
            The underlying data table.
        name : str
            Human-readable name stored in the metadata.
        """
        self.D = D
        self.n, self.d = self.D.shape
        self.name = name

    def getResource(self, index):
        """Return the row at positional `index` as a Series."""
        return self.D.iloc[index, :]

    def saveMetaData(self, filepath):
        """Write {d, n, name} to `filepath` as JSON and return the JSON string."""
        metadata = dict(d=self.d, n=self.n, name=self.name)
        string = json.dumps(metadata, indent=2)
        with open(filepath, 'w') as f:
            f.write(string)
        return string

    def getMatrix(self):
        """Return the data as a numpy array.

        Uses `DataFrame.to_numpy()`: the original `as_matrix()` was deprecated
        in pandas 0.23 and removed in pandas 1.0.
        """
        return self.D.to_numpy()
def convertDtype(l):
    """Coerce a list to a float ndarray, falling back to strings with 'NA' for nans.

    Parameters
    ----------
    l : list
        Raw column values (typically strings read from a CSV).

    Returns
    -------
    numpy.ndarray
        A float array when all values are numeric; otherwise a string array in
        which 'nan' entries have been replaced with 'NA'.
    """
    try:
        return np.array(l, dtype="float")
    except (ValueError, TypeError):
        # Not numeric - fall through to string handling below.
        # (Previously a bare `except:` which also swallowed KeyboardInterrupt.)
        pass
    l = np.array(l, dtype=str)
    l[l == 'nan'] = 'NA'
    return l
class CSVDataSet(DataSet):
    """ A dataset living locally in a .csv file
    """

    def __init__(self,
                 csv_path,
                 index_column=None,
                 NA_val=".",
                 name="mydataset"):
        """Load a CSV, coerce numeric columns, and drop all-null rows/columns.

        Parameters
        ----------
        csv_path : str
            Path of the .csv file to load.
        index_column : str, optional
            Column to use as the row index (removed from the data afterwards).
        NA_val : str
            Token representing missing values in the file.
        name : str
            Human-readable dataset name.
        """
        self.name = name
        # Load the data set
        D = pd.read_csv(csv_path, dtype="unicode")
        self.n, self.d = D.shape
        print("Dataset of size", self.n, "samples", self.d, "dimensions",
              "Loaded")
        # Convert to numeric all numeric rows
        D = D.replace(NA_val, "nan")
        print("Replacing all", NA_val, "with nan")
        d = []
        for c in D.columns:
            d.append(convertDtype(list(D[c])))
            print("Converting", c, end="\r\r")
        newcolumns = D.columns
        newindex = D.index
        D = list(d)
        D = pd.DataFrame(dict(zip(newcolumns, D)), index=newindex)
        # Set the index column as specified
        if index_column is not None:
            print("Setting index column as", index_column)
            D.index = D[index_column]
            print("Deleting", index_column, "from dataset")
            del D[index_column]
        self.D = D
        # Remove all columns which have all null values
        allnull = self.D.isnull().all(axis=0)
        for c in self.D.columns[allnull]:
            print("Removing column", c, "because it has all null values")
        keep = self.D.columns[~allnull]
        self.D = self.D[keep]
        # Remove all rows which have all null values
        allnull = self.D.isnull().all(axis=1)
        for r in self.D.index[allnull]:
            print("Removing row", r, "because it has all null values")
        keep = self.D.index[~allnull]
        self.D = self.D.loc[keep]
        n, d = self.D.shape
        print("Dataset of size", n, "samples", d, "dimensions", "Resulting")
        self.N = self.D.shape[0]

    def imputeColumns(self, numeric):
        """Keep only float columns, impute NaNs with column means, and drop
        degenerate (zero-variance) columns.

        Parameters
        ----------
        numeric : unused
            Retained for backward compatibility with existing callers.
        """
        # `as_matrix()` was removed in pandas 1.0; `to_numpy()` is the
        # supported replacement throughout this class.
        keep = (self.D.dtypes == "float64").to_numpy()
        for c in self.D.columns[~keep]:
            print("Removing column", c, "because it is not numeric")
        self.D = self.D[self.D.columns[keep]]
        cmean = self.D.mean(axis=0)
        values = dict(list(zip(self.D.columns, cmean.to_numpy())))
        #self.D.fillna(value=values, inplace=True)
        d = self.D.to_numpy()
        for i, c in enumerate(self.D.columns):
            print("Imputing column", c, "with value", values[c])
            d[:, i][np.isnan(d[:, i])] = values[c]
        D = pd.DataFrame(d)
        D.index = self.D.index
        D.index.names = self.D.index.names
        D.columns = self.D.columns
        D.columns.names = self.D.columns.names
        self.D = D
        allzero = np.all(self.D.to_numpy() == 0, axis=0)
        for c in self.D.columns[allzero]:
            print("Removing column", c, "because it has all zero values")
        # NOTE(review): the all-zero selection is immediately superseded by the
        # zero-std selection below; all-zero columns still get removed because
        # they also have zero standard deviation.
        allsame = np.std(self.D.to_numpy(), axis=0) == 0
        for c in self.D.columns[allsame]:
            print(
                "Removing column", c,
                "because it has all zero standard deviation (all values same)")
        keep = self.D.columns[~allsame]
        self.D = self.D[keep]
        n, d = self.D.shape
        print("Dataset of size", n, "samples", d, "dimensions", "Resulting")
        print("Dataset has", self.D.isnull().sum().sum(), "nans")
        print("Dataset has", np.sum(np.isinf(self.D.to_numpy())), "infs")

    def getResource(self, index):
        """Get a specific data point from the data set.

        Parameters
        ----------
        index : int or string
            The index of the data point in `D`, either positional or a string.

        Returns
        -------
        :obj:`ndarray`
            A ndarray of the data point.
        """
        if type(index) is int:
            return self.D.iloc[index].to_numpy()
        else:
            return self.D.loc[index].to_numpy()

    def getColumn(self, index):
        """Get a column of the dataframe.

        Parameters
        ----------
        index : int or string
            The index of the column in `D`, either positional or a string.

        Returns
        -------
        :obj:`ndarray`
            The values in the column.
        """
        if type(index) is int:
            return self.D.iloc[:, index].to_numpy()
        else:
            return self.D[index].to_numpy()

    def getColumnValues(self, index):
        """Get the unique values of a column.

        Parameters
        ----------
        index : int or string
            The index of the column in `D`, either positional or a string.

        Returns
        -------
        :obj:`ndarray`
            A ndarray of the unique values.
        """
        column = self.getColumn(index)
        if column.dtype == "float64":
            column = column[~np.isnan(column)]
        else:
            column = column[np.array([x != "NA" for x in column])]
        return np.unique(column)

    def getColumnDistribution(self, index):
        """Get the distribution of values in a column.

        Parameters
        ----------
        index : int or string
            The index of the column in `D`, either positional or a string.

        Returns
        -------
        :obj:`ndarray`, :obj:`ndarray`
            An array x of the unique labels, and an array y of the count of that label
        """
        x = self.getColumnValues(index)
        column = self.getColumn(index)
        y = [np.sum(column == v) for v in x]
        return x, y

    def getColumnNADist(self, index):
        """Return (na, not_na) counts of missing values in a column."""
        column = self.getColumn(index)
        if column.dtype == "float64":
            na = np.sum([np.isnan(x) for x in column])
            not_na = len(column) - na
            return na, not_na
        else:
            na = np.sum([x == "NA" for x in column])
            not_na = len(column) - na
            return na, not_na
        # (An unreachable duplicate `return` after this if/else was removed.)

    def getColumnDescription(self, index, sep="\n"):
        """Get a description of the column.
        """
        desc = []
        if type(index) is int:
            index = self.D.columns.values[index]
        for i, name in enumerate(self.D.columns.names):
            desc.append(name + ": " + index[i])
        return sep.join(desc)

    def getLevelValues(self, index):
        """Return the unique values of one level of the column MultiIndex."""
        return np.unique(self.D.columns.get_level_values(index))
|
"""
This package contains the JIRA implementations of the interfaces in
server.git.Interfaces.
"""
from urllib.parse import urljoin
import logging
import os
from oauthlib.oauth1 import SIGNATURE_RSA
from requests_oauthlib import OAuth1
from IGitt.Interfaces import get
from IGitt.Interfaces import Token
from IGitt.Utils import CachedDataMixin
from tests import PRIVATE_KEY
# JIRA endpoint configuration, overridable through the environment.
JIRA_INSTANCE_URL = os.environ.get('JIRA_INSTANCE_URL',
                                   'https://jira.atlassian.com')
BASE_URL = urljoin(JIRA_INSTANCE_URL, '/rest/api/2')
JIRA_RSA_PRIVATE_KEY_PATH = os.environ.get('JIRA_RSA_PRIVATE_KEY_PATH')
# Fallback key (from the test suite); replaced below when a real path is set.
JIRA_RSA_PRIVATE_KEY = PRIVATE_KEY
try:
    # Context manager ensures the key file handle is closed promptly
    # (the original `open(...).read()` leaked the handle).
    with open(JIRA_RSA_PRIVATE_KEY_PATH, 'r') as key_file:
        JIRA_RSA_PRIVATE_KEY = key_file.read().strip()
except (FileNotFoundError, TypeError):
    # TypeError covers JIRA_RSA_PRIVATE_KEY_PATH being None (env var unset).
    logging.warning('JIRA REST APIs work only with key signing, please '
                    'include the correct path to your registered RSA private '
                    'key.')
class JiraMixin(CachedDataMixin):
    """
    Common behaviour shared by every JIRA-backed object.
    """

    def _get_data(self):
        # Fetch this object's raw JSON from the JIRA REST API.
        return get(self._token, self.url)

    @staticmethod
    def absolute_url(url):
        """
        Joins the given relative ``url`` onto the API base URL.
        """
        return BASE_URL + url

    @property
    def hoster(self):
        """
        The hosting-service identifier; always ``jira``.
        """
        return 'jira'

    @property
    def url(self):
        """
        The absolute JIRA REST API URL for this object.
        """
        return self.absolute_url(self._url)

    @property
    def web_url(self):
        """
        The browser-facing link for this object; subclasses must override.
        """
        raise NotImplementedError

    def __repr__(self):  # not covered by tests
        return '<{} object(url={}) at {}>'.format(type(self).__name__,
                                                  self.url,
                                                  hex(id(self)))
class JiraOAuth1Token(Token):
    """
    Holder for JIRA OAuth v1.0 credentials.
    """

    def __init__(self, client_key, key, secret):
        self.client_key = client_key
        self.key = key
        self.secret = secret

    @property
    def headers(self):
        # Signing is handled by `auth`; no extra request headers are needed.
        return {}

    @property
    def parameter(self):
        # Likewise, no query parameters are required.
        return {}

    @property
    def value(self):
        return {
            'client_key': self.client_key,
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }

    @property
    def auth(self):
        """
        A `requests_oauthlib.OAuth1` helper that RSA-signs outgoing requests.
        """
        return OAuth1(
            self.client_key,
            rsa_key=JIRA_RSA_PRIVATE_KEY,
            resource_owner_key=self.key,
            resource_owner_secret=self.secret,
            signature_method=SIGNATURE_RSA,
            signature_type='auth_header',
        )
|
#!/usr/bin/env python3
"""Rewrap lines without breaking words"""
import sys
import argparse
import textwrap
def main():
    """Rewrap the input file's lines to the requested width.

    Each input line is wrapped independently without breaking words.  Blank
    (or whitespace-only) lines are preserved: `textwrap.wrap()` returns an
    empty list for them, and the original code silently dropped them, which
    merged paragraphs together.  The files are also closed explicitly so the
    output is fully flushed even when `main()` is called from another module.
    """
    parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__)
    parser.add_argument('infile', type=argparse.FileType('r', encoding='UTF-8'), help='Input file')
    parser.add_argument('outfile', type=argparse.FileType('w', encoding='UTF-8'), help='Output file')
    parser.add_argument('width', type=int, help='Output line width')
    args = parser.parse_args()
    with args.infile, args.outfile:
        for line in args.infile:
            wrapped = textwrap.wrap(line, args.width)
            if not wrapped:
                # Keep paragraph breaks: emit the blank line instead of
                # dropping it.
                args.outfile.write('\n')
            for chunk in wrapped:
                args.outfile.write(chunk + '\n')


if __name__ == '__main__':
    main()
|
import logging
import tmllc
# Timestamped INFO-level logging for all tmllc modules.
logging.basicConfig(format='%(asctime)s %(levelname)s: %(name)s(%(funcName)s): %(message)s', level=logging.INFO)
# Transforming the TESS files into big tables that are saved in pickle form
# TODO allow for a function to be applied to the data first!
# The four labels name the light-curve classes to convert; "TIME" is the
# column used as the time axis.
tmllc.utils.fits2pickle(["planet", "none", "eb", "backeb"], timeColumn="TIME")
##Write code that uses the string stored in org and creates an acronym which is assigned to the variable acro. Only the first letter of each word should be used, each letter in the acronym should be a capital letter, and there should be nothing to separate the letters of the acronym. Words that should not be included in the acronym are stored in the list stopwords. For example, if org was assigned the string “hello to world” then the resulting acronym should be “HW”.
stopwords = ['to', 'a', 'for', 'by', 'an', 'am', 'the', 'so', 'it', 'and', "The"]
org = "The organization for health, safety, and education"
orglst = org.split()
# Collect the first letter of every non-stopword word, then capitalize them.
letters = []
for word in orglst:
    if word not in stopwords:
        letters.append(word[0])
acro = "".join(letters).upper()
import pandas as pd
import numpy as np
import numerical as mynum
def test_filter_constant():
    """df_filter_constant should keep only the requested constant columns."""
    frame = pd.DataFrame({
        "time": np.arange(5),
        "one": np.ones(5),
        "zero": np.zeros(5),
    })
    expected = pd.DataFrame({"time": np.arange(5), "zero": np.zeros(5)})
    filtered = mynum.df_filter_constant(frame, ["zero"], VarThreshold=0)
    assert np.all(filtered == expected)
def test_autocorrelation():
    """The autocorrelation of the 'four' column should match the known result."""
    frame = pd.DataFrame({
        "one": [1, 0, 0],
        "two": [0, 1, 0],
        "three": [0, 0, 1],
        "four": [1, 1, 1],
    })
    result = mynum.autocorrelation(frame, ["four"])[-1]
    assert np.all(result == np.array([1, 0, 0], dtype=complex))
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: jar.proto
import sys
# py2/py3 shim: serialized descriptors need latin1-encoded bytes on py3.
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# Descriptor for jar.proto: a single `jar` message with two required fields,
# scan_id (int32, field 1) and url (string, field 2).
DESCRIPTOR = _descriptor.FileDescriptor(
    name='jar.proto',
    package='',
    syntax='proto2',
    serialized_options=None,
    serialized_pb=_b(
        '\n\tjar.proto\"#\n\x03jar\x12\x0f\n\x07scan_id\x18\x01 \x02(\x05\x12\x0b\n\x03url\x18\x02 \x02(\t')
)
_JAR = _descriptor.Descriptor(
    name='jar',
    full_name='jar',
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    fields=[
        _descriptor.FieldDescriptor(
            name='scan_id', full_name='jar.scan_id', index=0,
            number=1, type=5, cpp_type=1, label=2,
            has_default_value=False, default_value=0,
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, file=DESCRIPTOR),
        _descriptor.FieldDescriptor(
            name='url', full_name='jar.url', index=1,
            number=2, type=9, cpp_type=9, label=2,
            has_default_value=False, default_value=_b("").decode('utf-8'),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, file=DESCRIPTOR),
    ],
    extensions=[
    ],
    nested_types=[],
    enum_types=[
    ],
    serialized_options=None,
    is_extendable=False,
    syntax='proto2',
    extension_ranges=[],
    oneofs=[
    ],
    serialized_start=13,
    serialized_end=48,
)
DESCRIPTOR.message_types_by_name['jar'] = _JAR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Generated message class for `jar`, registered in the default symbol database.
jar = _reflection.GeneratedProtocolMessageType('jar', (_message.Message,), dict(
    DESCRIPTOR=_JAR,
    __module__='jar_pb2'
    # @@protoc_insertion_point(class_scope:jar)
))
_sym_db.RegisterMessage(jar)
# @@protoc_insertion_point(module_scope)
|
"""
--- Day 22: Slam Shuffle ---
https://adventofcode.com/2019/day/22
"""
import re
import aocd
DATA = aocd.data.splitlines()
# --- Part One: follow the position of card 2019 through one shuffle pass ---
nCards = 10007
# Each technique maps a card's current position p to its new position.
techniques = {
    re.compile(r"deal into new stack"): lambda p: nCards - 1 - p,
    re.compile(r"cut (-?\d+)"): lambda p, n: (p - int(n)) % nCards,
    re.compile(r"deal with increment (\d+)"): lambda p, n: (p * int(n)) % nCards,
}
pos = 2019
for shuffle in DATA:
    for pattern, func in techniques.items():
        if m := re.fullmatch(pattern, shuffle):
            pos = func(pos, *m.groups())
print("Part One:", pos)
# --- Part Two: the deck is far too large to simulate card by card, so the
# whole deck is represented as an affine sequence instead. ---
nCards = 119315717514047
nShuffles = 101741582076661
# First denotes the number of the first card in the deck.
# Increment denotes the difference between two consecutive numbers.
# The number of the card at a position p then corresponds to first + increment * p.
first, increment = 0, 1
def inv(n: int) -> int:
    """Return the modular inverse of n modulo the deck size ``nCards``.

    Uses the three-argument ``pow`` with exponent -1 (Python 3.8+, which the
    file already requires for the walrus operator). Unlike the previous
    Fermat's-little-theorem form ``pow(n, nCards - 2, nCards)``, this is
    correct for ANY modulus as long as gcd(n, nCards) == 1, not only for
    prime nCards, and raises ValueError when no inverse exists instead of
    silently returning a wrong value.
    """
    return pow(n, -1, nCards)
def reverse(first, increment):
    """Apply "deal into new stack": the order flips, so the step negates
    and the old second card becomes the new first card."""
    flipped = -increment
    return (first + flipped) % nCards, flipped % nCards
def cut(first, increment, n):
    """Apply "cut n": the card n positions into the deck becomes the first."""
    shifted = first + int(n) * increment
    return shifted % nCards, increment
def spread(first, increment, n):
    """Apply "deal with increment n": the front card stays put while the
    i-th card moves to position i*n, so the step between consecutive
    positions is scaled by the modular inverse of n."""
    scaled = increment * inv(int(n))
    return first, scaled % nCards
# Same dispatch table as Part One, but each technique now transforms the
# (first, increment) description of the whole deck.
techniques = {
    re.compile(r"deal into new stack"): reverse,
    re.compile(r"cut (-?\d+)"): cut,
    re.compile(r"deal with increment (\d+)"): spread,
}
# Apply one complete shuffle process to (first, increment).
for shuffle in DATA:
    for pattern, func in techniques.items():
        if m := re.fullmatch(pattern, shuffle):
            first, increment = func(first, increment, *m.groups())
# We now have the first and increment after one complete shuffle process.
# Applying another shuffle leads to:
#   first' = first + first * increment
#   increment' = increment * increment
# Thus, after n shuffles we get:
#   n=2: first + first * increment
#   n=3: first + first * increment + first * increment^2
#   n=4: first + first * increment + first * increment^2 + first * increment^3
#   ...
#   (geometric series) sum of first * increment^k = first * (1 - increment^n)/(1 - increment)
#
# and for the increment:
#   increment^n
first = (first * (1 - pow(increment, nShuffles, nCards)) * inv(1 - increment)) % nCards
increment = pow(increment, nShuffles, nCards)
# The answer is the number of the card that ends up at position 2020.
card = (first + 2020 * increment) % nCards
print("Part Two:", card)
|
# Importing libraries
import pandas as pd
import numpy as np
import math
import operator
import matplotlib.pyplot as plt
from sklearn import datasets, neighbors
from mlxtend.plotting import plot_decision_regions
#### Start of STEP 1
# Importing data. Presumably the columns are numeric features (years, genre,
# nationality per the disabled normalization below) with the class label in
# the last column — verify against artists1.csv.
data = pd.read_csv('artists1.csv')
# Normalization — currently disabled, so the distance functions below operate
# on raw, unscaled feature values.
#cols_to_norm = ['years', 'genre', 'nationality']
#data[cols_to_norm] = data[cols_to_norm].apply(lambda x: (x - x.min()) / (x.max()-x.min()) )
print(data.head(5))
# Defining a function which calculates euclidean distance between two data points
# Weighted Euclidean distance over exactly the first three features; the
# third feature's squared difference is down-weighted by 0.5. The `length`
# parameter is accepted for interface parity but not used.
def euclideanDistance(data1, data2, length):
    d0 = np.square(data1[0] - data2[0])
    d1 = np.square(data1[1] - data2[1])
    d2 = 0.5 * np.square(data1[2] - data2[2])
    return np.sqrt(d0 + d1 + d2)
# Defining a function which calculates manhattan distance between two data points
def manhattanDistance(data1, data2, length):
distance = 0
for x in range(length):
distance += np.abs((data1[x] - data2[x]))
return (distance)
def L3(data1, data2, length):
    """Cube-root of the sum of SIGNED cubed differences over `length` features.

    NOTE(review): the cubes keep their sign (square * difference), so a
    negative total raised to 1/3 yields nan with float math — this mirrors
    the original behavior exactly; confirm whether a true L3 norm
    (absolute values) was intended.
    """
    total = 0
    for i in range(length):
        diff = data1[i] - data2[i]
        total += np.square(diff) * diff
    return total ** (1 / 3)
# Defining our KNN model
def knn(trainingSet, testInstance, k):
distances = {}
sort = {}
length = testInstance.shape[1]
#### Start of STEP 3
# Calculating euclidean distance between each row of training data and test data
for x in range(len(trainingSet)):
#### Start of STEP 3.1
dist = euclideanDistance(testInstance, trainingSet.iloc[x], length)
distances[x] = dist[0]
#### End of STEP 3.1
#### Start of STEP 3.2
# Sorting them on the basis of distance
sorted_d = sorted(distances.items(), key=operator.itemgetter(1))
#### End of STEP 3.2
neighbors = []
#### Start of STEP 3.3
# Extracting top k neighbors
for x in range(k):
neighbors.append(sorted_d[x][0])
#### End of STEP 3.3
classVotes = {}
#### Start of STEP 3.4
# Calculating the most freq class in the neighbors
for x in range(len(neighbors)):
response = trainingSet.iloc[neighbors[x]][-1]
if response in classVotes:
classVotes[response] += 1
else:
classVotes[response] = 1
#### End of STEP 3.4
#### Start of STEP 3.5
sortedVotes = sorted(classVotes.items(), key=operator.itemgetter(1), reverse=True)
return(sortedVotes[0][0], neighbors)
#### End of STEP 3.5
# Single query point: [years, genre, nationality] — same feature order as the
# training data.
testSet = [[1798, 3, 9]]
test = pd.DataFrame(testSet)
#### Start of STEP 2
# Setting number of neighbors = 1
print('\n\nWith 1 Nearest Neighbour \n\n')
k = 1
#### End of STEP 2
# Running KNN model
result,neigh = knn(data, test, k)
# Predicted class
print('\nPredicted Class of the datapoint = ', result)
# Nearest neighbor
print('\nNearest Neighbour of the datapoints = ',neigh)
print('\n\nWith 3 Nearest Neighbours\n\n')
# Setting number of neighbors = 3
k = 3
# Running KNN model
result,neigh = knn(data, test, k)
# Predicted class
print('\nPredicted class of the datapoint = ',result)
# Nearest neighbor
print('\nNearest Neighbours of the datapoints = ',neigh)
print('\n\nWith 5 Nearest Neighbours\n\n')
# Setting number of neighbors = 5
k = 5
# Running KNN model
result,neigh = knn(data, test, k)
# Predicted class
print('\nPredicted class of the datapoint = ',result)
# Nearest neighbor
print('\nNearest Neighbours of the datapoints = ',neigh)
|
"""
HTTP utilities. Contains no business logic.
This mainly exists because we didn't want to depend on werkzeug.
"""
import re
from typing import Dict
from typing import Iterable
from typing import List
from typing import Tuple
# copied from werkzeug.http
_accept_re = re.compile(
    r"""
    (                       # media-range capturing-parenthesis
        [^\s;,]+            # type/subtype
        (?:[ \t]*;[ \t]*    # ";"
            (?:             # parameter non-capturing-parenthesis
                [^\s;,q][^\s;,]*  # token that doesn't start with "q"
            |               # or
                q[^\s;,=][^\s;,]*  # token that is more than just "q"
            )
        )*                  # zero or more parameters
    )                       # end of media-range
    (?:[ \t]*;[ \t]*q=      # weight is a "q" parameter
        (\d*(?:\.\d+)?)     # qvalue capturing-parentheses
        [^,]*               # "extension" accept params: who cares?
    )?                      # accept params are optional
    """,
    re.VERBOSE,
)
def parse_accept_header(value: str) -> List[Tuple[str, float]]:
    """Like werkzeug.http.parse_accept_header(), but returns a plain list
    of (media_range, quality) pairs, best quality first."""
    if not value:
        return []
    parsed = []
    for m in _accept_re.finditer(value):
        q_str = m.group(2)
        if q_str:
            # Clamp an explicit qvalue into the valid [0, 1] range.
            quality = max(min(float(q_str), 1), 0)
        else:
            quality = 1
        parsed.append((m.group(1), quality))
    parsed.sort(key=lambda pair: pair[1], reverse=True)
    return parsed
def unparse_accept_header(values: Iterable[Tuple[str, float]]) -> str:
    """Like werkzeug.datastructures.MIMEAccept(values).to_header().

    Emits the entries best-quality-first, omitting the q parameter when the
    quality is exactly 1.
    """
    ordered = sorted(values, key=lambda pair: pair[1], reverse=True)
    return ','.join(
        value if quality == 1 else f"{value};q={quality}"
        for value, quality in ordered
    )
def parse_options_header(value: str) -> Tuple[str, Dict[str, str]]:
    """Like werkzeug.http.parse_options_header(), but ignores the options."""
    mimetype, _sep, _options = value.partition(';')
    return mimetype.strip(), {}
|
# -*- coding: utf-8 -*-
#
# This file is part of the ska-tmc-common project
#
#
#
# Distributed under the terms of the BSD-3-Clause license.
# See LICENSE.txt for more info.
""" Tango Group Client Code
"""
# Tango imports
import tango
from tango import DevFailed
import logging
class TangoGroupClient:
    """
    Class for TangoGroupClient API

    Thin wrapper around tango.Group: creates the group, manages its member
    devices and forwards commands, logging and re-throwing DevFailed errors
    with a uniform origin string.
    """
    def __init__(self, group_name, logger):
        # Fall back to a module-level logger when the caller supplies none.
        # (Fixed: compare with `is None`, not `== None`.)
        if logger is None:
            self.logger = logging.getLogger(__name__)
        else:
            self.logger = logger
        self.group_name = group_name
        self.tango_group = self.get_tango_group(group_name)
    def get_tango_group(self, group_name):
        """
        Creates a Tango Group with given name and caches it on the instance.
        """
        self.tango_group = tango.Group(group_name)
        return self.tango_group
    def add_device(self, device_to_add):
        """
        Add device element in the Group.
        :param:
            device_to_add: string. Device FQDN to add in the group
        :returns: None
        :throws: DevFailed on failure in adding the device to the group.
        """
        try:
            log_msg = f"Adding in group: {device_to_add}."
            self.logger.debug(log_msg)
            self.tango_group.add(device_to_add)
        except DevFailed as dev_failed:
            self.logger.exception("Failed to add device")
            tango.Except.re_throw_exception(dev_failed,
                "Failed to add device",
                str(dev_failed),
                "TangoGroupClient.add_device()")
    def remove_device(self, device_to_remove):
        """
        Removes specified elements in the device_to_remove from the Group.
        :param:
            device_to_remove: string. FQDN of the device to be removed from group.
        :returns: None
        :throws: DevFailed on failure in removing the device from the group.
        """
        try:
            log_msg = f"Removing from group: {device_to_remove}."
            self.logger.debug(log_msg)
            self.tango_group.remove(device_to_remove)
        except DevFailed as dev_failed:
            self.logger.exception("Failed to remove device")
            tango.Except.re_throw_exception(dev_failed,
                "Failed to remove device",
                str(dev_failed),
                "TangoGroupClient.remove_device()")
    def delete_group(self, group_to_delete):
        """
        Deletes the Tango Group.
        :throws: DevFailed on failure in deleting the group.
        """
        try:
            log_msg = f"Deleting group: {group_to_delete}."
            self.logger.debug(log_msg)
            self.tango_group.delete(group_to_delete)
        except DevFailed as dev_failed:
            self.logger.exception("Failed to delete group")
            # Fixed copy-paste bug: the re-thrown reason previously read
            # "Failed to remove device".
            tango.Except.re_throw_exception(dev_failed,
                "Failed to delete group",
                str(dev_failed),
                "TangoGroupClient.delete_group()")
    def get_group_device_list(self, forward=True):
        """
        Returns the list of devices in the group
        :params:
            forward: bool (default True). Passed through to
            tango.Group.get_device_list; when True, members of sub-groups
            are included as well.
        :return: list. The list of devices
        :throws:
            DevFailed on failure in getting group device list.
        """
        try:
            # Fixed: `forward` was previously accepted but silently ignored.
            return self.tango_group.get_device_list(forward)
        except DevFailed as dev_failed:
            self.logger.exception("Failed to get group device list")
            tango.Except.re_throw_exception(dev_failed,
                "Failed to get group device list",
                str(dev_failed),
                "TangoGroupClient.get_group_device_list()")
    def remove_all_device(self):
        """
        Removes all the devices from the group.
        """
        self.logger.debug("Removing all devices from the group.")
        self.tango_group.remove_all()
    def send_command(self, command_name, command_data = None):
        """
        Invokes command on the Tango group synchronously.
        :param:
            command_name: string. Name of the command to be invoked
            command_data: (optional) Void. The arguments with the command.
        :returns: Sequence of tango.GroupCmdReply objects.
        :throws:
            DevFailed on failure in executing the command.
        """
        try:
            log_msg = f"Invoking {command_name} on {self.group_name} synchronously."
            self.logger.debug(log_msg)
            return self.tango_group.command_inout(command_name, command_data)
        except DevFailed as dev_failed:
            self.logger.exception("Failed to execute command .")
            tango.Except.re_throw_exception(dev_failed,
                "Failed to execute command.",
                str(dev_failed),
                "TangoGroupClient.send_command()")
    def send_command_async(self, command_name, command_data = None, callback_method = None):
        """
        Invokes command on the Tango group asynchronously.
        :param:
            command_name: string. Name of the command to be invoked
            command_data: (optional) Void. The arguments with the command.
            callback_method: The callback method that should be executed upon execution
        :returns: int. Request id returned by tango group. Pass this id to `get_command_reply`
        to retrieve the reply of the command.
        :throws:
            DevFailed on failure in executing the command.
        """
        try:
            log_msg = f"Invoking {command_name} on {self.group_name} asynchronously."
            self.logger.debug(log_msg)
            return self.tango_group.command_inout_asynch(command_name, command_data, callback_method)
        except DevFailed as dev_failed:
            self.logger.exception("Failed to execute command .")
            tango.Except.re_throw_exception(dev_failed,
                "Failed to execute command.",
                str(dev_failed),
                "TangoGroupClient.send_command_async()")
    def get_command_reply(self, command_id, timeout = 0):
        """
        Retrieves the response of the command
        :params:
            command_id: int. It is a request identifier previously returned by one of the
            command_inout_asynch methods.
            timeout: (optional) int. Timeout in milliseconds. If no timeout is mentioned,
            the API waits indefinitely.
        :returns:
            The results of an asynchronous command as tango.GroupCmdReply object.
        :throws:
            DevFailed on failure in executing the command.
        """
        try:
            log_msg = f"Retrieving response for command id: {command_id}."
            self.logger.debug(log_msg)
            return self.tango_group.command_inout_reply(command_id, timeout)
        except DevFailed as dev_failed:
            self.logger.exception("Failed to execute command .")
            tango.Except.re_throw_exception(dev_failed,
                "Failed to retrieve response.",
                str(dev_failed),
                "TangoGroupClient.get_command_reply()")
|
#Glenn Gasmen's struggle with recursive modes
from Xml.Xslt import test_harness
sheet_1 = """<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:ft="http://xmlns.4suite.org/ext"
xmlns:ot='http://namespaces.opentechnology.org/talk'
xmlns:dc='http://purl.org/metadata/dublin_core'
extension-element-prefixes='ft'
version="1.0"
>
<xsl:output method="html" encoding="ISO-8859-1"/>
<xsl:param name='user-mode' select='"full-story"'/>
<xsl:template match="/">
<HTML><HEAD><TITLE>Test Skin</TITLE></HEAD><BODY>
<ft:apply-templates mode='{$user-mode}'/>
</BODY></HTML>
</xsl:template>
<xsl:template match="ot:story" mode="front-page">
<DIV ALIGN="CENTER">
<DIV ALIGN="CENTER">
<xsl:value-of select="dc:title"/>
</DIV>
<TABLE WIDTH="100%" BORDER="0">
<TR>
<TD ALIGN="LEFT" WIDTH="33%">
<xsl:value-of select="dc:creator"/>
</TD>
<TD ALIGN="CENTER" WIDTH="33%">
<xsl:value-of select="dc:datetime"/>
</TD>
<TD ALIGN="RIGHT" WIDTH="33%">
<xsl:value-of select="ot:link"/>
</TD>
</TR>
</TABLE>
<DIV ALIGN="JUSTIFY">
<xsl:apply-templates select="dc:description"/>
</DIV>
</DIV>
</xsl:template>
<xsl:template match="ot:story" mode="full-story">
<DIV ALIGN="CENTER">
<xsl:value-of select="dc:title"/>
</DIV>
<TABLE WIDTH="100%" BORDER="0">
<TR>
<TD ALIGN="LEFT" WIDTH="50%">
<xsl:value-of select="dc:creator"/>
</TD>
<TD ALIGN="RIGHT" WIDTH="50%">
<xsl:value-of select="dc:datetime"/>
</TD>
</TR>
</TABLE>
<DIV ALIGN="JUSTIFY">
<xsl:apply-templates select="dc:content"/>
<BR/><BR/>
</DIV>
<DIV ALIGN="JUSTIFY">
<xsl:apply-templates select="dc:description"/>
<BR/><BR/>
</DIV>
</xsl:template>
<xsl:template match="ot:comment">
<DIV ALIGN="JUSTIFY">
<xsl:for-each select="dc:title"/>
</DIV>
</xsl:template>
</xsl:stylesheet>
"""
source_1 = """<result-set>
<!--
Notes:
* All elements in the valid dublic core meta-data 1.0 element set are
in the dc namespace. All other metadata are in the ot namespace.
* for info on Dublin Core, see
http://purl.org/DC/documents/rec-dces-19990702.htm and
http://purl.org/dc/documents/rec/dcmes-qualifiers-20000711.htm
-->
<ot:story
id='urn:uuid:this-is-bogus-uuid-1'
xmlns:ot='http://namespaces.opentechnology.org/talk'
xmlns:dc='http://purl.org/metadata/dublin_core'
xmlns='http://docbook.org/docbook/xml/4.0/namespace'
>
<dc:creator ot:id='urn:uuid:this-is-bogus-uuid-2' ot:name='Xavier
Markus Langley'/>
<dc:creator>Plagerized</dc:creator>
<dc:subject>Test Data</dc:subject>
<dc:datetime>2003-02-30 13:59:23-07:00</dc:datetime>
<dc:title>Privacy, Part Two: Unwanted Gaze</dc:title>
<dc:content>
This is an example content:
A growing number of lawyers and scholars, including Rosen, say they now
believe that fundamental changes in Net architecture are necessary to
protect constitutional values and restore the notion of the "inviolate
personality" to the private lives of Americans. These would include
copyright management systems to protect the right to read anonymously,
permitting individuals to pay with untraceable digital cash; prohibiting
the collection and disclosure of identifying information without the
reader's knowledge, or using digital certificates to create psudonymous
downloading.
To Rosen, author of Gaze, cyberspace is posing a greater menace to
privacy by the day. He details the l998 forced resignation of Harvard
Divinity School dean Ronald F. Thiemann, who downloaded pornography onto
his university-owned home computer. A Harvard technician installing a
computer with more memory at the dean's residence was transferring files
from the old computer to the new one and noticed thousands of
pornographic pictures. Although none of the pictures appeared to involve
minors, the technician told his supervisor. University administrators
asked the dean to step down.
Harvard justified its decision by claiming that Divinity School rules
prohibited personal use of university computers in any way that clashed
with its educational mission. But the dean was using his computer at
home, not work. And no student or colleague suggested he had improperly
behaved in any way as head of the Divinity School. His work was never
questioned. It's ludicrous to suggest that the school would have fired
him if he'd been downloading sports scores or bidding for furniture on
eBay. But although he'd committed no crime and performed well in his
job, he was forced out in disgrace, while his intimate communications
were discussed in public.
Should free citizens in a democratic society have to spend money for
"nyms" to preserve the privacy they ought to be -- and once were --
accorded in law? How many millions of computer users will even know of
this new technology, or have the money to use it? Rosen's implication
is that even if software caused the problem, then software will clean
up.
His assurances seem a bit "gee-whiz." But to ignore them cynically on
that basis, or to trust them completely, ignores the history of
technology. What people can create, others can and will undo. Technology
that can be used will be used. In an otherwise powerful book, he also
glosses over powerful incentives for eliminating privacy in cyberspace.
First, the megacorporations dominating media, business and government
will continue to aggressively explore ways of tracking potential
customers as Net use grows. Secondly, law enforcement agencies like the
FBI have been fighting for decades for the right to deploy tracking
programs like "Carnivore" (see part one) and are hardly likely to back
off.
</dc:content>
<dc:description>
This is an example of a description:
Can pseudonymous downloading, "snoop-proof" e-mail, digital pseuds called
"nyms," PDA-like machines, allegedly untraceable digi-cash and other
changes in software and the architecture of cyberspace, restore some
privacy and restore the idea of the "Inviolate Personality?" Part Two
in a series based on Jeffrey Rosen's new book, "The Unwanted Gaze: The
Destruction of Privacy in America."
</dc:description>
</ot:story>
<ot:story
id='urn:uuid:this-is-bogus-uuid-1'
xmlns:ot='http://namespaces.opentechnology.org/talk'
xmlns:dc='http://purl.org/metadata/dublin_core'
>
<dc:creator ot:id='urn:uuid:this-is-bogus-uuid-2' ot:name='Xavier
Markus Langley'/>
<dc:subject>4Suite</dc:subject>
<dc:subject>Release</dc:subject>
<dc:subject>XSLT</dc:subject>
<dc:subject>DOM</dc:subject>
<dc:subject>XPath</dc:subject>
<dc:subject>RDF</dc:subject>
<dc:subject>Object Databases</dc:subject>
<dc:datetime>2003-11-19 13:59:23-07:00</dc:datetime>
<dc:title>4Suite 5.3.8 Released</dc:title>
<!--
Note that 4Suite is also marked as a keyword in the content. This is
fine. The final keywords are a union of all the dc:subject children of
ot:story and dc:content.
-->
<dc:content><ot:source>Fourthought, Inc.</ot:source> today announced
the latest version of their XML middleware suite,
<dc:subject>4Suite</dc:subject>. This latest version adds support for
the latest <dc:subject>XSLT</dc:subject> 2.3 Recommendation and
<dc:subject>DOM</dc:subject> Level 7.
</dc:content>
<dc:description>
Can pseudonymous downloading, "snoop-proof" e-mail, digital pseuds
called "nyms," PDA-like machines, allegedly untraceable digi-cash
and other changes in software and the architecture of cyberspace,
restore some privacy and restore the idea of the "Inviolate
Personality?" Part Two in a series based on Jeffrey Rosen's new
book, "The Unwanted Gaze: The Destruction of Privacy in America."
</dc:description>
</ot:story>
</result-set>
"""
expected_1 = """<HTML xmlns:ot="http://namespaces.opentechnology.org/talk" xmlns:dc="http://purl.org/metadata/dublin_core">\n <HEAD>\n <meta content="text/html; charset=ISO-8859-1" http-equiv="Content-Type">\n <TITLE>Test Skin</TITLE>\n </HEAD>\n <BODY>\n\n\n\n <DIV ALIGN="CENTER">Privacy, Part Two: Unwanted Gaze</DIV>\n <TABLE WIDTH="100%" BORDER="0">\n <TR>\n <TD WIDTH="50%" ALIGN="LEFT"></TD>\n <TD WIDTH="50%" ALIGN="RIGHT">2003-02-30 13:59:23-07:00</TD>\n </TR>\n </TABLE>\n <DIV ALIGN="JUSTIFY">\nThis is an example content:\nA growing number of lawyers and scholars, including Rosen, say they now\nbelieve that fundamental changes in Net architecture are necessary to\nprotect constitutional values and restore the notion of the "inviolate\npersonality" to the private lives of Americans. These would include\ncopyright management systems to protect the right to read anonymously,\npermitting individuals to pay with untraceable digital cash; prohibiting\nthe collection and disclosure of identifying information without the\nreader's knowledge, or using digital certificates to create psudonymous\ndownloading.\n\nTo Rosen, author of Gaze, cyberspace is posing a greater menace to\nprivacy by the day. He details the l998 forced resignation of Harvard\nDivinity School dean Ronald F. Thiemann, who downloaded pornography onto\nhis university-owned home computer. A Harvard technician installing a\ncomputer with more memory at the dean's residence was transferring files\nfrom the old computer to the new one and noticed thousands of\npornographic pictures. Although none of the pictures appeared to involve\nminors, the technician told his supervisor. University administrators\nasked the dean to step down.\n\nHarvard justified its decision by claiming that Divinity School rules\nprohibited personal use of university computers in any way that clashed\nwith its educational mission. But the dean was using his computer at\nhome, not work. 
And no student or colleague suggested he had improperly\nbehaved in any way as head of the Divinity School. His work was never\nquestioned. It's ludicrous to suggest that the school would have fired\nhim if he'd been downloading sports scores or bidding for furniture on\neBay. But although he'd committed no crime and performed well in his\njob, he was forced out in disgrace, while his intimate communications\nwere discussed in public.\n\n\nShould free citizens in a democratic society have to spend money for\n"nyms" to preserve the privacy they ought to be -- and once were --\naccorded in law? How many millions of computer users will even know of\nthis new technology, or have the money to use it? Rosen's implication\nis that even if software caused the problem, then software will clean\nup.\n\nHis assurances seem a bit "gee-whiz." But to ignore them cynically on\nthat basis, or to trust them completely, ignores the history of\ntechnology. What people can create, others can and will undo. Technology\nthat can be used will be used. In an otherwise powerful book, he also\nglosses over powerful incentives for eliminating privacy in cyberspace.\nFirst, the megacorporations dominating media, business and government\nwill continue to aggressively explore ways of tracking potential\ncustomers as Net use grows. Secondly, law enforcement agencies like the\nFBI have been fighting for decades for the right to deploy tracking\nprograms like "Carnivore" (see part one) and are hardly likely to back\noff.\n\n \n<BR>\n <BR>\n </DIV>\n <DIV ALIGN="JUSTIFY">\n This is an example of a description:\n Can pseudonymous downloading, "snoop-proof" e-mail, digital pseuds called\n "nyms," PDA-like machines, allegedly untraceable digi-cash and other\n changes in software and the architecture of cyberspace, restore some\n privacy and restore the idea of the "Inviolate Personality?" 
Part Two\n in a series based on Jeffrey Rosen's new book, "The Unwanted Gaze: The\n Destruction of Privacy in America."\n \n <BR>\n <BR>\n </DIV>\n\n\n <DIV ALIGN="CENTER">4Suite 5.3.8 Released</DIV>\n <TABLE WIDTH="100%" BORDER="0">\n <TR>\n <TD WIDTH="50%" ALIGN="LEFT"></TD>\n <TD WIDTH="50%" ALIGN="RIGHT">2003-11-19 13:59:23-07:00</TD>\n </TR>\n </TABLE>\n <DIV ALIGN="JUSTIFY">Fourthought, Inc. today announced\n the latest version of their XML middleware suite,\n 4Suite. This latest version adds support for\n the latest XSLT 2.3 Recommendation and\n DOM Level 7.\n \n <BR>\n <BR>\n </DIV>\n <DIV ALIGN="JUSTIFY">\n Can pseudonymous downloading, "snoop-proof" e-mail, digital pseuds\n called "nyms," PDA-like machines, allegedly untraceable digi-cash\n and other changes in software and the architecture of cyberspace,\n restore some privacy and restore the idea of the "Inviolate\n Personality?" Part Two in a series based on Jeffrey Rosen's new\n book, "The Unwanted Gaze: The Destruction of Privacy in America."\n \n <BR>\n <BR>\n </DIV>\n\n\n </BODY>\n</HTML>"""
def Test(tester):
    # Transform the sample document with the stylesheet and compare against
    # the pre-rendered HTML; exercises ft:apply-templates with a mode taken
    # from a stylesheet parameter (recursive-modes regression).
    source = test_harness.FileInfo(string=source_1)
    sheet = test_harness.FileInfo(string=sheet_1)
    test_harness.XsltTest(tester, source, [sheet], expected_1)
    return
|
from ylang.el import *
from contextlib import contextmanager
def y_eq(a, b):
    # Identity comparison (Lisp `eq`). NOTE(review): y_eq uses `is` while
    # y_is below uses `==` — the names read as swapped relative to Python's
    # operators; confirm against ylang's intended Lisp-style semantics.
    return a is b
def y_is(a, b):
    # Structural equality. NOTE(review): see y_eq above — naming may be
    # intentionally Lisp-flavored rather than a bug; verify before changing.
    return a == b
def y_next(h):
    """Advance past one logical entry of a plist-style list: a keyword head
    consumes two cells (key and value), anything else consumes one."""
    return cddr(h) if keywordp(car(h)) else cdr(h)
def y_key(x):
    """Turn a keyword symbol into a plain symbol by stripping its leading
    character; non-keywords pass through unchanged."""
    if not keywordp(x):
        return x
    return intern(symbol_name(x)[1:])
def y_for(h):
    """Yield (key, value) pairs over a ylang plist-style list.

    A keyword entry yields (stripped-keyword, following-value) and consumes
    two cells; a positional entry yields (running-index, value) and consumes
    one cell. The index advances only for positional entries.

    Improvement: the original evaluated ``keywordp(car(h))`` four separate
    times per iteration (once each for the key, value, yield and advance
    branches); this computes the head and predicate once, removing the
    redundancy and the risk of the branches diverging.
    """
    i = -1
    while h:
        head = car(h)
        if keywordp(head):
            # :key value — two cells.
            yield y_key(head), cadr(h)
            h = cddr(h)
        else:
            # positional value — one cell.
            i += 1
            yield i, head
            h = cdr(h)
def y_get(h, k, *, cmp=y_eq):
    # Generic lookup, dispatching on the container type: hash tables use
    # gethash, plist-style lists are scanned via y_for with `cmp` deciding
    # key equality (identity by default), and anything else falls back to
    # positional elt(). Returns None implicitly when a list scan finds no
    # matching key.
    if hash_table_p(h):
        return gethash(k, h)
    elif listp(h):
        for key, val in y_for(h):
            if cmp(key, k):
                return val
    else:
        return elt(h, k)
def y_put(h, k, *args):
v = car(args)
wipe = null(args)
|
"""Main application and routing logic for TwitOff."""
from decouple import config
from flask import Flask, render_template, request
from .models import DB, User
from .predict import predict_user
from .twitter import add_or_update_user
def create_app():
    """Create and configure an instance of the Flask application."""
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = config('DATABASE_URL')
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    DB.init_app(app)
    @app.route('/')
    def root():
        # Home page: list every known user.
        users = User.query.all()
        return render_template('base.html', title='Home', users=users)
    # to add users (POST request below)
    # we don't need URL parameter '/<name>'
    # because to add users we'll have a form
    @app.route('/user', methods=['POST'])
    @app.route('/user/<name>', methods=['GET'])  # to get users - GET request
    def user(name=None):  # Name needs 'None' default value since it may not exist
        message = ''
        # if there is a name in the GET request (populated name is true)
        # then name equals name, otherwise name comes from the POST form
        name = name or request.values['user_name']
        # we want to be able to add a new user,
        # but adding a user can fail: the account may not exist
        # or may be private
        try:
            # we add user if request method is POST
            if request.method == 'POST':
                # the same method can add or update a user,
                # so it's not destructive if accidentally called multiple times
                add_or_update_user(name)
                message = 'User {} successfully added!'.format(name)
            # Whether or not we add user we still get that user's tweets so we set 'tweets'
            # '.one()' below raises an exception if the user is not found,
            # instead of '.first()' which would return None if not found
            tweets = User.query.filter(User.name == name).one().tweets
        except Exception as e:
            message = 'Error adding {}: {}'.format(name, e)  # name of user & error message
            tweets = []  # fall back to an empty tweet list on failure
        # we return the 'user' template/html with user name, tweets, and message
        return render_template('user.html', title=name, tweets=tweets,
                               message=message)
    @app.route('/compare', methods=['POST'])
    def compare():
        # Predict which of two users is more likely to have written the text.
        user1, user2 = request.values['user1'], request.values['user2']
        if user1 == user2:
            return 'Cannot compare a user to themselves!'
        else:
            prediction = predict_user(user1, user2,
                                      request.values['tweet_text'])
            return user1 if prediction else user2
    # here you should add a decorator @loginrequired to not leave this open!
    # for only admin users with privilege to reset the database
    @app.route('/reset')
    def reset():
        # Destructive: drops and recreates every table.
        DB.drop_all()
        DB.create_all()
        return render_template('base.html', title='DB Reset!', users=[])
    return app
|
# Generated by Django 3.2.5 on 2021-07-30 15:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines Certificate.cdate as a
    # CharField(max_length=20).
    dependencies = [
        ('mweb', '0012_certificate'),
    ]
    operations = [
        migrations.AlterField(
            model_name='certificate',
            name='cdate',
            field=models.CharField(max_length=20),
        ),
    ]
|
# Created By: Hector Rodriguez
from selenium.webdriver.common.by import By
from selenium import webdriver
import os, time, sys
import bot_functions as bf
class InstagramBot:
    """Selenium-driven helper for the Instagram sign-up and login flows."""
    # Historical hard-coded location kept as the default so existing callers
    # keep working; pass driver_path to point at a different chromedriver.
    DEFAULT_DRIVER_PATH = '/Users/hectorrodriguez/Desktop/instaBot/chromedriver'
    def __init__(self, username = None, password = None, driver_path = DEFAULT_DRIVER_PATH):
        """Initialize parameters for Instagram login.

        :param username: Instagram account name (optional).
        :param password: Instagram account password (optional).
        :param driver_path: path to the chromedriver binary; previously this
            was hard-coded, now it is a backward-compatible parameter.
        """
        self.username = username
        self.password = password
        self.driver = webdriver.Chrome(executable_path=driver_path)
    def sign_up(self):
        """Bot for signing up for Instagram: opens the sign-up page."""
        self.driver.get('https://www.instagram.com/accounts/emailsignup/')
    def log_in(self):
        """Bot to log onto Instagram with valid credentials."""
        print("Logging in.........")
        self.driver.get('https://www.instagram.com/accounts/login/')
        time.sleep(1)  # let the form render
        # find_element(By.XPATH, ...) replaces the find_element_by_* helpers
        # removed in Selenium 4; it also finally uses the imported `By`.
        self.driver.find_element(By.XPATH, "//input[@name='username']").send_keys(self.username)
        self.driver.find_element(By.XPATH, "//input[@name='password']").send_keys(self.password)
        time.sleep(1)  # pause
        self.driver.find_element(By.XPATH, "//button[contains(.,'Log In')]").click()
        time.sleep(5)  # wait for the login round-trip to complete
        print("Successfully logged in.\n")
    def save_info(self):
        """Dismiss the "save your login info?" screen when it appears."""
        print("Save info screen.")
        # find_elements returns an empty list instead of raising when the
        # button is absent, so this no longer crashes on other screens.
        buttons = self.driver.find_elements(By.XPATH, "//button[contains(.,'Not Now')]")
        if buttons:
            buttons[0].click()
if __name__ == '__main__':
    home = True
    # Set loop for the main menu.
    while home:
        # Check main menu bot options.
        val = bf.menu()
        # Check if it's a valid option.
        val, home = bf.check_option(val, home)
        # If valid option, run option (builds the bot for that option).
        if not home:
            bot = bf.menu_selection(val, bf)
            home_log_in = True
            # Set loop for the log-in sub-menu.
            while home_log_in:
                # Check log-in menu bot options.
                val2 = bf.menu_log_in()
                # Check if it's a valid option.
                val2, home_log_in = bf.check_option_log_in(val2, home_log_in)
                # If valid option, run option; option 5 is handled below.
                if not home_log_in and val2 != 5:
                    home_log_in = True
                    bf.menu_selection_log_in(val2, bf, bot.driver)
                elif val2 == 5:
                    # Option 5: log out.
                    bf.log_out(bf, bot.driver)
|
import csv
from tqdm import tqdm
import sys
from transformers import BertWordPieceTokenizer

# Word-piece tokenizer backed by the BERT base-uncased vocabulary.
tokenizer = BertWordPieceTokenizer(
    "/scratch/gpfs/altosaar/dat/longform-data/BERT/bert-base-uncased.txt",
    lowercase=True,
)
# Allow arbitrarily large CSV fields (article bodies can be huge).
csv.field_size_limit(sys.maxsize)

header_row = ["title", "text", "url", "publication", "model_publication", "link"]

# Context managers guarantee both handles are flushed and closed even if
# tokenization fails partway through — the original leaked both files, which
# could silently truncate the buffered output.
with open("fake-news.csv", "r") as ifile, open("mapped-fake-news.csv", "w") as ofile:
    reader = csv.reader(ifile)
    writer = csv.writer(ofile, delimiter=",")
    writer.writerow(header_row)
    for i, row in enumerate(tqdm(reader)):
        # Skip the header row and any malformed rows (expected 17 columns).
        if i == 0 or len(row) != 17:
            continue
        # Tokenize the article body (column 5) to vocabulary ids.
        id_text = tokenizer.encode(row[5]).ids
        # Drop the trailing [SEP] and leading [CLS] special tokens.
        id_text.pop()
        id_text.pop(0)
        # title, tokenized text, url placeholder, publication, model id, link.
        writer.writerow([row[9], id_text, 8000000, "fake-news-corpus", 25, row[4]])
|
"""
Copyright Zapata Computing, Inc. All rights reserved.
"""
import os
import setuptools
readme_path = os.path.join("..", "README.md")
with open(readme_path, "r") as f:
long_description = f.read()
setuptools.setup(
name = "z-lstm",
version = "0.1.0",
author = "Zapata Computing, Inc.",
author_email = "info@zapatacomputing.com",
description = "Prediction with LSTM for Orquestra.",
long_description = long_description,
long_description_content_type = "text/markdown",
url = "https://github.com/zapatacomputing/z-lstm",
packages = setuptools.find_packages(where = "python"),
package_dir = {"" : "python"},
classifiers = (
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
),
install_requires = [
"tensorflow",
"pandas",
"numpy"
],
)
|
import tempfile
import contextlib
import os
import pytest
from gitorg import git
from gitorg.cli import run
@contextlib.contextmanager
def temp_cd():
    """Yield a fresh temporary directory while chdir'd into it; both the
    cwd and the directory are restored/removed on exit."""
    with tempfile.TemporaryDirectory() as tmp, cd(tmp):
        yield tmp
@contextlib.contextmanager
def cd(dirname):
    """Temporarily change the working directory to `dirname`.

    The previous working directory is always restored on exit, even if the
    body raises.
    """
    previous = os.getcwd()
    os.chdir(dirname)
    try:
        yield
    finally:
        os.chdir(previous)
@pytest.fixture
def temp_dir():
    """Pytest fixture: run the test inside a fresh temporary directory."""
    with temp_cd():
        yield
@pytest.fixture
def temp_repo():
    """Pytest fixture: yield a temporary directory containing a git repo
    with two committed files ("a" -> "A", "b" -> "B")."""
    with tempfile.TemporaryDirectory() as tmpdirname:
        with cd(tmpdirname):
            for fname, content in (("a", "A"), ("b", "B")):
                with open(fname, "w") as fp:
                    fp.write(content)
            git.run("init")
            git.run("add", "*")
            git.run("commit", "-am", "commit text")
            yield tmpdirname
@pytest.fixture
def init(temp_dir):
    """Pytest fixture: run the gitorg `init` command inside a fresh
    temporary directory (provided by the temp_dir fixture)."""
    run(["init"])
|
import requests,re,time,math,random,os
from bs4 import BeautifulSoup
def Get_PostData(LoginUrl):
    '''
    Fetch the hidden CAS login-form parameters (lt, execution, _eventId,
    rmShown) and the initial JSESSIONID cookie from the login page.
    Returns ([lt, execution, _eventId, rmShown], jsessionid).
    '''
    Req_Class = requests.get(LoginUrl)  # GET the page to obtain the first cookie
    Req_Cookies = Req_Class.cookies["JSESSIONID"]  # session cookie needed for the later POST
    Html_Text = Req_Class.text  # page source
    Soup_Class = BeautifulSoup(Html_Text,"lxml")  # parse with the lxml backend
    Tag_NameT = ('input[name="lt"]','input[name="execution"]','input[name="_eventId"]','input[name="rmShown"]')  # CSS selectors of the hidden form inputs
    Data_ValueL = []  # collected form values, in selector order
    for Value in Tag_NameT:
        Data_V = Soup_Class.select(Value)[0]  # first matching tag
        Regex_Class = re.compile(r'(?<=value=\").+(?=\"\/\>)')  # pull the text of the value="..." attribute
        Data_ValueL.append(Regex_Class.findall(str(Data_V))[0])  # append the extracted value
    return Data_ValueL,Req_Cookies
def Start_Login():
    '''
    Perform the CAS login and return the logged-in session cookie as a
    single "NAME=VALUE" string for the e-learning site.
    '''
    # BUGFIX: the credential placeholders were written as bare `#####`, which
    # Python parses as a comment and leaves `"username":` dangling -- a
    # syntax error. Use explicit placeholder strings instead.
    username = "#####"  # TODO: fill in your username
    password = "#####"  # TODO: fill in your password
    ValueData,Req_Cookie = Get_PostData(LoginUrl)
    Post_data = {
        "username": username,
        "password": password,
        "lt": ValueData[0],
        "execution": ValueData[1],
        "_eventId": ValueData[2],
        "rmShown": int(ValueData[3])
    }  # login POST payload
    Post_Headers = {"Host":"ids3.jsou.cn","cookie":"JSESSIONID="+Req_Cookie}  # reuse the pre-login session cookie
    # Disable redirects: we need the Location header of the CAS response.
    Post_Rep = requests.post(LoginUrl,data=Post_data,headers=Post_Headers,allow_redirects=False)
    Redirect_Url = Post_Rep.headers["Location"]  # service-ticket redirect URL
    Redirect_Req = requests.get(Redirect_Url,allow_redirects=False)  # follow once to receive the logged-in cookie
    Login_Cookie = Redirect_Req.headers["Set-Cookie"].split(';')[0]  # keep only "NAME=VALUE"
    return Login_Cookie
def Get_CourseId(CourseUrl,LoginHeaders):
    """Query the course-list API and return (id -> name dict, id list)
    for every course the logged-in student is enrolled in."""
    payload = requests.post(CourseUrl, headers=LoginHeaders).json()
    id_to_name = {}
    id_list = []
    for course in payload['body']:
        version_id = course["courseVersionId"]
        id_to_name[version_id] = course["courseName"]
        id_list.append(version_id)
    return id_to_name, id_list
def Get_Course_ContentUrl():
    """Build the per-course "all activities" API URL for each enrolled
    course and return them as a list."""
    # NOTE: Get_CourseId returns (id->name dict, id list); iterating the
    # dict yields the course-version ids.
    course_map, _ = Get_CourseId(CourseUrl, LoginHeaders)
    base = "http://xuexi.jsou.cn/jxpt-web/student/course/getAllActivity/"
    return [base + course_id for course_id in course_map]
def M_Heart():
    """Collect every video activity of every course and append one heartbeat
    POST payload per video to PostData.txt (one str(dict) per line).

    NOTE(review): a single Post_Data0 dict is mutated and re-serialized on
    every iteration, and one token/timePoint is shared by all rows.
    """
    Post_Data0 = {
        "playStatus":"true",
        "isResourcePage":"true",
        "courseVersionId":"0",
        "activityId":"0",
        "type":2,
        "isStuLearningRecord":2,
        "token":SetToken(),  # generate a token (algorithm reverse-engineered from the site)
        "timePoint":random.random()*100
    }
    Content_Urls = Get_Course_ContentUrl()
    for Url in Content_Urls:  # level 1: iterate courses
        Course_DeatilContent = requests.get(Url,headers=LoginHeaders).json()['body']
        Course_Json_Len = len(Course_DeatilContent)
        for Index in range(0,Course_Json_Len):  # level 2: iterate activity groups
            Avtive_List = Course_DeatilContent[Index]["activitys"]
            for Active_Info in Avtive_List:  # level 3: iterate activities
                Type_Ac = Active_Info["type"]
                if(Type_Ac == "2"):  # keep only video activities
                    ACtiveID_CourseID = {}
                    Deal_Url = Url.replace("http://xuexi.jsou.cn/jxpt-web/student/course/getAllActivity/","")  # strip the API prefix to recover the course-version id
                    Post_Data0["courseVersionId"] = Deal_Url
                    Post_Data0["activityId"] = Active_Info["activityId"]
                    with open("PostData.txt","a+") as f:
                        f.write(str(Post_Data0)+"\n")  # append this payload
    print("写入完成,请重新运行脚本")
def SetToken(length=32):
    """Generate a random token of `length` characters drawn from a fixed,
    ambiguity-free alphabet (algorithm reverse-engineered from the site's
    JavaScript).

    BUGFIX: the original `len_ = 8.or(32)` was an invalid literal
    translation of the JavaScript default `len || 32` and made this function
    a syntax error. The default token length is now 32 and may be overridden
    via the new, backward-compatible `length` parameter.
    """
    Salt = "ABCDEFGHJKMNPQRSTWXYZabcdefhijkmnprstwxyz2345678"
    MaxPos = len(Salt)
    Pwd = ""
    i = 0
    while(i < length):
        i += 1
        # math.floor(random.random()*MaxPos) mirrors the original JS indexing.
        Pwd += Salt[math.floor(random.random()*MaxPos)]
    return Pwd
def Start_Run():
    """Replay every heartbeat payload stored in PostData.txt against the
    heartbeat endpoint, sleeping a random 2-5 seconds between requests.
    Terminates the process on the first non-SUCCESS response."""
    import ast  # local import: safely parse the str(dict) payload lines

    print("等待刷课中....")
    Count = 0
    with open("PostData.txt","r+") as f:
        P_Data = f.readlines()
        for P_E in P_Data:
            Count+=1
            R_I = random.randint(2,5)
            print("本次随机间隔为{}秒...等待中...\n".format(R_I))
            time.sleep(R_I)
            # SECURITY BUGFIX: was eval(); ast.literal_eval only accepts
            # Python literals, so a tampered PostData.txt line cannot execute
            # arbitrary code. The file's str(dict) lines parse identically.
            Repo = requests.post(HeartBeatUrl,headers=LoginHeaders,data=ast.literal_eval(P_E)).json()
            if(Repo["code"]=="SUCCESS"):
                print("刷课运行正常,正在进行第{}个视频\n".format(Count))
            else:
                print("respone返回错误,刷课终止.")
                exit(0)
    Count=0
if __name__ == "__main__":
Num = 0
LoginUrl = "https://ids3.jsou.cn/login?service=http://xuexi.jsou.cn/jxpt-web/auth/idsLogin"
CourseUrl = "http://xuexi.jsou.cn/jxpt-web/student/courseuser/getAllCurrentCourseByStudent"
HeartBeatUrl = "http://xuexi.jsou.cn/jxpt-web/common/learningBehavior/heartbeat"
LoginHeaders = {"Host":"xuexi.jsou.cn","Cookie":Start_Login(),"Content-Type":"application/x-www-form-urlencoded; charset=UTF-8"}
if(os.path.exists("PostData.txt")==False):
M_Heart()
else:
while True:
Start_Run()
Num+=1
print("\n第{}轮结束,第{}轮即将开始...\n".format(Num,Num+1))
|
# Emit score output: a wavetable statement followed by an accelerando note
# pattern from the project-local `log2` module. The "f1 ..." lines look like
# Csound f-table score statements -- TODO confirm the target format.
# NOTE(review): Python 2 print statements; this script targets Python 2.
import log2
print "f1 0 4096 10 1"
log2.accelerando(0.5, 3.7, 0.4, 0.01)
print "f1 5.2 4096 10 0 1.3"
log2.accelerando(5.2, 11.7, 1.2, 0.5)
|
"""Track operations that exceed a latency threshold and print an alert once
any single day has accumulated too many violations."""
from collections import Counter
from contextlib import contextmanager
from datetime import date
from time import time

OPERATION_THRESHOLD_IN_SECONDS = 2.2
ALERT_THRESHOLD = 3
ALERT_MSG = 'ALERT: suffering performance hit today'

# Per-day count of slow operations, keyed by the date they happened.
violations = Counter()


def get_today():
    """Making it easier to test/mock"""
    return date.today()


@contextmanager
def timeit():
    """Time the wrapped statements; record a violation for today when the
    elapsed time reaches OPERATION_THRESHOLD_IN_SECONDS, then alert if the
    busiest day so far has reached ALERT_THRESHOLD violations."""
    started = time()
    yield
    elapsed = time() - started
    if elapsed >= OPERATION_THRESHOLD_IN_SECONDS:
        violations.update([get_today()])
    # Alert check runs on every exit, mirroring the original behaviour.
    if violations and violations.most_common(1)[0][1] >= ALERT_THRESHOLD:
        print(ALERT_MSG)


# Example usage:
# with timeit():
#     print(abcd)
#     sleep(2)
class Tuple:
    """Empty placeholder class.

    NOTE(review): intentionally has no behaviour; presumably a stub to be
    fleshed out later (and it shadows typing.Tuple if both are imported) --
    confirm the intended use.
    """
    pass
|
import struct
from block.Block import Block
from block.UserDirBlock import UserDirBlock
from block.DirCacheBlock import *
from ADFSFile import ADFSFile
from ADFSNode import ADFSNode
from FileName import FileName
from FSError import *
from FSString import FSString
from MetaInfo import *
class ADFSDir(ADFSNode):
    """Directory node of an Amiga filesystem (ADFS) volume.

    Wraps an on-disk UserDirBlock (plus optional dir-cache blocks) and keeps
    an in-memory child list (`entries`) together with a per-bucket list
    (`name_hash`) mirroring the block's on-disk name hash table.

    NOTE(review): Python 2 codebase (uses `xrange`); `self.block`,
    `self.blkdev` and `self.volume` are presumably provided by ADFSNode /
    set_block -- confirm against ADFSNode.
    """
    def __init__(self, volume, parent):
        """Create an unread directory node; state is filled in by read()."""
        ADFSNode.__init__(self, volume, parent)
        # state
        self.entries = None       # child nodes, or None while unread
        self.dcache_blks = None   # DirCacheBlock list (dircache volumes only)
        self.name_hash = None     # list of per-hash-bucket node lists
        self.valid = False
    def __repr__(self):
        if self.block != None:
            return "[Dir(%d)'%s':%s]" % (self.block.blk_num, self.block.name, self.entries)
        else:
            return "[Dir]"
    def blocks_create_old(self, anon_blk):
        """Re-interpret an already-read anonymous block as my UserDirBlock."""
        ud = UserDirBlock(self.blkdev, anon_blk.blk_num)
        ud.set(anon_blk.data)
        if not ud.valid:
            raise FSError(INVALID_USER_DIR_BLOCK, block=anon_blk)
        self.set_block(ud)
        return ud
    def _read_add_node(self, blk, recursive):
        """Create a child node (dir or file) from an anonymous block.

        Returns (hash_chain, node); hash_chain is the next block number in
        the same hash bucket (0 terminates the chain).
        """
        hash_chain = None
        node = None
        if blk.valid_chksum and blk.type == Block.T_SHORT:
            # its a userdir
            if blk.sub_type == Block.ST_USERDIR:
                node = ADFSDir(self.volume, self)
                blk = node.blocks_create_old(blk)
                if recursive:
                    node.read()
            # its a file
            elif blk.sub_type == Block.ST_FILE:
                node = ADFSFile(self.volume, self)
                blk = node.blocks_create_old(blk)
            # unsupported
            else:
                raise FSError(UNSUPPORTED_DIR_BLOCK, block=blk, extra="Sub_Type: %08x" % blk.sub_type)
        hash_chain = blk.hash_chain
        return hash_chain,node
    def _init_name_hash(self):
        """Reset name_hash to one empty bucket per hash-table slot."""
        self.name_hash = []
        for i in xrange(self.block.hash_size):
            self.name_hash.append([])
    def read(self, recursive=False):
        """Scan the directory's hash table and populate entries/name_hash.

        Follows each bucket's hash chain; also loads dir-cache blocks when
        the volume uses dircache mode. Sets self.valid = False and returns
        early on the first unreadable block.
        """
        self._init_name_hash()
        self.entries = []
        # create initial list with blk_num/hash_index for dir scan
        blocks = []
        for i in xrange(self.block.hash_size):
            blk_num = self.block.hash_table[i]
            if blk_num != 0:
                blocks.append((blk_num,i))
        for blk_num,hash_idx in blocks:
            # read anonymous block
            blk = Block(self.blkdev, blk_num)
            blk.read()
            if not blk.valid:
                self.valid = False
                return
            # create file/dir node
            hash_chain,node = self._read_add_node(blk, recursive)
            # store node in entries
            self.entries.append(node)
            # store node in name_hash
            self.name_hash[hash_idx].append(node)
            # follow hash chain (appending extends the loop's work list)
            if hash_chain != 0:
                blocks.append((hash_chain,hash_idx))
        # dircaches available?
        if self.volume.is_dircache:
            self.dcache_blks = []
            dcb_num = self.block.extension
            while dcb_num != 0:
                dcb = DirCacheBlock(self.blkdev, dcb_num)
                dcb.read()
                if not dcb.valid:
                    self.valid = False
                    return
                self.dcache_blks.append(dcb)
                dcb_num = dcb.next_cache
    def flush(self):
        """Flush all children and drop the cached entry lists."""
        if self.entries:
            for e in self.entries:
                e.flush()
        self.entries = None
        self.name_hash = None
    def ensure_entries(self):
        """Lazily read() the directory if its entries are not loaded yet."""
        if not self.entries:
            self.read()
    def get_entries(self):
        """Return the (lazily loaded) list of child nodes."""
        self.ensure_entries()
        return self.entries
    def has_name(self, fn):
        """Return True if a child with FileName `fn` exists (Amiga
        case-insensitive comparison via the upper-cased name)."""
        fn_hash = fn.hash()
        fn_up = fn.get_upper_ami_str()
        node_list = self.name_hash[fn_hash]
        for node in node_list:
            if node.name.get_upper_ami_str() == fn_up:
                return True
        return False
    def blocks_create_new(self, free_blks, name, hash_chain_blk, parent_blk, meta_info):
        """Write a fresh UserDirBlock for this new directory and return its
        block number (uses only the first of `free_blks`)."""
        blk_num = free_blks[0]
        blkdev = self.blkdev
        # create a UserDirBlock
        ud = UserDirBlock(blkdev, blk_num)
        ud.create(parent_blk, name.get_ami_str(), meta_info.get_protect(), meta_info.get_comment_ami_str(), meta_info.get_mod_ts(), hash_chain_blk)
        ud.write()
        self.set_block(ud)
        self._init_name_hash()
        return blk_num
    def blocks_get_create_num(self):
        # the number of blocks needed for a new (empty) directory
        # -> only one UserDirBlock
        return 1
    def _create_node(self, node, name, meta_info, update_ts=True):
        """Allocate blocks for `node`, link it into my hash table/dircache,
        and register it in the in-memory lists.

        Raises FSError on invalid/duplicate names or block exhaustion.
        """
        self.ensure_entries()
        # make sure a default meta_info is available
        if meta_info == None:
            meta_info = MetaInfo()
            meta_info.set_current_as_mod_time()
            meta_info.set_default_protect()
        # check file name
        fn = FileName(name, is_intl=self.volume.is_intl)
        if not fn.is_valid():
            raise FSError(INVALID_FILE_NAME, file_name=name, node=self)
        # does already exist an entry in this dir with this name?
        if self.has_name(fn):
            raise FSError(NAME_ALREADY_EXISTS, file_name=name, node=self)
        # calc hash index of name
        fn_hash = fn.hash()
        hash_chain = self.name_hash[fn_hash]
        if len(hash_chain) == 0:
            hash_chain_blk = 0
        else:
            hash_chain_blk = hash_chain[0].block.blk_num
        # return the number of blocks required to create this node
        num_blks = node.blocks_get_create_num()
        # try to find free blocks
        free_blks = self.volume.bitmap.alloc_n(num_blks)
        if free_blks == None:
            raise FSError(NO_FREE_BLOCKS, node=self, file_name=name, extra="want %d" % num_blks)
        # now create the blocks for this node
        new_blk = node.blocks_create_new(free_blks, name, hash_chain_blk, self.block.blk_num, meta_info)
        # dircache: create record for this node
        if self.volume.is_dircache:
            ok = self._dircache_add_entry(name, meta_info, new_blk, node.get_size(), update_myself=False)
            if not ok:
                self.delete()
                raise FSError(NO_FREE_BLOCKS, node=self, file_name=name, extra="want dcache")
        # update my dir: new node becomes head of its hash chain
        self.block.hash_table[fn_hash] = new_blk
        self.block.write()
        # add node
        self.name_hash[fn_hash].insert(0,node)
        self.entries.append(node)
        # update time stamps
        if update_ts:
            self.update_dir_mod_time()
            self.volume.update_disk_time()
    def update_dir_mod_time(self):
        """Stamp this directory's modification time with 'now'."""
        mi = MetaInfo()
        mi.set_current_as_mod_time()
        self.change_meta_info(mi)
    def create_dir(self, name, meta_info=None, update_ts=True):
        """Create and return a new empty sub-directory named `name`."""
        if not isinstance(name, FSString):
            raise ValueError("create_dir's name must be a FSString")
        node = ADFSDir(self.volume, self)
        self._create_node(node, name, meta_info, update_ts)
        return node
    def create_file(self, name, data, meta_info=None, update_ts=True):
        """Create and return a new file named `name` holding `data`."""
        if not isinstance(name, FSString):
            raise ValueError("create_file's name must be a FSString")
        node = ADFSFile(self.volume, self)
        node.set_file_data(data)
        self._create_node(node, name, meta_info, update_ts)
        return node
    def _delete(self, node, wipe, update_ts):
        """Unlink `node` from my hash chain/dircache, free its blocks, and
        optionally wipe them with zero bytes on disk."""
        self.ensure_entries()
        # can we delete?
        if not node.can_delete():
            raise FSError(DELETE_NOT_ALLOWED, node=node)
        # make sure its a node of mine
        if node.parent != self:
            raise FSError(INTERNAL_ERROR, node=node, extra="node parent is not me")
        if node not in self.entries:
            raise FSError(INTERNAL_ERROR, node=node, extra="node not in entries")
        # get hash key
        hash_key = node.name.hash()
        names = self.name_hash[hash_key]
        # find my node
        pos = None
        for i in xrange(len(names)):
            if names[i] == node:
                pos = i
                break
        # hmm not found?!
        if pos == None:
            raise FSError(INTERNAL_ERROR, node=node, extra="node not found in hash chain")
        # find prev and next in hash list
        if pos > 0:
            prev = names[pos-1]
        else:
            prev = None
        if pos == len(names)-1:
            next_blk = 0
        else:
            next_blk = names[pos+1].block.blk_num
        # remove node from the hash chain
        if prev == None:
            self.block.hash_table[hash_key] = next_blk
            self.block.write()
        else:
            prev.block.hash_chain = next_blk
            prev.block.write()
        # remove from my lists
        self.entries.remove(node)
        names.remove(node)
        # remove blocks of node in bitmap
        blk_nums = node.get_block_nums()
        self.volume.bitmap.dealloc_n(blk_nums)
        # dircache?
        if self.volume.is_dircache:
            free_blk_num = self._dircache_remove_entry(node.name.name)
        else:
            free_blk_num = None
        # (optional) wipe blocks
        if wipe:
            clr_blk = '\0' * self.blkdev.block_bytes
            for blk_num in blk_nums:
                self.blkdev.write_block(blk_num, clr_blk)
            # wipe a potentially free'ed dircache block, too
            if free_blk_num != None:
                self.blkdev.write_block(free_blk_num, clr_blk)
        # update time stamps
        if update_ts:
            self.update_dir_mod_time()
            self.volume.update_disk_time()
    def can_delete(self):
        """A directory may only be deleted when it is empty."""
        self.ensure_entries()
        return len(self.entries) == 0
    def delete_children(self, wipe, all, update_ts):
        """Delete every child (iterates over a copy of the entry list)."""
        self.ensure_entries()
        entries = self.entries[:]
        for e in entries:
            e.delete(wipe, all, update_ts)
    def get_entries_sorted_by_name(self):
        """Return children sorted by their upper-cased Amiga name."""
        self.ensure_entries()
        return sorted(self.entries, key=lambda x : x.name.get_upper_ami_str())
    def list(self, indent=0, all=False, detail=False, encoding="UTF-8"):
        """Print myself, then (top level or all=True) my children."""
        ADFSNode.list(self, indent, all, detail, encoding)
        if not all and indent > 0:
            return
        self.ensure_entries()
        es = self.get_entries_sorted_by_name()
        for e in es:
            e.list(indent=indent+1, all=all, detail=detail, encoding=encoding)
    def get_path(self, pc, allow_file=True, allow_dir=True):
        """Resolve path components `pc` (a FileName list) relative to me.

        Returns the matching node, or None if unresolved or the node kind is
        excluded by allow_file/allow_dir.
        """
        if len(pc) == 0:
            return self
        self.ensure_entries()
        for e in self.entries:
            if not isinstance(pc[0], FileName):
                raise ValueError("get_path's pc must be a FileName array")
            if e.name.get_upper_ami_str() == pc[0].get_upper_ami_str():
                if len(pc) > 1:
                    if isinstance(e, ADFSDir):
                        return e.get_path(pc[1:], allow_file, allow_dir)
                    else:
                        return None
                else:
                    if isinstance(e, ADFSDir):
                        if allow_dir:
                            return e
                        else:
                            return None
                    elif isinstance(e, ADFSFile):
                        if allow_file:
                            return e
                        else:
                            return None
                    else:
                        return None
        return None
    def draw_on_bitmap(self, bm, show_all=False, first=True):
        """Mark my blocks in map `bm`: 'D' for the dir block, 'C' for
        dircache blocks; recurse into children for the first/all levels."""
        blk_num = self.block.blk_num
        bm[blk_num] = 'D'
        if show_all or first:
            self.ensure_entries()
            for e in self.entries:
                e.draw_on_bitmap(bm, show_all, False)
        if self.dcache_blks != None:
            for dcb in self.dcache_blks:
                bm[dcb.blk_num] = 'C'
    def get_block_nums(self):
        """Return my own block numbers (dir block + any dircache blocks)."""
        self.ensure_entries()
        result = [self.block.blk_num]
        if self.volume.is_dircache:
            for dcb in self.dcache_blks:
                result.append(dcb.blk_num)
        return result
    def get_blocks(self, with_data=False):
        """Return my block objects (dir block + any dircache blocks)."""
        self.ensure_entries()
        result = [self.block]
        if self.volume.is_dircache:
            result += self.dcache_blks
        return result
    def get_size(self):
        """Directories report zero byte size."""
        return 0
    def get_size_str(self):
        return "DIR"
    def get_detail_str(self):
        """Short human-readable summary: entry and dircache block counts."""
        self.ensure_entries()
        if self.entries != None:
            s = "entries=%d" % len(self.entries)
        else:
            s = ""
        if self.dcache_blks != None:
            s += " dcache=%d" % len(self.dcache_blks)
        return s
    # ----- dir cache -----
    def _dircache_add_entry(self, name, meta_info, entry_blk, size, update_myself=True):
        """Build a DirCacheRecord for a new child and store it."""
        # create a new dircache record
        r = DirCacheRecord(entry=entry_blk, size=size, protect=meta_info.get_protect(), \
                           mod_ts=meta_info.get_mod_ts(), sub_type=0, name=name, comment=meta_info.get_comment())
        return self._dircache_add_entry_int(r, update_myself)
    def _dircache_add_entry_int(self, r, update_myself=True):
        """Place record `r` in the first dircache block with room, creating
        a new dircache block if necessary. Returns True on success."""
        r_bytes = r.get_size()
        # find a dircache block with enough space
        found_blk = None
        for dcb in self.dcache_blks:
            free_bytes = dcb.get_free_record_size()
            if r_bytes < free_bytes:
                found_blk = dcb
                break
        # need to create a new one?
        if found_blk == None:
            found_blk = self._dircache_add_block(update_myself)
            if found_blk == None:
                return False
        # add record to block and update it
        found_blk.add_record(r)
        found_blk.write()
        return True
    def _dircache_add_block(self, update_myself):
        """Allocate and chain a fresh DirCacheBlock; None if disk is full."""
        # allocate block
        blk_nums = self.volume.bitmap.alloc_n(1)
        if blk_nums == None:
            return None
        # setup dir cache block
        dcb_num = blk_nums[0]
        dcb = DirCacheBlock(self.blkdev, dcb_num)
        dcb.create(parent=self.block.blk_num)
        # link new cache block: either as my extension or after the last one
        if len(self.dcache_blks) == 0:
            self.block.extension = dcb_num
            if update_myself:
                self.block.write()
        else:
            last_dcb = self.dcache_blks[-1]
            last_dcb.next_cache = dcb_num
            last_dcb.write()
        self.dcache_blks.append(dcb)
        return dcb
    def _dircache_remove_entry(self, name, update_myself=True):
        """Remove the dircache record for `name`; frees and returns the
        block number of a dircache block that became empty, else None."""
        # first find entry
        pos = None
        dcb = None
        record = None
        n = len(self.dcache_blks)
        for i in xrange(n):
            dcb = self.dcache_blks[i]
            record = dcb.get_record_by_name(name)
            if record != None:
                pos = i
                break
        if record == None:
            raise FSError(INTERNAL_ERROR, node=self)
        # remove entry from this block
        dcb.remove_record(record)
        # remove whole block?
        if dcb.is_empty():
            # next block following me
            if pos == n-1:
                next = 0
            else:
                next = self.dcache_blks[pos+1].blk_num
            # update block links
            if pos == 0:
                # adjust extension link in this dir node
                self.block.extension = next
                if update_myself:
                    self.block.write()
            else:
                # adjust dircache block in front of me
                prev_blk = self.dcache_blks[pos-1]
                prev_blk.next_cache = next
                prev_blk.write()
            # free cache block in bitmap
            blk_num = dcb.blk_num
            self.volume.bitmap.dealloc_n([blk_num])
            return blk_num # return number of just deleted block
        else:
            # update cache block with reduced set of records
            dcb.write()
            return None
    def get_dircache_record(self, name):
        """Find and return the dircache record for `name`, or None."""
        if self.dcache_blks != None:
            for dcb in self.dcache_blks:
                record = dcb.get_record_by_name(name)
                if record != None:
                    return record
        return None
    def update_dircache_record(self, record, rebuild):
        """Persist a changed record: re-insert it when its size may have
        changed (rebuild=True), else just rewrite its block."""
        if self.dcache_blks == None:
            return
        # update record
        if rebuild:
            self._dircache_remove_entry(record.name, update_myself=False)
            self._dircache_add_entry_int(record, update_myself=True)
        else:
            # simply re-write the dircache block
            for dcb in self.dcache_blks:
                if dcb.has_record(record):
                    dcb.write()
                    break
    def get_block_usage(self, all=False, first=True):
        """Return (data, non-data) block counts, optionally recursive."""
        num_non_data = 1
        num_data = 0
        if self.dcache_blks != None:
            num_non_data += len(self.dcache_blks)
        if all or first:
            self.ensure_entries()
            for e in self.entries:
                bu = e.get_block_usage(all=all, first=False)
                num_data += bu[0]
                num_non_data += bu[1]
        return (num_data, num_non_data)
    def get_file_bytes(self, all=False, first=True):
        """Sum the file sizes of my children, optionally recursive."""
        size = 0
        if all or first:
            self.ensure_entries()
            for e in self.entries:
                size += e.get_file_bytes(all=all, first=False)
        return size
    def is_dir(self):
        return True
|
'''
Crie um programa que leia o ano de nascimento de sete pessoas.
No final, mostre quantas pessoas ainda não atingiram a maioridade e quantas já são maiores.
'''
from datetime import datetime

# Read seven birth years from stdin and count how many people are minors
# (< 18) and how many are of age (>= 18), based on the current year.
current_year = datetime.now().year
minors = 0
adults = 0
for _ in range(7):
    nasc = int(input('Digite o Ano de Nascimento: '))
    if current_year - nasc < 18:
        minors += 1
    else:
        adults += 1
print('Temos {} Menor de Idade'.format(minors))
print('Temos {} Maior de Idade'.format(adults))
import os
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from astropy.io import fits
from astropy.io import fits
from wotan import flatten
from process_lightcurve_with_two_cadence import process_lightcurves_into_input_representation
from preprocess import lightcurve_detrending
def process_toi_lightcurve_into_numpy_array(hdulist, info, replace_outliers=True):
    """The function is to get the time series and the flux series from the toi light curve file.

    Args:
        hdulist: opened FITS HDU list -- presumably a TESS lightcurve product
            (primary header + lightcurve table); TODO confirm against caller.
        info: one row of tois.csv providing ephemeris/stellar columns.
        replace_outliers: when True, flux samples whose QUALITY flag is
            non-zero are replaced with the median of the good samples.

    Returns:
        (time, flux, [star_rad, star_mass, tess_mag, star_teff, star_logg,
        epoch, period, duration, depth]); time/flux are padded or truncated
        to exactly 20076 samples.
    """
    ## get planetary and stellar parameters
    quality = hdulist[1].data['QUALITY']
    ## extract data from tces.csv and light curve
    tess_mag = hdulist[0].header['TESSMAG']
    star_teff = hdulist[0].header['TEFF']
    star_logg = hdulist[0].header['LOGG']
    star_rad = info['Star Radius Value']
    star_mass = None # 0.
    epoch = float(info['Epoch Value'])
    period = float(info['Orbital Period Value'])
    duration = float(info['Transit Duration Value']) / 24. # unit days
    depth = float(info['Transit Depth Value'])
    if star_rad == '':
        star_rad = None
    '''if star_mass == '':
        star_mass = None'''
    ## get time and flux
    time = hdulist[1].data['time']
    flux = hdulist[1].data['PDCSAP_FLUX']
    ## adjust epoch to first transit (shift into [time[0], time[0]+period))
    epoch = np.mod(epoch - time[0], period) + time[0]
    ## process outliers: flagged cadences are overwritten with the median flux
    if replace_outliers:
        quality_flag = np.where(np.array(quality) != 0)
        valid_ind = np.where(np.array(quality) == 0)
        median_flux = np.median(flux[valid_ind])
        flux[quality_flag] = median_flux
    ## detrend and remove outliers
    time, flux = lightcurve_detrending(time, flux, period, epoch, duration)
    ## adjust the time series to a fixed length of 20076 samples:
    ## pad flux with NaN and extend time at a 2-minute cadence, or truncate.
    if len(time) < 20076 or len(flux) < 20076:
        flux = np.pad(flux, (0, 20076-len(flux)), 'constant', constant_values=(0, np.nan))
        time_pad = [time[-1]+i*2./(60.*24) for i in range(1, 20076-len(time)+1)]
        time = np.append(time, time_pad)
    else:
        flux = flux[:20076]
        time = time[:20076]
    # print(len(time), len(flux))
    return time, flux, [star_rad, star_mass, tess_mag, star_teff, star_logg, epoch, period, duration, depth]
if __name__ == '__main__':
    csv_filename = '../target_info/tois.csv'
    lightcuvre_dir = '../data/lc_data/tois_lc_data_test/'
    Output_Path = '../model_input/'
    ## read tois.csv (skip the 4-line preamble above the header row)
    data = pd.read_csv(csv_filename, header=0, skiprows=4)
    ## Start reading and processing light curve
    toi_num = 0
    ## folded lightcurves and other parameters
    tic = []
    lcs = []
    transits = []
    pds = []
    stellar_params = []
    dispositions = []
    _tmag, _period, _duration, _depth, _snr = [], [], [], [], []
    for index, info in data.iterrows():
        sector, ticid, disposition = int(info['Sectors'][0:2]), int(info['TIC']), info['TOI Disposition']
        # skip targets with an incomplete ephemeris
        if pd.isna(info['Orbital Period Value']) or pd.isna(info['Epoch Value']) or pd.isna(info['Transit Duration Value']):
            continue
        try:
            hdulist = fits.open(lightcuvre_dir + '{:016d}.fits'.format(ticid))
            ## extract data from original lightcurve file
            time, flux, params = process_toi_lightcurve_into_numpy_array(hdulist, info)
            padded_flux, obs_transits, period = process_lightcurves_into_input_representation(time, flux, params[6], params[5])
            ## save data to npy
            tic.append(ticid)
            lcs.append(padded_flux)
            transits.append(obs_transits)
            pds.append(period)
            stellar_params.append([params[0], params[1], params[2], params[3], params[4]])
            dispositions.append(disposition)
            _tmag.append(info['TMag Value'])
            _period.append(info['Orbital Period Value'])
            _duration.append(info['Transit Duration Value'])
            _depth.append(info['Transit Depth Value'])
            _snr.append(info['Signal-to-noise'])
            toi_num += 1
        except Exception:
            # BUGFIX: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt. Still best-effort: targets whose FITS file is
            # missing or malformed are simply skipped.
            continue
    tic = np.array(tic)
    lcs = np.array(lcs)
    transits = np.array(transits)
    pds = np.array(pds)
    stellar_params = np.array(stellar_params, dtype=float)
    dispositions = np.array(dispositions)
    _tmag = np.array(_tmag)
    _period = np.array(_period)
    _duration = np.array(_duration)
    _depth = np.array(_depth)
    _snr = np.array(_snr)
    print(f'Total Tois: {toi_num}')
    np.savez(Output_Path+'tois.npz', tic=tic, lc=lcs, transits=transits, period=pds, dispositions=dispositions)
    np.savez(Output_Path+'valid_spoc_toi_info.npz', tic=tic, tmag=_tmag, period=_period, duration=_duration, depth=_depth, snr=_snr)
|
# "Gold Cave": collect gold, dodge rocks. Window, color, and sprite setup.
import pygame
import random
pygame.init()
#dimensions
screen_x = 900
screen_y = 500
#window config
game_window = pygame.display.set_mode((screen_x, screen_y))
pygame.display.set_caption("Gold Cave")
game_running = True
#tick
clock = pygame.time.Clock()
#colors (RGB tuples)
red = (252, 19, 3)
purple = (96, 50, 168)
white = (255, 255, 255)
green = (0, 144, 42)
yellow = (255,215,0)
blue = (52, 97, 235)
black = (0, 0, 0)
pink = (245, 66, 206)
l_blue = (66, 245, 236)
brown = (128,0,0)
magenta = (171, 7, 163)
orange = (255, 2,0)
#player_stuff: start centered, 20px square, moves 1px per step
p_speed = 1
p_x = screen_x/2
p_y = screen_y/2
p_block = 20
p_col = green
score = 0
#gold
gold_num = 1
gold = []
g_block = 15
g_col = yellow
g_x = random.randint(100, 800)
g_y = random.randint(50,450)
#rocks: rock_l enter from the left edge, rock_t from the top edge
rock_num = 4
rock_l = []
rock_t = []
r_block = 15
r_col = brown
rx_l = 0
ry_l = random.randint(50,490)
rx_t = random.randint(50, 890)
ry_t = 0
r_speed = 4
#lives
num_lives = 3
#rectabgle surfaces
player = pygame.Rect(p_x, p_y, p_block,p_block)
# NOTE(review): the random start positions are drawn once above, so every
# rect created in each loop below shares the same starting position.
for i in range (gold_num):
    gold.append(pygame.Rect(g_x,g_y,g_block,g_block))
for i in range (rock_num):
    rock_l.append(pygame.Rect(rx_l,ry_l,r_block, r_block ))
for i in range(rock_num):
    rock_t.append(pygame.Rect(rx_t, ry_t,r_block, r_block ))
#functions
#text function
def txt(txt,m, x, y, c, s):
    """Render `str(txt) + str(m)` onto the game window at (x, y) using the
    freesansbold font at size `s` in color `c`."""
    rendered = pygame.font.Font('freesansbold.ttf', s).render(str(txt) + str(m), True, (c))
    game_window.blit(rendered, (x, y))
#movement function
def move(l):
    """Move the global `player` rect 10 * p_speed pixels in direction `l`
    ('w' up, 'a' left, 's' down, 'd' right).

    The edge check happens once, before moving, so the player only moves
    while not already at/past the corresponding screen edge (it may
    overshoot the edge by up to 10px, matching the original behaviour).
    """
    key = str(l)
    step = 10 * p_speed
    if key == "a" and player.x > 0:
        player.x -= step
    if key == "w" and player.y > 0:
        player.y -= step
    if key == "s" and player.y < screen_y - p_block:
        player.y += step
    if key == "d" and player.x < screen_x - p_block:
        player.x += step
#booleans: game state flags
gameOver = False
winner = False
pregame = True
#background images
bground = pygame.image.load("minent.png")
bground2 = pygame.image.load("cave.png")
# Main loop at 30 FPS: pregame screen -> gameplay -> win / game-over screens.
while game_running:
    clock.tick(30)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            game_running = False
    #gold collection: on pickup, respawn the gold and bump the score
    for i in range(gold_num):
        if player.colliderect(gold[i]):
            gold[i].x = random.randint(100,800) + r_speed
            gold[i].y = random.randint(50,450)
            if gold[i].x == 700:
                gold[i].x - r_speed  # NOTE(review): no-op, the result is discarded; probably intended `-=`
            score += 1
    #adding the pregame picture
    game_window.blit(bground,(0,0))
    #pregame screen
    keys = pygame.key.get_pressed()
    if pregame == True:
        txt("GOLD CAVE","" ,screen_x/2.859, screen_y/15, yellow, 50)
        txt("PRESS SPACE TO START", '', screen_x/2.8, screen_y/ 1.2, magenta, 24)
        txt("CONTROLS: HOLD", '', screen_x/15, screen_y/4.5, brown, 30 )
        txt("W = UP, S = DOWN", '', screen_x/15, screen_y/3, blue, 15)
        txt("A = LEFT, D = RIGHT", '', screen_x/15, screen_y/2.6, blue, 15 )
        txt("Avoid the rocks! ", '', screen_x/15, screen_y/2, red, 25)
        txt("Collect 25 gold to win!", '', screen_x/15, screen_y/1.8, red, 25)
        if keys[pygame.K_SPACE]:
            pregame = False
            winner = False
            score = 0
    #movement#score keeping #drawing player
    #generating gold and rocks
    if pregame == False and gameOver == False:
        game_window.blit(bground2,(0,0))
        if keys[pygame.K_w]:
            move("w")
        elif keys[pygame.K_s]:
            move("s")
        elif keys[pygame.K_a]:
            move("a")
        elif keys[pygame.K_d]:
            move("d")
        txt("LIVES: ",num_lives, screen_x/ 1.5, screen_y/15, yellow, 32)
        txt("SCORE: ",score, screen_x/3.8, screen_y/15, yellow, 32 )
        pygame.draw.rect(game_window, p_col, player)
        for i in range(gold_num):
            pygame.draw.rect(game_window, g_col, gold[i])
        for i in range(rock_num):
            pygame.draw.rect(game_window,r_col, rock_l[i])
            pygame.draw.rect( game_window, r_col, rock_t[i])
    #losing the game
    if gameOver == True:
        game_window.fill(white)
        txt("GAMEOVER!",'',screen_x/2.859, screen_y/15, yellow, 50)
        txt("FINAL SCORE: ",score,screen_x/7, screen_y/5, l_blue, 25)
        txt("PRESS SPACE TO PLAY AGAIN",'', screen_x/2, screen_y/5, blue, 25)
        txt("PRESS T TO QUIT",'', screen_x/2.8, screen_y/2, red, 25)
        if keys[pygame.K_SPACE]:
            gameOver = False
            score = 0
            num_lives = 3
        if keys[pygame.K_t]:
            game_running = False
    #winning the thing
    # NOTE(review): win triggers at 20 gold although the pregame text says 25.
    if score == 20:
        game_window.fill(white)
        txt("YOU WON!",'',screen_x/2.859, screen_y/15, yellow, 50)
        txt("PRESS SPACE TO PLAY AGAIN",'', screen_x/3, screen_y/5, blue, 25)
        txt("PRESS T TO QUIT",'', screen_x/2.8, screen_y/2, red, 25)
        if keys[pygame.K_t]:
            game_running = False
        if keys[pygame.K_SPACE]:
            gameOver = False
            score = 0
            num_lives = 3
    #starting the game: advance rocks, respawn at edges, handle collisions
    if pregame == False:
        for i in range(rock_num):
            rock_l[i].x += r_speed
            rock_t[i].y += r_speed
            # NOTE(review): exact `==` respawn checks only work while the
            # positions stay multiples of r_speed from the respawn x/y of 12.
            if rock_l[i].x == 800:
                rock_l[i].y = random.randint(50,400) + r_speed
                rock_l[i].x = 12
            if rock_t[i].y == 400:
                rock_t[i].x = random.randint(50,890) + r_speed
                rock_t[i].y = 12
        for i in range(rock_num):
            if player.colliderect(rock_l[i]) or player.colliderect(rock_t[i]):
                num_lives -= 1
                rock_l[i].y = random.randint(50,400)
                rock_l[i].x = 12
                rock_t[i].x = random.randint(50, 890)
                rock_t[i].y = 12
                if num_lives == 0:
                    gameOver = True
                    rock_l[i].y = random.randint(50,400)
                    rock_l[i].x = 12
                    rock_t[i].x = random.randint(50, 890)
                    rock_t[i].y = 12
                    player.x = screen_x/2
                    player.y = screen_y/2
    pygame.display.update()
pygame.quit()
|
"""
Copyright (c) 2020 Kudelski Security, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Glitch():
    """
    Base glitch class.

    Subclasses implement `apply(pipe)` -- taking a pipe handle (presumably
    r2pipe, given the `cmd`/`cmdj` usage in subclasses) -- and return a
    description string, or None when the glitch is not applicable.
    """
    @staticmethod
    def apply(pipe):
        """No-op base glitch; returns an empty description.

        BUGFIX(consistency): was a @classmethod whose first parameter was
        misleadingly named `self`; the subclasses define static-style
        `apply(pipe)`, so @staticmethod matches the actual call contract and
        also makes instance-level calls behave correctly.
        """
        return ""
class Skip(Glitch):
    """
    Simple glitch. Skips the instruction
    """
    @staticmethod
    def apply(pipe):
        """Advance PC past the instruction at PC (instruction-skip glitch)
        and return a description of what was skipped.

        BUGFIX(consistency): marked @staticmethod; previously an
        instance-level call would have bound `self` to `pipe`.
        """
        cur_instr = pipe.cmdj("pdj 1@PC")[0]
        # New PC = current instruction address + its encoded size.
        new_addr = cur_instr['offset']+cur_instr['size']
        pipe.cmd('aer PC = '+hex(new_addr))
        return f"Skip {cur_instr['disasm']} @ {hex(cur_instr['offset'])}"
class ZeroSReg(Glitch):
    """
    Zero source register
    TODO: Add support for multiple register glitches
    """
    @staticmethod
    def apply(pipe):
        """Zero the first source (read, 'I') register of the instruction at
        PC. Returns a description string, or None when the analysis reports
        no readable register.

        BUGFIX: the bare `except:` also swallowed SystemExit and
        KeyboardInterrupt; only the missing-register lookup should fall
        through to None.
        """
        cur_instr = pipe.cmdj("pdj 1@PC")[0]
        changes = pipe.cmdj('aeaj 1@PC')
        try:
            reg = changes['I'][0]
        except (TypeError, KeyError, IndexError):
            return None
        pipe.cmd(f"aer {reg} = 0")
        return f"Zero {reg} in {cur_instr['disasm']} @ {hex(cur_instr['offset'])}"
class ZeroDReg(Glitch):
    """
    Zero destination register
    TODO: Add support for multiple register glitches
    """
    @staticmethod
    def apply(pipe):
        """Zero the first destination (write, 'W') register of the
        instruction at PC. Returns a description string, or None when the
        analysis reports no writable register.

        BUGFIX: the bare `except:` also swallowed SystemExit and
        KeyboardInterrupt; only the missing-register lookup should fall
        through to None.
        """
        cur_instr = pipe.cmdj("pdj 1@PC")[0]
        changes = pipe.cmdj('aeaj 1@PC')
        try:
            reg = changes['W'][0]
        except (TypeError, KeyError, IndexError):
            return None
        pipe.cmd(f"aer {reg} = 0")
        return f"Zero {reg} in {cur_instr['disasm']} @ {hex(cur_instr['offset'])}"
|
#!/bin/env python
# Automatically translated python version of
# OpenSceneGraph example program "osgtexturecompression"
# !!! This program will need manual tuning before it will work. !!!
import sys
from osgpypp import osg
from osgpypp import osgDB
from osgpypp import osgGA
from osgpypp import osgText
from osgpypp import osgViewer
# Translated from file 'osgtexturecompression.cpp'
# OpenSceneGraph example, osgtexture3D.
#*
#* Permission is hereby granted, free of charge, to any person obtaining a copy
#* of this software and associated documentation files (the "Software"), to deal
#* in the Software without restriction, including without limitation the rights
#* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#* copies of the Software, and to permit persons to whom the Software is
#* furnished to do so, subject to the following conditions:
#*
#* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#* THE SOFTWARE.
#
#include <osg/Node>
#include <osg/Geometry>
#include <osg/Notify>
#include <osg/Texture2D>
#include <osg/TexGen>
#include <osg/Geode>
#include <osgDB/ReadFile>
#include <osgText/Text>
#include <osgGA/TrackballManipulator>
#include <osgViewer/CompositeViewer>
#include <iostream>
def createHUD(label):
    """Build a post-render orthographic HUD camera that displays `label`
    as large text at (150, 150).

    NOTE(review): auto-translated from C++; GL_DEPTH_BUFFER_BIT is not
    defined anywhere in this file (the header warns the program needs
    manual tuning) -- it presumably should come from an OpenGL binding.
    """
    # create a camera to set up the projection and model view matrices, and the subgraph to drawn in the HUD
    camera = osg.Camera()
    # set the projection matrix (pixel coordinates, 1280x1024 virtual screen)
    camera.setProjectionMatrix(osg.Matrix.ortho2D(0,1280,0,1024))
    # set the view matrix
    camera.setReferenceFrame(osg.Transform.ABSOLUTE_RF)
    camera.setViewMatrix(osg.Matrix.identity())
    # only clear the depth buffer
    camera.setClearMask(GL_DEPTH_BUFFER_BIT)
    # draw subgraph after main camera view.
    camera.setRenderOrder(osg.Camera.POST_RENDER)
    # we don't want the camera to grab event focus from the viewers main camera(s).
    camera.setAllowEventFocus(False)
    # add to this camera a subgraph to render
    geode = osg.Geode()
    font = str("fonts/arial.ttf")
    # turn lighting off for the text and disable depth test to ensure its always ontop.
    stateset = geode.getOrCreateStateSet()
    stateset.setMode(GL_LIGHTING,osg.StateAttribute.OFF)
    position = osg.Vec3(150.0,150.0,0.0)
    text = osgText.Text()
    geode.addDrawable( text )
    text.setFont(font)
    text.setPosition(position)
    text.setCharacterSize(100.0)
    text.setText(label)
    camera.addChild(geode)
    return camera
def creatQuad(name, image, formatMode, minFilter):
    """Return a group with a textured quad for *image* plus a HUD label.

    The quad is textured using the given internal-format mode and
    minification filter so different compression schemes can be compared.
    """
    quad_geode = osg.Geode()
    # Quad spans the image dimensions in the x/z plane.
    # NOTE(review): createTexturedQuadGeometry is used unqualified here —
    # presumably provided by the osg bindings' namespace; confirm at runtime.
    quad_geode.addDrawable(createTexturedQuadGeometry(
        osg.Vec3(0.0, 0.0, 0.0),
        osg.Vec3(float(image.s()), 0.0, 0.0),
        osg.Vec3(0.0, 0.0, float(image.t()))))
    quad_geode.setName(name)

    quad_texture = osg.Texture2D(image)
    quad_texture.setInternalFormatMode(formatMode)
    quad_texture.setFilter(osg.Texture.MIN_FILTER, minFilter)
    quad_geode.getOrCreateStateSet().setTextureAttributeAndModes(
        0, quad_texture, osg.StateAttribute.ON)

    root = osg.Group()
    root.addChild(quad_geode)
    root.addChild(createHUD(name))
    return root
def main(argv):
    """Show argv[1] in one view per texture-compression mode.

    Creates a full-screen graphics context, builds one quad per
    compression mode and tiles a viewer view per quad.  Returns the
    viewer's exit status, or 1 on any setup error.
    """
    import math  # local import: this translated file has no top-level imports

    arguments = osg.ArgumentParser(argv)

    # construct the viewer.
    viewer = osgViewer.CompositeViewer(arguments)

    if arguments.argc() <= 1:
        print("Please supply an image filename on the command line.")
        return 1

    filename = arguments[1]
    image = osgDB.readImageFile(filename)
    if not image:
        print("Error: unable to read image from", filename)
        return 1

    wsi = osg.GraphicsContext.getWindowingSystemInterface()
    if not wsi:
        print("Error, no WindowSystemInterface available, cannot create windows.")
        return 1

    # The C++ API fills width/height through out-parameters; the Python
    # binding is assumed to return them instead — TODO confirm binding.
    width, height = wsi.getScreenResolution(
        osg.GraphicsContext.ScreenIdentifier(0))

    # Full-screen, undecorated, double-buffered context.
    traits = osg.GraphicsContext.Traits()
    traits.x = 0
    traits.y = 0
    traits.width = width
    traits.height = height
    traits.windowDecoration = False
    traits.doubleBuffer = True

    gc = osg.GraphicsContext.createGraphicsContext(traits)
    if not gc:
        print("Error: GraphicsWindow has not been created successfully.")
        return 1  # bail out: every view below needs a valid context
    gc.setClearColor(osg.Vec4(0.0, 0.0, 0.0, 1.0))
    gc.setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    # One shared trackball manipulator keeps all views in sync.
    trackball = osgGA.TrackballManipulator()

    models = [
        creatQuad("no compression", image, osg.Texture.USE_IMAGE_DATA_FORMAT, osg.Texture.LINEAR),
        creatQuad("ARB compression", image, osg.Texture.USE_ARB_COMPRESSION, osg.Texture.LINEAR),
        creatQuad("DXT1 compression", image, osg.Texture.USE_S3TC_DXT1_COMPRESSION, osg.Texture.LINEAR),
        creatQuad("DXT3 compression", image, osg.Texture.USE_S3TC_DXT3_COMPRESSION, osg.Texture.LINEAR),
        creatQuad("DXT5 compression", image, osg.Texture.USE_S3TC_DXT5_COMPRESSION, osg.Texture.LINEAR),
    ]

    # compute the number of views up and across that are needed
    aspectRatio = float(width) / float(height)
    multiplier = math.sqrt(float(len(models)) / aspectRatio)
    multiplier_x = multiplier * aspectRatio
    multiplier_y = multiplier
    if (multiplier_x / math.ceil(multiplier_x)) > (multiplier_y / math.ceil(multiplier_y)):
        numX = int(math.ceil(multiplier_x))
        numY = int(math.ceil(float(len(models)) / float(numX)))
    else:
        numY = int(math.ceil(multiplier_y))
        numX = int(math.ceil(float(len(models)) / float(numY)))

    # populate the viewer with one view per model on a numX x numY grid.
    for i, model in enumerate(models):
        view = osgViewer.View()
        xCell = i % numX
        yCell = i // numX
        vx = int((float(xCell) / float(numX)) * float(width))
        vy = int((float(yCell) / float(numY)) * float(height))
        vw = int(float(width) / float(numX))
        vh = int(float(height) / float(numY))
        view.setSceneData(model)
        view.getCamera().setProjectionMatrixAsPerspective(
            30.0, float(vw) / float(vh), 1.0, 1000.0)
        view.getCamera().setViewport(osg.Viewport(vx, vy, vw, vh))
        view.getCamera().setGraphicsContext(gc)
        # Do not clear per-view: the shared context is cleared once.
        view.getCamera().setClearMask(0)
        view.setCameraManipulator(trackball)
        viewer.addView(view)

    return viewer.run()
if __name__ == "__main__":
main(sys.argv)
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Book fields tests."""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from cds_dojson.marc21.utils import create_record
from dojson.errors import MissingRule
from cds_ils.importer.errors import ManualImportRequired, \
MissingRequiredField, UnexpectedValue
from cds_ils.importer.providers.cds.cds import get_helper_dict
from cds_ils.importer.providers.cds.models.book import model
from cds_ils.importer.providers.cds.rules.values_mapping import MATERIALS, \
mapping
# Minimal MARC21 collection wrapper; ``{0}`` is replaced by the record body.
marcxml = (
    '<collection xmlns="http://www.loc.gov/MARC21/slim">'
    "<record>{0}</record></collection>"
)
def check_transformation(marcxml_body, json_body):
    """Transform *marcxml_body* via the book model and compare the result.

    The expected record is ``json_body`` merged over a fresh ``_migration``
    helper dict.
    """
    record_blob = create_record(marcxml.format(marcxml_body))
    # Reset defaults so every call starts from a clean migration dict.
    model._default_fields = {"_migration": {**get_helper_dict()}}
    transformed = model.do(record_blob, ignore_missing=False)
    expected = {"_migration": {**get_helper_dict()}, **json_body}
    assert transformed == expected
def test_mapping():
    """Unknown material values must raise ``UnexpectedValue``."""
    with pytest.raises(UnexpectedValue):
        result = mapping(MATERIALS, "softwa", raise_exception=True)
        assert result == "software"
def test_subject_classification(app):
    """Test subject classification (MARC 050/080/082/084 -> ``subjects``)."""
    with app.app_context():
        # 082: Dewey classification
        check_transformation(
            """
            <datafield tag="082" ind1="0" ind2="4">
                <subfield code="a">515.353</subfield>
                <subfield code="2">23</subfield>
            </datafield>
            """,
            {"subjects": [{"value": "515.353", "scheme": "Dewey"}]},
        )
        # 050: Library of Congress — same output for either ind2 value
        check_transformation(
            """
            <datafield tag="050" ind1=" " ind2="4">
                <subfield code="a">QA76.642</subfield>
                <subfield code="b">XXXX</subfield>
            </datafield>
            """,
            {"subjects": [{"value": "QA76.642", "scheme": "LoC"}]},
        )
        check_transformation(
            """
            <datafield tag="050" ind1=" " ind2=" ">
                <subfield code="a">QA76.642</subfield>
                <subfield code="b">XXXX</subfield>
            </datafield>
            """,
            {"subjects": [{"value": "QA76.642", "scheme": "LoC"}]},
        )
        # LoC and Dewey fields combine into one subjects list
        check_transformation(
            """
            <datafield tag="050" ind1=" " ind2="4">
                <subfield code="a">QA76.642</subfield>
            </datafield>
            <datafield tag="082" ind1=" " ind2=" ">
                <subfield code="a">005.275</subfield>
            </datafield>
            """,
            {
                "subjects": [
                    {"value": "QA76.642", "scheme": "LoC"},
                    {"value": "005.275", "scheme": "Dewey"},
                ]
            },
        )
        # 080: UDC
        check_transformation(
            """
            <datafield tag="080" ind1=" " ind2=" ">
                <subfield code="a">528</subfield>
            </datafield>
            """,
            {"subjects": [{"value": "528", "scheme": "UDC"}]},
        )
        # 084 without a scheme subfield: ICS
        check_transformation(
            """
            <datafield tag="084" ind1=" " ind2=" ">
                <subfield code="a">25040.40</subfield>
            </datafield>
            """,
            {"subjects": [{"value": "25040.40", "scheme": "ICS"}]},
        )
        # 084 with scheme "PACS" produces no output
        check_transformation(
            """
            <datafield tag="084" ind1=" " ind2=" ">
                <subfield code="2">PACS</subfield>
                <subfield code="a">13.75.Jz</subfield>
            </datafield>
            <datafield tag="084" ind1=" " ind2=" ">
                <subfield code="2">PACS</subfield>
                <subfield code="a">13.60.Rj</subfield>
            </datafield>
            <datafield tag="084" ind1=" " ind2=" ">
                <subfield code="2">PACS</subfield>
                <subfield code="a">14.20.Jn</subfield>
            </datafield>
            <datafield tag="084" ind1=" " ind2=" ">
                <subfield code="2">PACS</subfield>
                <subfield code="a">25.80.Nv</subfield>
            </datafield>
            """,
            {},
        )
        # 084 with scheme "CERN Yellow Report" produces no output either
        check_transformation(
            """
            <datafield tag="084" ind1=" " ind2=" ">
                <subfield code="2">CERN Yellow Report</subfield>
                <subfield code="a">CERN-2018-003-CP</subfield>
            </datafield>
            """,
            {},
        )
def test_created_by_email(app):
    """Test acquisition email (859__f -> ``created_by``)."""
    with app.app_context():
        check_transformation(
            """
            <datafield tag="859" ind1=" " ind2=" ">
                <subfield code="f">karolina.przerwa@cern.ch</subfield>
            </datafield>
            """,
            {
                "created_by": {
                    "_email": "karolina.przerwa@cern.ch",
                    "type": "user",
                },
            },
        )
        # Combined with 916__w, a creation date is derived as well
        # (week stamp 201829 maps to 2018-07-16 here).
        check_transformation(
            """
            <datafield tag="916" ind1=" " ind2=" ">
                <subfield code="s">h</subfield>
                <subfield code="w">201829</subfield>
            </datafield>
            <datafield tag="859" ind1=" " ind2=" ">
                <subfield code="f">karolina.przerwa@cern.ch</subfield>
            </datafield>
            """,
            {
                "created_by": {
                    "type": "user",
                    "_email": "karolina.przerwa@cern.ch",
                },
                "_created": "2018-07-16",
            },
        )
def test_created(app):
    """Test created dates (595__a source codes and 916__w week stamps)."""
    with app.app_context():
        # "SPR" + YYYYMM in 595__a yields source and a creation date
        check_transformation(
            """
            <datafield tag="595" ind1=" " ind2=" ">
                <subfield code="a">SPR201701</subfield>
            </datafield>
            """,
            {"source": "SPR", "_created": "2017-01-01"},
        )
        # non-matching 595__a values become internal notes;
        # identical repeats collapse into one entry
        check_transformation(
            """
            <datafield tag="595" ind1=" " ind2=" ">
                <subfield code="a">random text</subfield>
            </datafield>
            <datafield tag="595" ind1=" " ind2=" ">
                <subfield code="a">random text</subfield>
            </datafield>
            """,
            {
                "internal_notes": [
                    {"value": "random text"},
                ]
            },
        )
        # when both are present, the 595__a date wins over the 916__w stamp
        check_transformation(
            """
            <datafield tag="916" ind1=" " ind2=" ">
                <subfield code="s">h</subfield>
                <subfield code="w">201829</subfield>
            </datafield>
            <datafield tag="595" ind1=" " ind2=" ">
                <subfield code="a">SPR201701</subfield>
            </datafield>
            <datafield tag="859" ind1=" " ind2=" ">
                <subfield code="f">karolina.przerwa@cern.ch</subfield>
            </datafield>
            """,
            {
                "created_by": {
                    "type": "user",
                    "_email": "karolina.przerwa@cern.ch",
                },
                "source": "SPR",
                "_created": "2017-01-01",
            },
        )
def test_collections(app):
    """Test collections (980__a/b and 697C_a -> migration tags).

    NOTE: two byte-identical duplicated checks were removed (the
    980__a LEGSERLIB and 697C_a BOOKSHOP cases were each asserted twice).
    """
    with app.app_context():
        # tag read from subfield "b"
        check_transformation(
            """
            <datafield tag="980" ind1=" " ind2=" ">
                <subfield code="b">LEGSERLIB</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "tags": ["LEGSERLIB"],
                    "has_tags": True,
                },
            },
        )
        # tag read from subfield "a" as well
        check_transformation(
            """
            <datafield tag="980" ind1=" " ind2=" ">
                <subfield code="a">LEGSERLIB</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "tags": ["LEGSERLIB"],
                    "has_tags": True,
                },
            },
        )
        # 697C_a collections map to tags too
        check_transformation(
            """
            <datafield tag="697" ind1="C" ind2=" ">
                <subfield code="a">LEGSERLIBINTLAW</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "tags": ["LEGSERLIBINTLAW"],
                    "has_tags": True,
                },
            },
        )
        check_transformation(
            """
            <datafield tag="697" ind1="C" ind2=" ">
                <subfield code="a">BOOKSHOP</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "tags": ["BOOKSHOP"],
                    "has_tags": True,
                },
            },
        )
        check_transformation(
            """
            <datafield tag="697" ind1="C" ind2=" ">
                <subfield code="a">LEGSERLIBLEGRES</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "tags": ["LEGSERLIBLEGRES"],
                    "has_tags": True,
                },
            },
        )
def test_document_type(app):
    """Test document type (980__a, 960__a base numbers, 690C_)."""
    with app.app_context():
        check_transformation(
            """
            <datafield tag="980" ind1=" " ind2=" ">
                <subfield code="a">BOOK</subfield>
            </datafield>
            """,
            {
                "document_type": "BOOK",
            },
        )
        # 960__a base number 21 -> BOOK; 42 and 43 -> PROCEEDINGS
        check_transformation(
            """
            <datafield tag="960" ind1=" " ind2=" ">
                <subfield code="a">21</subfield>
            </datafield>
            """,
            {
                "document_type": "BOOK",
            },
        )
        check_transformation(
            """
            <datafield tag="960" ind1=" " ind2=" ">
                <subfield code="a">42</subfield>
            </datafield>
            """,
            {
                "document_type": "PROCEEDINGS",
            },
        )
        check_transformation(
            """
            <datafield tag="960" ind1=" " ind2=" ">
                <subfield code="a">43</subfield>
            </datafield>
            """,
            {
                "document_type": "PROCEEDINGS",
            },
        )
        # 690C_a BOOK wins; the subfield "b" REPORT entry is ignored
        check_transformation(
            """
            <datafield tag="690" ind1="C" ind2=" ">
                <subfield code="a">BOOK</subfield>
            </datafield>
            <datafield tag="690" ind1="C" ind2=" ">
                <subfield code="b">REPORT</subfield>
            </datafield>
            """,
            {"document_type": "BOOK"},
        )
        # unknown 697C_ values must raise
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="697" ind1="C" ind2=" ">
                    <subfield code="a">virTScvyb</subfield>
                </datafield>
                """,
                {"document_type": "BOOK"},
            )
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="697" ind1="C" ind2=" ">
                    <subfield code="b">ENGLISH BOOK CLUB</subfield>
                </datafield>
                <datafield tag="960" ind1=" " ind2=" ">
                    <subfield code="a">21</subfield>
                </datafield>
                """,
                {"document_type": "BOOK"},
            )
def test_document_type_collection(app):
    """Test document type collection (tag and type from two 980 fields)."""
    with app.app_context():
        check_transformation(
            """
            <datafield tag="980" ind1=" " ind2=" ">
                <subfield code="b">LEGSERLIB</subfield>
            </datafield>
            <datafield tag="980" ind1=" " ind2=" ">
                <subfield code="a">BOOK</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "tags": ["LEGSERLIB"],
                    "has_tags": True,
                },
                "document_type": "BOOK",
            },
        )
        # same result with the subfield codes swapped between the fields
        check_transformation(
            """
            <datafield tag="980" ind1=" " ind2=" ">
                <subfield code="a">LEGSERLIB</subfield>
            </datafield>
            <datafield tag="980" ind1=" " ind2=" ">
                <subfield code="b">BOOK</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "tags": ["LEGSERLIB"],
                    "has_tags": True,
                },
                "document_type": "BOOK",
            },
        )
def test_urls(app):
    """Test urls (856/8564 -> file links, e-item providers or plain urls)."""
    with app.app_context():
        # CDS file link with a description -> migration file links
        check_transformation(
            """
            <datafield tag="856" ind1="4" ind2=" ">
                <subfield code="8">1336159</subfield>
                <subfield code="s">726479</subfield>
                <subfield code="u">
                http://cds.cern.ch/record/1393420/files/NF-EN-13480-2-AC6.pdf
                </subfield>
                <subfield code="y">
                Description
                </subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "eitems_has_files": True,
                    "eitems_file_links": [
                        {
                            "description": "Description",
                            "value": "http://cds.cern.ch/record/1393420/files/NF-EN-13480-2-AC6.pdf",
                        }
                    ],
                }
            },
        )
        # CDS file link without a description
        check_transformation(
            """
            <datafield tag="8564" ind1=" " ind2=" ">
                <subfield code="u">https://cds.cern.ch/record/12345/files/abc.pdf</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "eitems_has_files": True,
                    "eitems_file_links": [
                        {
                            "value": "https://cds.cern.ch/record/12345/files/abc.pdf"
                        }
                    ],
                }
            },
        )
        # non-CDS links become plain urls
        check_transformation(
            """
            <datafield tag="856" ind1="4" ind2=" ">
                <subfield code="8">1336158</subfield>
                <subfield code="s">2445021</subfield>
                <subfield code="u">http://awesome.domain/with/a/path</subfield>
            </datafield>
            <datafield tag="856" ind1="4" ind2=" ">
                <subfield code="8">1336157</subfield>
                <subfield code="s">2412918</subfield>
                <subfield code="u">http://another.domain/with/a/path</subfield>
                <subfield code="x">pdfa</subfield>
            </datafield>
            """,
            {
                "urls": [
                    {"value": "http://awesome.domain/with/a/path"},
                    {"value": "http://another.domain/with/a/path"},
                ],
            },
        )
        # EBLIB ebook link -> eitems_ebl (surrounding whitespace stripped)
        check_transformation(
            """
            <datafield tag="856" ind1="4" ind2=" ">
                <subfield code="u">
                https://cdsweb.cern.ch/auth.py?r=EBLIB_P_1139560
                </subfield>
                <subfield code="y">ebook</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "eitems_has_ebl": True,
                    "eitems_ebl": [
                        {
                            "value": "https://cdsweb.cern.ch/auth.py?r=EBLIB_P_1139560"
                        }
                    ],
                }
            },
        )
        # external provider (O'Reilly) ebook link -> eitems_external
        check_transformation(
            """
            <datafield tag="856" ind1="4" ind2=" ">
                <subfield code="u">https://learning.oreilly.com/library/view/-/9781118491300/?ar</subfield>
                <subfield code="y">ebook</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "eitems_has_external": True,
                    "eitems_external": [
                        {
                            "value": "https://learning.oreilly.com/library/view/-/9781118491300/?ar"
                        }
                    ],
                }
            },
        )
        # ezproxy link -> eitems_proxy, with the proxy prefix stripped
        check_transformation(
            """
            <datafield tag="856" ind1="4" ind2=" ">
                <subfield code="u">
                https://ezproxy.cern.ch/login?url=https://www.worldscientific.com/toc/rast/10
                </subfield>
                <subfield code="y">ebook</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "eitems_has_proxy": True,
                    "eitems_proxy": [
                        {
                            "value": "https://www.worldscientific.com/toc/rast/10"
                        }
                    ],
                }
            },
        )
        # mixed EBLIB + external providers in one record
        check_transformation(
            """
            <datafield tag="856" ind1="4" ind2=" ">
                <subfield code="u">
                https://cdsweb.cern.ch/auth.py?r=EBLIB_P_1139560
                </subfield>
                <subfield code="y">ebook</subfield>
            </datafield>
            <datafield tag="856" ind1="4" ind2=" ">
                <subfield code="u">
                https://learning.oreilly.com/library/view/-/9781118491300/?ar
                </subfield>
                <subfield code="y">ebook</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "eitems_has_ebl": True,
                    "eitems_ebl": [
                        {
                            "value": "https://cdsweb.cern.ch/auth.py?r=EBLIB_P_1139560"
                        },
                    ],
                    "eitems_external": [
                        {
                            "value": "https://learning.oreilly.com/library/view/-/9781118491300/?ar"
                        },
                    ],
                    "eitems_has_external": True,
                }
            },
        )
        # plain url with a description
        check_transformation(
            """
            <datafield tag="8564" ind1=" " ind2=" ">
                <subfield code="u">google.com</subfield>
                <subfield code="y">description</subfield>
            </datafield>
            """,
            {
                "urls": [
                    {"value": "google.com", "description": "description"}
                ],
            },
        )
def test_authors(app):
    """Test authors (100/700 people, 720 alternative names)."""
    with app.app_context():
        # 700__e "ed." -> EDITOR role; 100 entries -> AUTHOR role;
        # 720 names are attached as alternative_names
        check_transformation(
            """
            <datafield tag="700" ind1=" " ind2=" ">
                <subfield code="a">Frampton, Paul H</subfield>
                <subfield code="e">ed.</subfield>
            </datafield>
            <datafield tag="700" ind1=" " ind2=" ">
                <subfield code="a">Glashow, Sheldon Lee</subfield>
                <subfield code="e">ed.</subfield>
            </datafield>
            <datafield tag="700" ind1=" " ind2=" ">
                <subfield code="a">Van Dam, Hendrik</subfield>
                <subfield code="e">ed.</subfield>
            </datafield>
            <datafield tag="100" ind1=" " ind2=" ">
                <subfield code="a">Seyfert, Paul</subfield>
                <subfield code="0">AUTHOR|(INSPIRE)INSPIRE-00341737</subfield>
                <subfield code="0">AUTHOR|(SzGeCERN)692828</subfield>
                <subfield code="0">AUTHOR|(CDS)2079441</subfield>
                <subfield code="u">CERN</subfield>
                <subfield code="m">paul.seyfert@cern.ch</subfield>
                <subfield code="9">#BEARD#</subfield>
            </datafield>
            <datafield tag="720" ind1=" " ind2=" ">
                <subfield code="a">Neubert, Matthias</subfield>
            </datafield>
            <datafield tag="100" ind1=" " ind2=" ">
                <subfield code="a">John Doe</subfield>
                <subfield code="u">CERN</subfield>
                <subfield code="u">Univ. Gent</subfield>
            </datafield>
            <datafield tag="100" ind1=" " ind2=" ">
                <subfield code="a">Jane Doe</subfield>
                <subfield code="u">CERN</subfield>
                <subfield code="u">Univ. Gent</subfield>
            </datafield>
            """,
            {
                "authors": [
                    {
                        "full_name": "Frampton, Paul H",
                        "roles": ["EDITOR"],
                        "alternative_names": "Neubert, Matthias",
                    },
                    {"full_name": "Glashow, Sheldon Lee", "roles": ["EDITOR"]},
                    {"full_name": "Van Dam, Hendrik", "roles": ["EDITOR"]},
                    {
                        "full_name": "Seyfert, Paul",
                        "roles": ["AUTHOR"],
                        "affiliations": [{"name": "CERN"}],
                        # $0 values parsed into typed identifiers
                        "identifiers": [
                            {
                                "scheme": "INSPIRE ID",
                                "value": "INSPIRE-00341737",
                            },
                            {"scheme": "CERN", "value": "692828"},
                            {"scheme": "CDS", "value": "2079441"},
                        ],
                    },
                    {
                        "full_name": "John Doe",
                        "roles": ["AUTHOR"],
                        "affiliations": [
                            {"name": "CERN"},
                            {"name": "Univ. Gent"},
                        ],
                    },
                    {
                        "full_name": "Jane Doe",
                        "roles": ["AUTHOR"],
                        "affiliations": [
                            {"name": "CERN"},
                            {"name": "Univ. Gent"},
                        ],
                    },
                ],
            },
        )
        # "et al." in 700__u sets the other_authors flag
        check_transformation(
            """
            <datafield tag="700" ind1=" " ind2=" ">
                <subfield code="a">Frampton, Paul H</subfield>
                <subfield code="e">ed.</subfield>
                <subfield code="u">et al.</subfield>
            </datafield>
            <datafield tag="700" ind1=" " ind2=" ">
                <subfield code="a">Glashow, Sheldon Lee</subfield>
                <subfield code="e">ed.</subfield>
            </datafield>
            """,
            {
                "authors": [
                    {
                        "full_name": "Frampton, Paul H",
                        "roles": ["EDITOR"],
                    },
                    {"full_name": "Glashow, Sheldon Lee", "roles": ["EDITOR"]},
                ],
                "other_authors": True,
            },
        )
        # "et al." in the role subfield 700__e must raise
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="700" ind1=" " ind2=" ">
                    <subfield code="a">Langrognat, B</subfield>
                </datafield>
                <datafield tag="700" ind1=" " ind2=" ">
                    <subfield code="a">Sauniere, J</subfield>
                    <subfield code="e">et al.</subfield>
                </datafield>
                """,
                {},
            )


# better example to be provided
def test_corporate_author(app):
    """Test corporate author (710__a / 110__a -> ORGANISATION authors)."""
    with app.app_context():
        # leading whitespace in the name is stripped
        check_transformation(
            """
            <datafield tag="710" ind1=" " ind2=" ">
                <subfield code="a"> Springer</subfield>
            </datafield>
            """,
            {
                "authors": [{"full_name": "Springer", "type": "ORGANISATION"}],
            },
        )
        check_transformation(
            """
            <datafield tag="110" ind1=" " ind2=" ">
                <subfield code="a">Marston, R M</subfield>
            </datafield>
            """,
            {
                "authors": [
                    {"full_name": "Marston, R M", "type": "ORGANISATION"},
                ],
            },
        )
def test_collaborations(app):
    """Test collaborations (710__5 and 710__g -> ORGANISATION authors)."""
    with app.app_context():
        # the " Collaboration" suffix is stripped from 710__g values
        check_transformation(
            """
            <datafield tag="710" ind1=" " ind2=" ">
                <subfield code="5">PH-EP</subfield>
            </datafield>
            <datafield tag="710" ind1=" " ind2=" ">
                <subfield code="g">ATLAS Collaboration</subfield>
            </datafield>
            """,
            {
                "authors": [
                    {"full_name": "PH-EP", "type": "ORGANISATION"},
                    {"full_name": "ATLAS", "type": "ORGANISATION"},
                ]
            },
        )
def test_publication_info(app):
    """Test publication info (773 journal data, 962 material/conference)."""
    with app.app_context():
        # full journal reference; 962__n BOOK becomes the material
        check_transformation(
            """
            <datafield tag="773" ind1=" " ind2=" ">
                <subfield code="c">1692-1695</subfield>
                <subfield code="n">10</subfield>
                <subfield code="y">2007</subfield>
                <subfield code="p">Radiat. Meas.</subfield>
                <subfield code="v">42</subfield>
                <subfield code="w">C19-01-08.1</subfield>
            </datafield>
            <datafield tag="962" ind1=" " ind2=" ">
                <subfield code="n">BOOK</subfield>
            </datafield>
            """,
            {
                "publication_info": [
                    {
                        "page_start": 1692,
                        "page_end": 1695,
                        "year": 2007,
                        "journal_title": "Radiat. Meas.",
                        "journal_issue": "10",
                        "journal_volume": "42",
                        "material": "BOOK",
                    }
                ],
            },
        )
        # a non-material 962__n value becomes a CERN_CODE conference id
        check_transformation(
            """
            <datafield tag="773" ind1=" " ind2=" ">
                <subfield code="c">1692-1695</subfield>
                <subfield code="n">10</subfield>
                <subfield code="y">2007</subfield>
                <subfield code="p">Radiat. Meas.</subfield>
                <subfield code="v">42</subfield>
            </datafield>
            <datafield tag="962" ind1=" " ind2=" ">
                <subfield code="n">fsihfifri</subfield>
            </datafield>
            """,
            {
                "publication_info": [
                    {
                        "page_start": 1692,
                        "page_end": 1695,
                        "year": 2007,
                        "journal_title": "Radiat. Meas.",
                        "journal_issue": "10",
                        "journal_volume": "42",
                    }
                ],
                "conference_info": {
                    "identifiers": [
                        {"scheme": "CERN_CODE", "value": "fsihfifri"}
                    ]
                },
            },
        )
        # a malformed page range in 773__c must raise
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="773" ind1=" " ind2=" ">
                    <subfield code="c">10-95-5</subfield>
                    <subfield code="n">10</subfield>
                    <subfield code="y">2007</subfield>
                    <subfield code="p">Radiat. Meas.</subfield>
                    <subfield code="v">42</subfield>
                </datafield>
                """,
                {
                    "publication_info": [
                        {
                            "page_start": 1692,
                            "page_end": 1695,
                            "year": 2007,
                            "journal_title": "Radiat. Meas.",
                            "journal_issue": "10",
                            "journal_volume": "42",
                        }
                    ]
                },
            )
        # 773__o and 773__x free text are concatenated into one note
        check_transformation(
            """
            <datafield tag="773" ind1=" " ind2=" ">
                <subfield code="o">1692 numebrs text etc</subfield>
                <subfield code="x">Random text</subfield>
            </datafield>
            """,
            {
                "publication_info": [
                    {"note": "1692 numebrs text etc Random text"}
                ]
            },
        )
        # 962 with a legacy recid marks a periodical issue of a journal
        check_transformation(
            """
            <datafield tag="962" ind1=" " ind2=" ">
                <subfield code="b">2155631</subfield>
                <subfield code="n">genoa20160330</subfield>
                <subfield code="k">1</subfield>
            </datafield>
            """,
            {
                "document_type": "PERIODICAL_ISSUE",
                "_migration": {
                    **get_helper_dict(),
                    "journal_record_legacy_recid": "2155631",
                    "has_journal": True,
                },
                "conference_info": {
                    "identifiers": [
                        {"scheme": "CERN_CODE", "value": "genoa20160330"}
                    ]
                },
                "publication_info": [
                    {
                        "page_start": 1,
                    }
                ],
            },
        )
        # journal data and free-text note combined in a single 773 field
        check_transformation(
            """
            <datafield tag="773" ind1=" " ind2=" ">
                <subfield code="c">1692-1695</subfield>
                <subfield code="n">10</subfield>
                <subfield code="y">2007</subfield>
                <subfield code="p">Radiat. Meas.</subfield>
                <subfield code="o">1692 numebrs text etc</subfield>
                <subfield code="x">Random text</subfield>
            </datafield>
            """,
            {
                "publication_info": [
                    {
                        "page_start": 1692,
                        "page_end": 1695,
                        "year": 2007,
                        "journal_title": "Radiat. Meas.",
                        "journal_issue": "10",
                        "note": "1692 numebrs text etc Random text",
                    }
                ]
            },
        )
        # a three-part page value in 773__c must raise as well
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="773" ind1=" " ind2=" ">
                    <subfield code="c">1692-1695-2000</subfield>
                    <subfield code="n">10</subfield>
                    <subfield code="y">2007</subfield>
                    <subfield code="p">Radiat. Meas.</subfield>
                    <subfield code="o">1692 numebrs text etc</subfield>
                    <subfield code="x">Random text</subfield>
                </datafield>
                """,
                {
                    "publication_info": [
                        {
                            "page_start": 1692,
                            "page_end": 1695,
                            "year": 2007,
                            "journal_title": "Radiat. Meas.",
                            "journal_issue": "10",
                            "pubinfo_freetext": "1692 numebrs "
                            "text etc Random text",
                        }
                    ]
                },
            )
def test_extensions(app):
    """Test extensions (925 standard review, 693 accelerator/experiment)."""
    with app.app_context():
        check_transformation(
            """
            <datafield tag="925" ind1=" " ind2=" ">
                <subfield code="i">applicable at CERN</subfield>
                <subfield code="p">Expert ICS-25.160</subfield>
                <subfield code="z">Reviewed in December 2019</subfield>
            </datafield>
            """,
            {
                "extensions": {
                    "standard_review:applicability": "applicable at CERN",
                    "standard_review:checkdate": "Reviewed in December 2019",
                    "standard_review:expert": "Expert ICS-25.160",
                },
            },
        )
        # 925__v adds the validity key
        check_transformation(
            """
            <datafield tag="925" ind1=" " ind2=" ">
                <subfield code="i">no longer applicable</subfield>
                <subfield code="p">Expert ICS-25.160</subfield>
                <subfield code="v">withdrawn</subfield>
                <subfield code="z">Reviewed in December 2019</subfield>
            </datafield>
            """,
            {
                "extensions": {
                    "standard_review:applicability": "no longer applicable",
                    "standard_review:validity": "withdrawn",
                    "standard_review:checkdate": "Reviewed in December 2019",
                    "standard_review:expert": "Expert ICS-25.160",
                },
            },
        )
        # 693 unit extensions merge with the 925 review extensions
        check_transformation(
            """
            <datafield tag="693" ind1=" " ind2=" ">
                <subfield code="a">CERN LHC</subfield>
                <subfield code="e">ATLAS</subfield>
            </datafield>
            <datafield tag="693" ind1=" " ind2=" ">
                <subfield code="a">CERN LHC</subfield>
                <subfield code="e">CMS</subfield>
                <subfield code="p">FCC</subfield>
            </datafield>
            <datafield tag="925" ind1=" " ind2=" ">
                <subfield code="i">no longer applicable</subfield>
                <subfield code="p">Expert ICS-25.160</subfield>
                <subfield code="v">withdrawn</subfield>
                <subfield code="z">Reviewed in December 2019</subfield>
            </datafield>
            """,
            {
                "extensions": {
                    "unit:accelerator": ["CERN LHC"],
                    "unit:experiment": ["ATLAS", "CMS"],
                    "unit:project": ["FCC"],
                    "standard_review:applicability": "no longer applicable",
                    "standard_review:validity": "withdrawn",
                    "standard_review:checkdate": "Reviewed in December 2019",
                    "standard_review:expert": "Expert ICS-25.160",
                }
            },
        )
def test_related_record(app):
    """Test related record (775/787 -> migration ``related`` entries)."""
    with app.app_context():
        # 775__b supplies the relation type
        check_transformation(
            """
            <datafield tag="775" ind1=" " ind2=" ">
                <subfield code="b">Test text</subfield>
                <subfield code="c">Random text</subfield>
                <subfield code="w">748392</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "has_related": True,
                    "related": [
                        {
                            "related_recid": "748392",
                            "relation_type": "Test text",
                        }
                    ],
                },
            },
        )
        # a 787 field carrying extra text in $i needs manual import
        with pytest.raises(ManualImportRequired):
            check_transformation(
                """
                <datafield tag="787" ind1=" " ind2=" ">
                    <subfield code="i">Random text</subfield>
                    <subfield code="w">7483924</subfield>
                </datafield>
                """,
                {
                    "_migration": {
                        **get_helper_dict(),
                        "has_related": True,
                        "related": [
                            {
                                "related_recid": "7483924",
                                "relation_type": "other",
                            }
                        ],
                    },
                },
            )
        # bare recids default to relation type "other"
        check_transformation(
            """
            <datafield tag="775" ind1=" " ind2=" ">
                <subfield code="w">7483924</subfield>
            </datafield>
            <datafield tag="787" ind1=" " ind2=" ">
                <subfield code="w">748</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "has_related": True,
                    "related": [
                        {"related_recid": "7483924", "relation_type": "other"},
                        {"related_recid": "748", "relation_type": "other"},
                    ],
                },
            },
        )
def test_accelerator_experiments(app):
    """Test accelerator experiments (693 -> ``unit:*`` extensions)."""
    with app.app_context():
        # repeated accelerators deduplicate; experiments/projects accumulate
        check_transformation(
            """
            <datafield tag="693" ind1=" " ind2=" ">
                <subfield code="a">CERN LHC</subfield>
                <subfield code="e">ATLAS</subfield>
            </datafield>
            <datafield tag="693" ind1=" " ind2=" ">
                <subfield code="a">CERN LHC</subfield>
                <subfield code="e">CMS</subfield>
                <subfield code="p">FCC</subfield>
            </datafield>
            """,
            {
                "extensions": {
                    "unit:accelerator": ["CERN LHC"],
                    "unit:experiment": ["ATLAS", "CMS"],
                    "unit:project": ["FCC"],
                }
            },
        )
def test_isbns(app):
    """Test isbns (020 -> ISBN identifiers, mediums and volumes)."""
    with app.app_context():
        # 020__u "electronic version" becomes the medium
        check_transformation(
            """
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="a">9781630814434</subfield>
                <subfield code="q">(electronic bk.)</subfield>
                <subfield code="u">electronic version</subfield>
                <subfield code="b">electronic version</subfield>
            </datafield>
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="a">9781630811051</subfield>
                <subfield code="u">electronic version</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {
                        "value": "9781630814434",
                        "medium": "electronic version",
                        "scheme": "ISBN",
                    },
                    {
                        "value": "9781630811051",
                        "medium": "electronic version",
                        "scheme": "ISBN",
                    },
                ],
            },
        )
        # ISBNs from $a and $z mix; duplicate $z entries collapse into one
        check_transformation(
            """
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="a">0691090858</subfield>
            </datafield>
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="a">9780691090856</subfield>
            </datafield>
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="a">9781400889167</subfield>
                <subfield code="q">(electronic bk.)</subfield>
                <subfield code="u">electronic version</subfield>
            </datafield>
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="u">electronic version</subfield>
                <subfield code="z">9780691090849</subfield>
            </datafield>
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="u">electronic version</subfield>
                <subfield code="z">9780691090849</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {"value": "0691090858", "scheme": "ISBN"},
                    {"value": "9780691090856", "scheme": "ISBN"},
                    {
                        "value": "9781400889167",
                        "medium": "electronic version",
                        "scheme": "ISBN",
                    },
                    {
                        "value": "9780691090849",
                        "medium": "electronic version",
                        "scheme": "ISBN",
                    },
                ],
            },
        )
        # 020 fields without any ISBN value need manual import
        with pytest.raises(ManualImportRequired):
            check_transformation(
                """
                <datafield tag="020" ind1=" " ind2=" ">
                    <subfield code="q">(electronic bk.)</subfield>
                    <subfield code="u">electronic version</subfield>
                    <subfield code="b">electronic version</subfield>
                </datafield>
                <datafield tag="020" ind1=" " ind2=" ">
                    <subfield code="u">electronic version</subfield>
                </datafield>
                """,
                {
                    "identifiers": [
                        {
                            "medium": "electronic version",
                            "scheme": "ISBN",
                        },
                        {
                            "medium": "electronic version",
                            "scheme": "ISBN",
                        },
                    ],
                },
            )
        # a "(v.N)" suffix in $u is split off into the record volume
        check_transformation(
            """
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="a">9781630814434</subfield>
                <subfield code="q">(electronic bk.)</subfield>
                <subfield code="u">electronic version</subfield>
                <subfield code="b">electronic version</subfield>
            </datafield>
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="a">9781630811051</subfield>
                <subfield code="u">electronic version (v.1)</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {
                        "value": "9781630814434",
                        "medium": "electronic version",
                        "scheme": "ISBN",
                    },
                    {
                        "value": "9781630811051",
                        "medium": "electronic version",
                        "scheme": "ISBN",
                    },
                ],
                "volume": "(v.1)",
            },
        )
        # a non-medium $u value is kept as a description instead
        check_transformation(
            """
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="a">9781630814434</subfield>
                <subfield code="q">(electronic bk.)</subfield>
                <subfield code="u">description</subfield>
                <subfield code="b">electronic version</subfield>
            </datafield>
            <datafield tag="020" ind1=" " ind2=" ">
                <subfield code="a">9781630811051</subfield>
                <subfield code="u">electronic version (v.1)</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {
                        "value": "9781630814434",
                        "description": "description",
                        "scheme": "ISBN",
                    },
                    {
                        "value": "9781630811051",
                        "medium": "electronic version",
                        "scheme": "ISBN",
                    },
                ],
                "volume": "(v.1)",
            },
        )
        # conflicting volume suffixes across fields need manual import
        with pytest.raises(ManualImportRequired):
            check_transformation(
                """
                <datafield tag="020" ind1=" " ind2=" ">
                    <subfield code="a">9781630814434</subfield>
                    <subfield code="q">(electronic bk.)</subfield>
                    <subfield code="u">electronic version (v.2)</subfield>
                    <subfield code="b">electronic version</subfield>
                </datafield>
                <datafield tag="020" ind1=" " ind2=" ">
                    <subfield code="a">9781630811051</subfield>
                    <subfield code="u">electronic version (v.1)</subfield>
                </datafield>
                """,
                {
                    "identifiers": [
                        {
                            "value": "9781630814434",
                            "medium": "electronic version",
                            "scheme": "ISBN",
                        },
                        {
                            "value": "9781630811051",
                            "medium": "electronic version",
                            "scheme": "ISBN",
                        },
                    ],
                    "volume": "(v.1)",
                },
            )
def test_report_numbers(app):
    """Test report numbers."""
    # Covers MARC 037/088: arXiv alternative identifiers (037__9 == arXiv),
    # visible report numbers (subfield ``a``), hidden report numbers
    # (subfields ``z``/``9``) and arXiv subject categories (037__c).
    with app.app_context():
        check_transformation(
            """
            <datafield tag="037" ind1=" " ind2=" ">
                <subfield code="9">arXiv</subfield>
                <subfield code="a">arXiv:1808.02335</subfield>
            </datafield>
            """,
            {
                "alternative_identifiers": [
                    {"value": "arXiv:1808.02335", "scheme": "arXiv"}
                ],
            },
        )
        check_transformation(
            """
            <datafield tag="037" ind1=" " ind2=" ">
                <subfield code="a">hep-th/9509119</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {"value": "hep-th/9509119", "scheme": "REPORT_NUMBER"}
                ],
            },
        )
        check_transformation(
            """
            <datafield tag="037" ind1=" " ind2=" ">
                <subfield code="9">arXiv</subfield>
                <subfield code="a">arXiv:1808.02335</subfield>
                <subfield code="c">hep-ex</subfield>
            </datafield>
            """,
            {
                "alternative_identifiers": [
                    {
                        "value": "arXiv:1808.02335",
                        "scheme": "arXiv",
                    }
                ],
                "subjects": [
                    {
                        "scheme": "arXiv",
                        "value": "hep-ex",
                    }
                ],
            },
        )
        check_transformation(
            """
            <datafield tag="037" ind1=" " ind2=" ">
                <subfield code="9">arXiv</subfield>
                <subfield code="a">arXiv:1808.02335</subfield>
            </datafield>
            """,
            {
                "alternative_identifiers": [
                    {
                        "value": "arXiv:1808.02335",
                        "scheme": "arXiv",
                    }
                ],
            },
        )
        # Subfield ``z`` produces a hidden report number.
        check_transformation(
            """
            <datafield tag="037" ind1=" " ind2=" ">
                <subfield code="z">CERN-THESIS-2018-004</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {
                        "value": "CERN-THESIS-2018-004",
                        "hidden": True,
                        "scheme": "REPORT_NUMBER",
                    }
                ],
            },
        )
        check_transformation(
            """
            <datafield tag="037" ind1=" " ind2=" ">
                <subfield code="9">CERN-ISOLDE-2018-001</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {
                        "value": "CERN-ISOLDE-2018-001",
                        "hidden": True,
                        "scheme": "REPORT_NUMBER",
                    }
                ],
            },
        )
        check_transformation(
            """
            <datafield tag="088" ind1=" " ind2=" ">
                <subfield code="a">NAPAC-2016-MOPOB23</subfield>
            </datafield>
            <datafield tag="088" ind1=" " ind2=" ">
                <subfield code="9">ATL-COM-PHYS-2018-980</subfield>
            </datafield>
            <datafield tag="088" ind1=" " ind2=" ">
                <subfield code="z">ATL-COM-PHYS-2017</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {"value": "NAPAC-2016-MOPOB23", "scheme": "REPORT_NUMBER"},
                    {
                        "value": "ATL-COM-PHYS-2018-980",
                        "hidden": True,
                        "scheme": "REPORT_NUMBER",
                    },
                    {
                        "value": "ATL-COM-PHYS-2017",
                        "hidden": True,
                        "scheme": "REPORT_NUMBER",
                    },
                ],
            },
        )
        # A 037 with only an unknown subfield code carries no identifier value.
        with pytest.raises(MissingRequiredField):
            check_transformation(
                """
                <datafield tag="037" ind1=" " ind2=" ">
                    <subfield code="x">hep-th/9509119</subfield>
                </datafield>
                """,
                {
                    "identifiers": [
                        {"value": "hep-th/9509119", "scheme": "REPORT_NUMBER"}
                    ],
                },
            )
        # Tag 695 has no migration rule defined at all.
        with pytest.raises(MissingRule):
            check_transformation(
                """
                <datafield tag="695" ind1=" " ind2=" ">
                    <subfield code="9">LANL EDS</subfield>
                    <subfield code="a">math-ph</subfield>
                </datafield>
                """,
                {},
            )
def test_dois(app):
    """Test dois."""
    # MARC 0247_ with $2 == DOI maps to an identifier with optional
    # material ($q) and source ($9).
    with app.app_context():
        check_transformation(
            """
            <datafield tag="024" ind1="7" ind2=" ">
                <subfield code="2">DOI</subfield>
                <subfield code="a">10.1007/978-1-4613-0247-6</subfield>
                <subfield code="q">data</subfield>
                <subfield code="9">source</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {
                        "source": "source",
                        "material": "data",
                        "value": "10.1007/978-1-4613-0247-6",
                        "scheme": "DOI",
                    }
                ],
            },
        )
def test_alternative_identifiers(app):
    """Test external system identifiers."""
    # Covers MARC 035/036/0247_: known external schemes (EBL, Inspire, DLC),
    # INSPIRE conference numbers, DOIs, and schemes that are dropped
    # (CERCER, SLAC, arXiv) or rejected (unknown ones).
    with app.app_context():
        check_transformation(
            """
            <datafield tag="035" ind1=" " ind2=" ">
                <subfield code="9">EBL</subfield>
                <subfield code="a">5231528</subfield>
            </datafield>
            """,
            {
                "alternative_identifiers": [
                    {
                        "scheme": "EBL",
                        "value": "5231528",
                    }
                ],
            },
        )
        # "inspire-cnum" goes to conference_info, "Inspire" stays an
        # alternative identifier.
        check_transformation(
            """
            <datafield tag="035" ind1=" " ind2=" ">
                <subfield code="9">inspire-cnum</subfield>
                <subfield code="a">2365039</subfield>
            </datafield>
            <datafield tag="035" ind1=" " ind2=" ">
                <subfield code="9">Inspire</subfield>
                <subfield code="a">2365039</subfield>
            </datafield>
            """,
            {
                "conference_info": {
                    "identifiers": [
                        {"scheme": "INSPIRE_CNUM", "value": "2365039"}
                    ],
                },
                "alternative_identifiers": [
                    {
                        "scheme": "Inspire",
                        "value": "2365039",
                    }
                ],
            },
        )
        # An unknown 035__9 scheme is rejected.
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="035" ind1=" " ind2=" ">
                    <subfield code="9">Random</subfield>
                    <subfield code="a">2365039</subfield>
                </datafield>
                """,
                {},
            )
        # CERCER and SLAC identifiers are silently dropped.
        check_transformation(
            """
            <datafield tag="035" ind1=" " ind2=" ">
                <subfield code="9">CERCER</subfield>
                <subfield code="a">2365039</subfield>
            </datafield>
            """,
            {},
        )
        check_transformation(
            """
            <datafield tag="035" ind1=" " ind2=" ">
                <subfield code="9">SLAC</subfield>
                <subfield code="a">5231528</subfield>
            </datafield>
            """,
            {},
        )
        # 0247_ with a non-DOI scheme (ASIN) is ignored; EBL is kept.
        check_transformation(
            """
            <datafield tag="024" ind1="7" ind2=" ">
                <subfield code="2">ASIN</subfield>
                <subfield code="a">9402409580</subfield>
                <subfield code="9">DLC</subfield>
            </datafield>
            <datafield tag="035" ind1=" " ind2=" ">
                <subfield code="9">EBL</subfield>
                <subfield code="a">5231528</subfield>
            </datafield>
            """,
            {
                "alternative_identifiers": [
                    {
                        "value": "5231528",
                        "scheme": "EBL",
                    }
                ]
            },
        )
        check_transformation(
            """
            <datafield tag="024" ind1="7" ind2=" ">
                <subfield code="2">DOI</subfield>
                <subfield code="a">10.1007/s00269-016-0862-1</subfield>
            </datafield>
            <datafield tag="024" ind1="7" ind2=" ">
                <subfield code="2">DOI</subfield>
                <subfield code="a">10.1103/PhysRevLett.121.052004</subfield>
            </datafield>
            <datafield tag="024" ind1="7" ind2=" ">
                <subfield code="2">DOI</subfield>
                <subfield code="9">arXiv</subfield>
                <subfield code="a">10.1103/PhysRevLett.121.052004</subfield>
                <subfield code="q">publication</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {"value": "10.1007/s00269-016-0862-1", "scheme": "DOI"},
                    {
                        "value": "10.1103/PhysRevLett.121.052004",
                        "scheme": "DOI",
                    },
                    {
                        "value": "10.1103/PhysRevLett.121.052004",
                        "scheme": "DOI",
                        "material": "publication",
                        "source": "arXiv",
                    },
                ],
            },
        )
        check_transformation(
            """
            <datafield tag="036" ind1=" " ind2=" ">
                <subfield code="9">DLC</subfield>
                <subfield code="a">92074207</subfield>
            </datafield>
            """,
            {
                "alternative_identifiers": [
                    {
                        "scheme": "DLC",
                        "value": "92074207",
                    }
                ],
            },
        )
        # ignore 035__9 == arXiv
        check_transformation(
            """
            <datafield tag="035" ind1=" " ind2=" ">
                <subfield code="9">arXiv</subfield>
                <subfield code="a">5231528</subfield>
            </datafield>
            """,
            {
                # "alternative_identifiers": [
                #     {"scheme": "arXiv", "value": "5231528"}
                # ]
            },
        )
def test_arxiv_eprints(app):
    """Test arxiv eprints."""
    # MARC 037 with $9 == arXiv: the eprint id plus its categories ($c);
    # duplicate ids are deduplicated while categories accumulate.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="037" ind1=" " ind2=" ">
                <subfield code="9">arXiv</subfield>
                <subfield code="a">arXiv:1209.5665</subfield>
                <subfield code="c">math-ph</subfield>
            </datafield>
            """,
            {
                "alternative_identifiers": [
                    {
                        "scheme": "arXiv",
                        "value": "arXiv:1209.5665",
                    }
                ],
                "subjects": [{"scheme": "arXiv", "value": "math-ph"}],
            },
        )
        check_transformation(
            """
            <datafield tag="037" ind1=" " ind2=" ">
                <subfield code="9">arXiv</subfield>
                <subfield code="a">arXiv:1209.5665</subfield>
                <subfield code="c">math-ph</subfield>
            </datafield>
            <datafield tag="037" ind1=" " ind2=" ">
                <subfield code="9">arXiv</subfield>
                <subfield code="a">arXiv:1209.5665</subfield>
                <subfield code="c">math.GT</subfield>
            </datafield>
            """,
            {
                "alternative_identifiers": [
                    {"value": "arXiv:1209.5665", "scheme": "arXiv"}
                ],
                "subjects": [
                    {"scheme": "arXiv", "value": "math-ph"},
                    {"scheme": "arXiv", "value": "math.GT"},
                ],
            },
        )
        # An arXiv category not in the known vocabulary is rejected.
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="037" ind1=" " ind2=" ">
                    <subfield code="9">arXiv</subfield>
                    <subfield code="a">arXiv:1209.5665</subfield>
                    <subfield code="c">math-phss</subfield>
                </datafield>
                """,
                {
                    "arxiv_eprints": [
                        {
                            "categories": ["math-ph"],
                            "value": "arXiv:1209.5665",
                        }
                    ],
                },
            )
def test_languages(app):
    """Test languages."""
    # MARC 041__a: both MARC codes ("eng", "fre", "ger") and plain names
    # ("english") normalize to two-letter uppercase codes; unknown values
    # raise UnexpectedValue.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="041" ind1=" " ind2=" ">
                <subfield code="a">eng</subfield>
            </datafield>
            """,
            {
                "languages": ["EN"],
            },
        )
        check_transformation(
            """
            <datafield tag="041" ind1=" " ind2=" ">
                <subfield code="a">english</subfield>
            </datafield>
            """,
            {
                "languages": ["EN"],
            },
        )
        check_transformation(
            """
            <datafield tag="041" ind1=" " ind2=" ">
                <subfield code="a">fre</subfield>
            </datafield>
            """,
            {
                "languages": ["FR"],
            },
        )
        check_transformation(
            """
            <datafield tag="041" ind1=" " ind2=" ">
                <subfield code="a">pl</subfield>
            </datafield>
            """,
            {
                "languages": ["PL"],
            },
        )
        check_transformation(
            """
            <datafield tag="041" ind1=" " ind2=" ">
                <subfield code="a">ger</subfield>
            </datafield>
            """,
            {
                "languages": ["DE"],
            },
        )
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="041" ind1=" " ind2=" ">
                    <subfield code="a">xxxxxxxx</subfield>
                </datafield>
                """,
                {
                    "languages": ["DE"],
                },
            )
def test_editions(app):
    """Test editions."""
    # MARC 250__a: the trailing " ed." suffix is stripped from the edition.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="250" ind1=" " ind2=" ">
                <subfield code="a">3rd ed.</subfield>
            </datafield>
            """,
            {"edition": "3rd"},
        )
def test_imprint(app):
    """Test imprints."""
    # MARC 260: place ($a), publisher ($b), date ($c, also used as the
    # publication year) and reprint date ($g).
    with app.app_context():
        check_transformation(
            """
            <datafield tag="260" ind1=" " ind2=" ">
                <subfield code="a">Sydney</subfield>
                <subfield code="b">Allen &amp; Unwin</subfield>
                <subfield code="c">2013</subfield>
                <subfield code="g">2015</subfield>
            </datafield>
            """,
            {
                "publication_year": "2013",
                "imprint": {
                    "place": "Sydney",
                    "publisher": "Allen &amp; Unwin",
                    "date": "2013",
                    "reprint_date": "2015",
                },
            },
        )
@pytest.mark.skip
def test_preprint_date(app):
    """Test preprint date."""
    # Currently skipped. MARC 269__c holds a "DD Mon YYYY" date that is
    # normalized to ISO; an impossible date raises ManualImportRequired.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="269" ind1=" " ind2=" ">
                <subfield code="a">Geneva</subfield>
                <subfield code="b">CERN</subfield>
                <subfield code="c">19 Jan 2016</subfield>
            </datafield>
            """,
            {
                "preprint_date": "2016-01-19",
            },
        )
        check_transformation(
            """
            <datafield tag="269" ind1=" " ind2=" ">
                <subfield code="a">Geneva</subfield>
                <subfield code="b">CERN</subfield>
            </datafield>
            """,
            {},
        )
        with pytest.raises(ManualImportRequired):
            check_transformation(
                """
                <datafield tag="269" ind1=" " ind2=" ">
                    <subfield code="a">Geneva</subfield>
                    <subfield code="b">CERN</subfield>
                    <subfield code="c">33 Jan 2016</subfield>
                </datafield>
                """,
                {
                    "preprint_date": "2016-01-19",
                },
            )
def test_number_of_pages(app):
    """Test number of pages."""
    # MARC 300__a: a leading "<N> p" count plus an optional physical copy
    # description; unparseable values either map to {} or raise.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="300" ind1=" " ind2=" ">
                <subfield code="a">373 p</subfield>
            </datafield>
            """,
            {
                "number_of_pages": "373",
            },
        )
        check_transformation(
            """
            <datafield tag="300" ind1=" " ind2=" ">
                <subfield code="a">480 p. ; 1 CD-ROM suppl</subfield>
            </datafield>
            """,
            {
                "number_of_pages": "480",
                "physical_copy_description": "1 CD-ROM",
            },
        )
        check_transformation(
            """
            <datafield tag="300" ind1=" " ind2=" ">
                <subfield code="a">42 p. ; 2 CD-ROM ; 1 DVD, 1 vhs</subfield>
            </datafield>
            """,
            {
                "number_of_pages": "42",
                "physical_copy_description": "2 CD-ROM, 1 DVD, 1 VHS",
            },
        )
        check_transformation(
            """
            <datafield tag="300" ind1=" " ind2=" ">
                <subfield code="a"></subfield>
            </datafield>
            """,
            {},
        )
        check_transformation(
            """
            <datafield tag="300" ind1=" " ind2=" ">
                <subfield code="a">mult. p</subfield>
            </datafield>
            """,
            {},
        )
        # NOTE(review): every check_transformation call below is indented
        # inside this single pytest.raises block, so only the first one
        # ("2 v") ever runs -- the rest are dead code as soon as it raises.
        # Either each call needs its own raises block, or (if the later
        # inputs are expected to map to {} without raising, like "mult. p"
        # above) they should be dedented out. TODO: confirm intent.
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="300" ind1=" " ind2=" ">
                    <subfield code="a">2 v</subfield>
                </datafield>
                """,
                {},
            )
            check_transformation(
                """
                <datafield tag="300" ind1=" " ind2=" ">
                    <subfield code="a">42 p. + 17 p</subfield>
                </datafield>
                """,
                {},
            )
            check_transformation(
                """
                <datafield tag="300" ind1=" " ind2=" ">
                    <subfield code="a">
                        amendment A1 (18 p) + amendment A2 (18 p)
                    </subfield>
                </datafield>
                """,
                {},
            )
            check_transformation(
                """
                <datafield tag="300" ind1=" " ind2=" ">
                    <subfield code="a">
                        amendment A1 (18 p) + amendment A2 (18 p)
                    </subfield>
                </datafield>
                """,
                {},
            )
            check_transformation(
                """
                <datafield tag="300" ind1=" " ind2=" ">
                    <subfield code="a">42 p. ; E22</subfield>
                </datafield>
                """,
                {},
            )
def test_abstracts(app):
    """Test abstracts."""
    # MARC 520__a: the first abstract becomes "abstract", subsequent ones
    # go to "alternative_abstracts"; a 520 without $a is invalid.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="520" ind1=" " ind2=" ">
                <subfield code="a">The publication...</subfield>
            </datafield>
            """,
            {
                "abstract": "The publication...",
            },
        )
        check_transformation(
            """
            <datafield tag="520" ind1=" " ind2=" ">
                <subfield code="a">The publication...</subfield>
                <subfield code="9">arXiv</subfield>
            </datafield>
            <datafield tag="520" ind1=" " ind2=" ">
                <subfield code="a">Does application...</subfield>
            </datafield>
            """,
            {
                "abstract": "The publication...",
                "alternative_abstracts": ["Does application..."],
            },
        )
        with pytest.raises(MissingRequiredField):
            check_transformation(
                """
                <datafield tag="520" ind1=" " ind2=" ">
                    <subfield code="9">arXiv</subfield>
                </datafield>
                <datafield tag="520" ind1=" " ind2=" ">
                    <subfield code="a">Does application...</subfield>
                </datafield>
                """,
                {"abstract": "Does application..."},
            )
@pytest.mark.skip
def test_funding_info(app):
    """Test funding info."""
    # Currently skipped. MARC 536: agency ($a), grant number ($c), project
    # number ($f) and open-access flag ($r, must be "openAccess").
    with app.app_context():
        check_transformation(
            """
            <datafield tag="536" ind1=" " ind2=" ">
                <subfield code="a">CERN Technical Student Program</subfield>
            </datafield>
            <datafield tag="536" ind1=" " ind2=" ">
                <subfield code="a">FP7</subfield>
                <subfield code="c">654168</subfield>
                <subfield code="f">AIDA-2020</subfield>
                <subfield code="r">openAccess</subfield>
            </datafield>
            """,
            {
                "funding_info": [
                    {
                        "agency": "CERN Technical Student Program",
                    },
                    {
                        "agency": "FP7",
                        "grant_number": "654168",
                        "project_number": "AIDA-2020",
                        "openaccess": True,
                    },
                ]
            },
        )
        # A malformed open-access value is rejected.
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="536" ind1=" " ind2=" ">
                    <subfield code="a">
                        CERN Technical Student Program
                    </subfield>
                </datafield>
                <datafield tag="536" ind1=" " ind2=" ">
                    <subfield code="a">FP7</subfield>
                    <subfield code="c">654168</subfield>
                    <subfield code="f">AIDA-2020</subfield>
                    <subfield code="r">openAccedafss</subfield>
                </datafield>
                """,
                {
                    "funding_info": [
                        {
                            "agency": "CERN Technical Student Program",
                        },
                        {
                            "agency": "FP7",
                            "grant_number": "654168",
                            "project_number": "AIDA-2020",
                            "openaccess": True,
                        },
                    ]
                },
            )
def test_license(app):
    """Test license."""
    # MARC 540: license URL ($u), name ($a), material ($3, lowercased)
    # and internal note ($g).
    with app.app_context():
        check_transformation(
            """
            <datafield tag="540" ind1=" " ind2=" ">
                <subfield code="b">arXiv</subfield>
                <subfield code="u">
                http://arxiv.org/licenses/nonexclusive-distrib/1.0/
                </subfield>
            </datafield>
            <datafield tag="540" ind1=" " ind2=" ">
                <subfield code="3">Preprint</subfield>
                <subfield code="a">CC-BY-4.0</subfield>
            </datafield>
            <datafield tag="540" ind1=" " ind2=" ">
                <subfield code="3">Publication</subfield>
                <subfield code="a">CC-BY-4.0</subfield>
                <subfield code="f">SCOAP3</subfield>
                <subfield code="g">DAI/7161287</subfield>
            </datafield>
            """,
            {
                "licenses": [
                    {
                        "license": {
                            "url": "http://arxiv.org/licenses/nonexclusive-distrib/1.0/",
                            "name": None,
                        }
                    },
                    {
                        "license": {
                            "name": "CC-BY-4.0",
                            "url": None,
                        },
                        "material": "preprint",
                    },
                    {
                        "license": {
                            "name": "CC-BY-4.0",
                            "url": None,
                        },
                        "material": "publication",
                        "internal_note": "DAI/7161287",
                    },
                ]
            },
        )
def test_copyright(app):
    """Test copyright."""
    # MARC 542: holder ($d), statement ($f), year ($g, cast to int),
    # URL ($u) and material ($3, lowercased).
    with app.app_context():
        check_transformation(
            """
            <datafield tag="542" ind1=" " ind2=" ">
                <subfield code="d">d</subfield>
                <subfield code="f">f</subfield>
                <subfield code="g">2013</subfield>
                <subfield code="u">u</subfield>
            </datafield>
            <datafield tag="542" ind1=" " ind2=" ">
                <subfield code="3">Preprint</subfield>
                <subfield code="d">CERN</subfield>
                <subfield code="g">2018</subfield>
            </datafield>
            <datafield tag="542" ind1=" " ind2=" ">
                <subfield code="f">This work is licensed.</subfield>
                <subfield code="u">
                http://creativecommons.org/licenses/by/4.0
                </subfield>
            </datafield>
            """,
            {
                "copyrights": [
                    {
                        "holder": "d",
                        "statement": "f",
                        "year": 2013,
                        "url": "u",
                    },
                    {"material": "preprint", "holder": "CERN", "year": 2018},
                    {
                        "statement": "This work is licensed.",
                        "url": "http://creativecommons.org/licenses/by/4.0",
                    },
                ]
            },
        )
def test_conference_info(app):
    """Test conference info."""
    # MARC 111 (conference), 711 (acronym), 270 (contact) and 035
    # (INSPIRE-CNUM). Invalid country codes and unparseable dates raise;
    # a 270 contact without an accompanying 111 is incomplete.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="035" ind1=" " ind2=" ">
                <subfield code="9">INSPIRE-CNUM</subfield>
                <subfield code="a">1234</subfield>
            </datafield>
            <datafield tag="111" ind1=" " ind2=" ">
                <subfield code="9">20040621</subfield>
                <subfield code="a">2nd Workshop on Science with
                the New Generation of High Energy Gamma-ray Experiments:
                between Astrophysics and Astroparticle Physics
                </subfield>
                <subfield code="c">Bari, Italy</subfield>
                <subfield code="d">21 Jun 2004</subfield>
                <subfield code="f">2004</subfield>
                <subfield code="g">bari20040621</subfield>
                <subfield code="n">2</subfield>
                <subfield code="w">IT</subfield>
                <subfield code="z">20040621</subfield>
            </datafield>
            <datafield tag="711" ind1=" " ind2=" ">
                <subfield code="a">SNGHEGE2004</subfield>
            </datafield>
            """,
            {
                "conference_info": {
                    "identifiers": [
                        {"scheme": "INSPIRE_CNUM", "value": "1234"},
                        {"scheme": "CERN_CODE", "value": "bari20040621"},
                    ],
                    "title": """2nd Workshop on Science with
                the New Generation of High Energy Gamma-ray Experiments:
                between Astrophysics and Astroparticle Physics""",
                    "place": "Bari, Italy",
                    "dates": "2004-06-21 - 2004-06-21",
                    "series": {"number": 2},
                    "country": "IT",
                    "acronym": "SNGHEGE2004",
                }
            },
        )
        # "ITALIA" is not a valid ISO country code.
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="111" ind1=" " ind2=" ">
                    <subfield code="9">20040621</subfield>
                    <subfield code="a">2nd Workshop on Science with
                the New Generation of High Energy Gamma-ray Experiments:
                between Astrophysics and Astroparticle Physics
                    </subfield>
                    <subfield code="c">Bari, Italy</subfield>
                    <subfield code="d">21 Jun 2004</subfield>
                    <subfield code="f">2004</subfield>
                    <subfield code="g">bari20040621</subfield>
                    <subfield code="n">2</subfield>
                    <subfield code="w">ITALIA</subfield>
                    <subfield code="z">20040621</subfield>
                </datafield>
                """,
                {
                    "conference_info": {
                        "title": """2nd Workshop on Science with the New
                    Generation of High Energy Gamma-ray Experiments:
                    between Astrophysics and Astroparticle Physics""",
                        "place": "Bari, Italy",
                        "identifiers": [
                            {"scheme": "CERN_CODE", "value": "bari20040621"},
                        ],
                        "dates": "2004-06-21 - 2004-06-21",
                        "series": {"number": 2},
                        "country_code": "IT",
                        "contact": "arantza.de.oyanguren.campos@cern.ch",
                    }
                },
            )
        # Garbage opening/closing dates are rejected.
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="111" ind1=" " ind2=" ">
                    <subfield code="9">2gtrw</subfield>
                    <subfield code="a">2nd Workshop on Science with
                the New Generation of High Energy Gamma-ray Experiments:
                between Astrophysics and Astroparticle Physics
                    </subfield>
                    <subfield code="c">Bari, Italy</subfield>
                    <subfield code="d">gbrekgk</subfield>
                    <subfield code="f">2004</subfield>
                    <subfield code="g">bari20040621</subfield>
                    <subfield code="n">2</subfield>
                    <subfield code="w">IT</subfield>
                    <subfield code="z">2treht</subfield>
                </datafield>
                <datafield tag="270" ind1=" " ind2=" ">
                    <subfield code="m">arantza.de.oyanguren.campos@cern.ch
                    </subfield>
                </datafield>
                <datafield tag="711" ind1=" " ind2=" ">
                    <subfield code="a">SNGHEGE2004</subfield>
                </datafield>
                """,
                {
                    "conference_info": {
                        "title": """2nd Workshop on Science with the New
                    Generation of High Energy Gamma-ray Experiments:
                    between Astrophysics and Astroparticle Physics""",
                        "place": "Bari, Italy",
                        "cern_conference_code": "bari20040621",
                        "opening_date": "2004-06-21",
                        "series_number": 2,
                        "country_code": "IT",
                        "closing_date": "2004-06-21",
                        "contact": "arantza.de.oyanguren.campos@cern.ch",
                        "acronym": "SNGHEGE2004",
                    }
                },
            )
        with pytest.raises(MissingRequiredField):
            check_transformation(
                """
                <datafield tag="270" ind1=" " ind2=" ">
                    <subfield code="m">arantza.de.oyanguren.campos@cern.ch
                    </subfield>
                </datafield>
                """,
                {
                    "conference_info": {
                        "contact": "arantza.de.oyanguren.campos@cern.ch"
                    }
                },
            )
def test_alternative_titles_a(app):
    """Test title translations."""
    # MARC 242: translated title ($a) and translated subtitle ($b),
    # both tagged with language EN.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="242" ind1=" " ind2=" ">
                <subfield code="9">submitter</subfield>
                <subfield code="a">Study of the impact of stacking on simple
                hard diffraction events in CMS/LHC</subfield>
                <subfield code="b">Subtitle/LHC</subfield>
            </datafield>
            """,
            {
                "alternative_titles": [
                    {
                        "value": """Study of the impact of stacking on simple
                hard diffraction events in CMS/LHC""",
                        "language": "EN",
                        "type": "TRANSLATED_TITLE",
                    },
                    {
                        "value": "Subtitle/LHC",
                        "language": "EN",
                        "type": "TRANSLATED_SUBTITLE",
                    },
                ]
            },
        )
def test_title(app):
    """Test title."""
    # MARC 245: title ($a) and subtitle ($b); a record may only have one
    # 245 field, duplicates raise UnexpectedValue.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="245" ind1=" " ind2=" ">
                <subfield code="a">Incoterms 2010</subfield>
                <subfield code="b">les règles de l'ICC</subfield>
            </datafield>
            """,
            {
                "title": "Incoterms 2010",
                "alternative_titles": [
                    {"value": "les règles de l'ICC", "type": "SUBTITLE"}
                ],
            },
        )
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="245" ind1=" " ind2=" ">
                    <subfield code="a">Incoterms 2010</subfield>
                    <subfield code="b">les règles de l'ICC</subfield>
                </datafield>
                <datafield tag="245" ind1=" " ind2=" ">
                    <subfield code="a">With duplicate title</subfield>
                </datafield>
                """,
                {},
            )
def test_alternative_titles(app):
    """Test alternative titles."""
    # MARC 246: alternative title ($a) and subtitle ($b); $i is ignored.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="246" ind1=" " ind2=" ">
                <subfield code="a">Air quality — sampling</subfield>
                <subfield code="b">
                    part 4: guidance on the metrics
                </subfield>
                <subfield code="i">CERN</subfield>
            </datafield>
            <datafield tag="246" ind1=" " ind2=" ">
                <subfield code="a">Water quality — sampling</subfield>
                <subfield code="b">
                    part 15: guidance on preservation
                </subfield>
            </datafield>
            """,
            {
                "alternative_titles": [
                    {
                        "value": "Air quality — sampling",
                        "type": "ALTERNATIVE_TITLE",
                    },
                    {
                        "value": """part 4: guidance on the metrics""",
                        "type": "SUBTITLE",
                    },
                    {
                        "value": "Water quality — sampling",
                        "type": "ALTERNATIVE_TITLE",
                    },
                    {
                        "value": """part 15: guidance on preservation""",
                        "type": "SUBTITLE",
                    },
                ]
            },
        )
    # 246 combined with a 690C document type.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="690" ind1="C" ind2=" ">
                <subfield code="a">BOOK</subfield>
            </datafield>
            <datafield tag="246" ind1=" " ind2=" ">
                <subfield code="a">Water quality — sampling</subfield>
                <subfield code="b">
                    part 15: guidance on the preservation
                </subfield>
            </datafield>
            """,
            {
                "document_type": "BOOK",
                "alternative_titles": [
                    {
                        "value": "Water quality — sampling",
                        "type": "ALTERNATIVE_TITLE",
                    },
                    {
                        "value": """part 15: guidance on the preservation""",
                        "type": "SUBTITLE",
                    },
                ],
            },
        )
def test_note(app):
    """Test public notes."""
    # MARC 500__a: multiple notes are joined with " / "; the "Comments:"
    # prefix from arXiv notes is preserved.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="500" ind1=" " ind2=" ">
                <subfield code="a">
                    Translated from ...
                </subfield>
            </datafield>
            <datafield tag="500" ind1=" " ind2=" ">
                <subfield code="a">No CD-ROM</subfield>
            </datafield>
            """,
            {"note": """Translated from ... / No CD-ROM"""},
        )
        check_transformation(
            """
            <datafield tag="500" ind1=" " ind2=" ">
                <subfield code="9">arXiv</subfield>
                <subfield code="a">
                    Comments: Book, 380 p.,
                </subfield>
            </datafield>
            """,
            {"note": """Comments: Book, 380 p.,"""},
        )
def test_table_of_contents(app):
    """Test table of contents."""
    # MARC 5050_a: the "--"-separated chapter list is split into entries;
    # an empty $a raises UnexpectedValue.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="505" ind1="0" ind2=" ">
                <subfield code="a">
                2nd Advanced School on Exoplanetary Science: Astrophysics of Exoplanetary Atmospheres -- Chapter 1: Modeling Exoplanetary Atmospheres, by Jonathan J. Fortney -- Chapter 2: Observational Techniques, by David Sing -- Chapter 3: Molecular spectroscopy for Exoplanets by Jonathan Tennyson -- Chapter 4: Solar system atmospheres by Davide Grassi.
                </subfield>
            </datafield>
            """,
            {
                "table_of_content": [
                    "2nd Advanced School on Exoplanetary Science: Astrophysics of Exoplanetary Atmospheres",
                    "Chapter 1: Modeling Exoplanetary Atmospheres, by Jonathan J. Fortney",
                    "Chapter 2: Observational Techniques, by David Sing",
                    "Chapter 3: Molecular spectroscopy for Exoplanets by Jonathan Tennyson",
                    "Chapter 4: Solar system atmospheres by Davide Grassi.",
                ]
            },
        )
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="505" ind1="0" ind2=" ">
                    <subfield code="a">
                    </subfield>
                </datafield>
                """,
                {"table_of_content": []},
            )
def test_standard_numbers(app):
    """Tests standard number field translation."""
    # MARC 021: visible standard numbers ($a), hidden ones ($b); any
    # other subfield code means the value is missing.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="021" ind1=" " ind2=" ">
                <subfield code="a">FD-X-60-000</subfield>
            </datafield>
            <datafield tag="021" ind1=" " ind2=" ">
                <subfield code="a">NF-EN-13306</subfield>
            </datafield>
            <datafield tag="021" ind1=" " ind2=" ">
                <subfield code="b">BS-EN-ISO-6507-2</subfield>
            </datafield>
            """,
            {
                "identifiers": [
                    {"value": "FD-X-60-000", "scheme": "STANDARD_NUMBER"},
                    {"value": "NF-EN-13306", "scheme": "STANDARD_NUMBER"},
                    {
                        "value": "BS-EN-ISO-6507-2",
                        "hidden": True,
                        "scheme": "STANDARD_NUMBER",
                    },
                ]
            },
        )
        with pytest.raises(MissingRequiredField):
            check_transformation(
                """
                <datafield tag="021" ind1=" " ind2=" ">
                    <subfield code="c">FD-X-60-000</subfield>
                </datafield>
                """,
                {
                    "identifiers": [
                        {"value": "FD-X-60-000", "scheme": "STANDARD_NUMBER"},
                        {"value": "NF-EN-13306", "scheme": "STANDARD_NUMBER"},
                        {
                            "value": "BS-EN-ISO-6507-2",
                            "hidden": True,
                            "scheme": "STANDARD_NUMBER",
                        },
                    ]
                },
            )
def test_book_series(app):
    """Tests book series field translation."""
    # MARC 490: serial title ($a), volume ($v) and ISSN ($x) are collected
    # into the _migration helper dict with has_serial set.
    with app.app_context():
        check_transformation(
            """
            <datafield tag="490" ind1=" " ind2=" ">
                <subfield code="a">Minutes</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "serials": [
                        {"title": "Minutes", "issn": None, "volume": None}
                    ],
                    "has_serial": True,
                }
            },
        )
        check_transformation(
            """
            <datafield tag="490" ind1=" " ind2=" ">
                <subfield code="a">
                    De Gruyter studies in mathematical physics
                </subfield>
                <subfield code="v">16</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "serials": [
                        {
                            "title": "De Gruyter studies in mathematical physics",
                            "issn": None,
                            "volume": "16",
                        }
                    ],
                    "has_serial": True,
                }
            },
        )
        check_transformation(
            """
            <datafield tag="490" ind1=" " ind2=" ">
                <subfield code="a">Springer tracts in modern physics</subfield>
                <subfield code="v">267</subfield>
                <subfield code="x">0081-3869</subfield>
            </datafield>
            """,
            {
                "_migration": {
                    **get_helper_dict(),
                    "serials": [
                        {
                            "title": "Springer tracts in modern physics",
                            "issn": "0081-3869",
                            "volume": "267",
                        }
                    ],
                    "has_serial": True,
                }
            },
        )
def test_541(app):
    """Test 541."""
    # A full record containing a 541 (acquisition) field: no migration rule
    # exists for 541, so the whole transformation raises MissingRule even
    # though the rest of the record (and the expected dict below) is valid.
    with app.app_context():
        with pytest.raises(MissingRule):
            check_transformation(
                """
                <record>
                    <controlfield tag="001">2654497</controlfield>
                    <controlfield tag="003">SzGeCERN</controlfield>
                    <datafield tag="980" ind1=" " ind2=" ">
                        <subfield code="a">BOOK</subfield>
                    </datafield>
                    <datafield tag="700" ind1=" " ind2=" ">
                        <subfield code="a">Cai, Baoping</subfield>
                        <subfield code="e">ed.</subfield>
                    </datafield>
                    <datafield tag="700" ind1=" " ind2=" ">
                        <subfield code="a">Liu, Yonghong</subfield>
                        <subfield code="e">ed.</subfield>
                    </datafield>
                    <datafield tag="700" ind1=" " ind2=" ">
                        <subfield code="a">Hu, Jinqiu</subfield>
                        <subfield code="e">ed.</subfield>
                    </datafield>
                    <datafield tag="700" ind1=" " ind2=" ">
                        <subfield code="a">Liu, Zengkai</subfield>
                        <subfield code="e">ed.</subfield>
                    </datafield>
                    <datafield tag="700" ind1=" " ind2=" ">
                        <subfield code="a">Wu, Shengnan</subfield>
                        <subfield code="e">ed.</subfield>
                    </datafield>
                    <datafield tag="700" ind1=" " ind2=" ">
                        <subfield code="a">Ji, Renjie</subfield>
                        <subfield code="e">ed.</subfield>
                    </datafield>
                    <datafield tag="035" ind1=" " ind2=" ">
                        <subfield code="9">SCEM</subfield>
                        <subfield code="a">90.20.00.192.6</subfield>
                    </datafield>
                    <datafield tag="690" ind1="C" ind2=" ">
                        <subfield code="a">BOOK</subfield>
                    </datafield>
                    <datafield tag="697" ind1="C" ind2=" ">
                        <subfield code="a">BOOKSHOP</subfield>
                    </datafield>
                    <datafield tag="541" ind1=" " ind2=" ">
                        <subfield code="9">85.00</subfield>
                    </datafield>
                    <datafield tag="916" ind1=" " ind2=" ">
                        <subfield code="d">201901</subfield>
                        <subfield code="s">h</subfield>
                        <subfield code="w">201904</subfield>
                    </datafield>
                    <datafield tag="300" ind1=" " ind2=" ">
                        <subfield code="a">401 p</subfield>
                    </datafield>
                    <datafield tag="080" ind1=" " ind2=" ">
                        <subfield code="a">519.226</subfield>
                    </datafield>
                    <datafield tag="245" ind1=" " ind2=" ">
                        <subfield code="a">
                        Bayesian networks in fault diagnosis
                        </subfield>
                        <subfield code="b">practice and application</subfield>
                    </datafield>
                    <datafield tag="260" ind1=" " ind2=" ">
                        <subfield code="a">Singapore</subfield>
                        <subfield code="b">World Scientific</subfield>
                        <subfield code="c">2019</subfield>
                    </datafield>
                    <datafield tag="020" ind1=" " ind2=" ">
                        <subfield code="a">9789813271487</subfield>
                        <subfield code="u">print version, hardback</subfield>
                    </datafield>
                    <datafield tag="041" ind1=" " ind2=" ">
                        <subfield code="a">eng</subfield>
                    </datafield>
                    <datafield tag="960" ind1=" " ind2=" ">
                        <subfield code="a">21</subfield>
                    </datafield>
                </record>
                """,
                {
                    "agency_code": "SzGeCERN",
                    # 'acquisition_source': {
                    # 'datetime': "2019-01-21"
                    # },
                    "creation_date": "2019-01-21",
                    "_collections": ["BOOKSHOP"],
                    "number_of_pages": 401,
                    "subject_classification": [
                        {"value": "519.226", "schema": "UDC"}
                    ],
                    "languages": ["en"],
                    "title": "Bayesian networks in fault diagnosis",
                    "alternative_titles": [
                        {
                            "value": "practice and application",
                            "type": "SUBTITLE",
                        }
                    ],
                    "legacy_recid": 2654497,
                    "isbns": [
                        {
                            "medium": "print version, hardback",
                            "value": "9789813271487",
                        }
                    ],
                    "authors": [
                        {"role": "editor", "full_name": "Cai, Baoping"},
                        {"role": "editor", "full_name": "Liu, Yonghong"},
                        {"role": "editor", "full_name": "Hu, Jinqiu"},
                        {"role": "editor", "full_name": "Liu, Zengkai"},
                        {"role": "editor", "full_name": "Wu, Shengnan"},
                        {"role": "editor", "full_name": "Ji, Renjie"},
                    ],
                    "original_source": None,
                    "external_system_identifiers": [
                        {"value": "90.20.00.192.6", "schema": "SCEM"}
                    ],
                    "$schema": {
                        "$ref": "records/books/book/book-v.0.0.1.json"
                    },
                    "document_type": "BOOK",
                    "imprints": [
                        {
                            "date": "2019",
                            "publisher": "World Scientific",
                            "place": "Singapore",
                        }
                    ],
                },
            )
def test_keywords(app):
    """Test keywords."""
    # MARC 6531_: keyword value ($a) with its source vocabulary ($g).
    # (Docstring fixed: it previously said "Test public notes.", copied
    # from test_note.)
    with app.app_context():
        check_transformation(
            """
            <datafield tag="653" ind1="1" ind2=" ">
                <subfield code="g">PACS</subfield>
                <subfield code="a">Keyword Name 1</subfield>
            </datafield>
            """,
            {
                "keywords": [
                    {"value": "Keyword Name 1", "source": "PACS"},
                ],
            },
        )
def test_volume_barcodes(app):
    """Test volume barcodes (088__)."""
    # MARC 088 with $n/$x pairs: per-volume barcodes collected into the
    # _migration helper dict ("v.1" normalized to volume "1").
    with app.app_context():
        check_transformation(
            """
            <datafield tag="245" ind1=" " ind2=" ">
                <subfield code="a">Mathematische Methoden der Physik</subfield>
            </datafield>
            <datafield tag="088" ind1=" " ind2=" ">
                <subfield code="n">v.1</subfield>
                <subfield code="x">80-1209-8</subfield>
            </datafield>
            <datafield tag="088" ind1=" " ind2=" ">
                <subfield code="n">v.1</subfield>
                <subfield code="x">B00004172</subfield>
            </datafield>
            """,
            dict(
                title="Mathematische Methoden der Physik",
                _migration={
                    **get_helper_dict(),
                    **dict(
                        volumes=[
                            dict(barcode="80-1209-8", volume="1"),
                            dict(barcode="B00004172", volume="1"),
                        ],
                    ),
                },
            ),
        )
def test_conference_info_multiple_series_number(app):
    """Test conference info with multiple series numbers."""
    # MARC 111 with two $n (series number) subfields is ambiguous and
    # must raise UnexpectedValue.
    with app.app_context():
        with pytest.raises(UnexpectedValue):
            check_transformation(
                """
                <datafield tag="111" ind1=" " ind2=" ">
                    <subfield code="9">20150708</subfield>
                    <subfield code="a">
                    3rd Singularity Theory Meeting of Northeast region & the Brazil-Mexico 2nd Meeting on Singularities
                    </subfield>
                    <subfield code="c">Salvador, Brazil</subfield>
                    <subfield code="d">8 - 11 & 13 - 17 Jul 2015</subfield>
                    <subfield code="f">2015</subfield>
                    <subfield code="g">salvador20150708</subfield>
                    <subfield code="n">3</subfield>
                    <subfield code="n">2</subfield>
                    <subfield code="w">BR</subfield>
                    <subfield code="z">20150717</subfield>
                </datafield>
                """,
                dict(),
            )
|
from src.controller import EventDispatcher as Ed
from src.events import ExitMaze
from src.model.entities import Entity
class Ladder(Entity):
    """Walkable maze tile; interacting with it ends the maze."""

    def __init__(self):
        # Ladders never block movement, so the player can step onto them.
        super().__init__(walkable=True)

    def interact(self):
        """Post an ExitMaze event to the global event dispatcher."""
        Ed.post(ExitMaze())
# MIT License
#
# Copyright (c) 2018, Andrew Warrington.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
particleFilter.py
AW
TL;DR -- This module contains the necessary functions for adding particle-filter
like behaviours to a generic state space model.
This script contains the basic functionality for performing (sequential) importance
sampling. The core function is the `iterate' function. This function takes a
vector of particles, an observation, and the standard deviation of this observation
(under the observation model) and resamples the particles according to their
likelihood. This function, in conjunction with a plant model provided outside of
this script, allows you to write a particle filter.
The key `flaw' in this script is that it assumes the observation is zero mean error
about the true state. If the observation function is more complex, then this will
need to be updated. This assumption was made to make the code easier to use.
The permutation matrix must also be provided, that maps states onto observations.
"""
# Import modules.
import numpy as np
import matplotlib.pyplot as plt
import time
import scipy.stats as scis
def iterate(_particles, _observation, _observation_sd):
    '''
    particleFilter.iterate:

    Function takes in the current particles as an NxM length vector (where N is
    the number of particles and M is the dimensionality) of the state and a
    single observation with dimensionality Hx1 (where H is the dimensionality
    of the observation).

    Assumes the observations are normally distributed about the true value,
    and performs a single importance-weighting + systematic-resampling step.

    :param _particles: NxM length vector of particles
    :param _observation: single observation.
    :param _observation_sd: positive float containing the standard deviation of the observation.
    :return: Dictionary:
                {
                    'log_weights':          the log weight of each of the input
                                            N particles.
                    'resampled_indices':    the vector of N resampled particle
                                            indices.
                }
    '''
    # Retrieve the number of particles.
    [N, _] = np.shape(_particles)

    # Calculate the log probability of each particle under a Gaussian observation
    # model.
    _log_weights = norm_log_pdf(_particles, _observation, _observation_sd)

    # Shift the log weights by their max to improve the numerical stability of
    # the exponentiation (weights are only defined up to a constant anyway).
    zeroed_log_weights = _log_weights - np.nanmax(_log_weights)
    zeroed_weights = np.exp(zeroed_log_weights)
    zeroed_weights_sum = np.nansum(zeroed_weights)
    zeroed_weights_normalized = zeroed_weights / zeroed_weights_sum

    # Systematic resampling: one uniform offset shared by N evenly spaced
    # points, located in the cumulative weight distribution.
    uniforms = np.random.rand() / N + np.arange(N) / float(N)
    resampled_indexes = np.digitize(uniforms, bins=np.nancumsum(zeroed_weights_normalized))

    # Guard against float round-off: if the final cumulative weight falls
    # marginally below the largest uniform, np.digitize returns N, which would
    # index one past the end of the particle array. Clamp to the valid range.
    resampled_indexes = np.minimum(resampled_indexes, N - 1)

    return {'log_weights': _log_weights, 'resampled_indices': resampled_indexes}
def norm_log_pdf(x, loc=0, sd=1):
    '''
    particleFilter.norm_log_pdf:

    Compute, for each particle (row of x), the sum of independent Gaussian
    log-densities across the state dimensions.

    :param x:    Input particles (N x M array).
    :param loc:  Mean of the normal distribution.
    :param sd:   Standard deviation of the normal distribution.
    :return:     Vector of N log-probabilities.
    '''
    return np.sum(scis.norm.logpdf(x, loc=loc, scale=sd), axis=1)
if __name__ == "__main__":
'''
Define a main function for demonstrative purposes.
'''
print('Particle filter demonstration.')
start = time.time()
steps = 100
observations = np.zeros((steps, 1))
states = np.zeros((steps, 2))
states[0, 0] = np.random.normal(0, 1)
states[0, 1] = np.random.normal(1, 0.1)
for i in range(steps):
if i > 1:
velocity = np.random.normal(states[0, 1], 0.1)
states[i, 0] = states[i-1, 0] + velocity
observations[i] = np.random.normal(states[i, 0], 0.5)
particles = np.random.rand(500, 2)
state_estimate = np.zeros((steps, 2))
for i in range(0, steps):
# Iterate the plant model.
velocities = np.random.normal(particles[:, 1], 0.1)
particles[:, 1] = velocities
particles[:, 0] = particles[:, 0] + velocities
p = 0
# Do the re-sampling step.
it = iterate(np.expand_dims(particles[:, 0], axis=1), observations[i], 0.5)
particles = particles[it['resampled_indices'], :]
log_weights = it['log_weights']
state_estimate[i, :] = np.mean(particles, 0)
end = time.time()
print(end - start)
# Plot some stuff.
plt.plot(state_estimate[:, 0])
plt.plot(observations)
plt.pause(0.001)
print('test complete.')
|
# Read pairs "a b" until the sentinel "0 0"; strip every occurrence of the
# string a from b and print the remaining number (0 if nothing is left).
while True:
    a, b = input().split()
    if (a, b) == ("0", "0"):
        break
    remainder = b.replace(a, '')
    print(int(remainder) if remainder else 0)
|
# Created by Patrick Kao
from agents.random_agent import RandomAgent
from environments.hearts import SimpleHearts
from game import Game
def test_hearts():
    """Play one full SimpleHearts game with four random agents and print the result."""
    agents = [RandomAgent for _ in range(4)]
    game = Game(SimpleHearts, agents)
    print(game.run())


if __name__ == "__main__":
    test_hearts()
|
"""
Extensions to serve static and media files
"""
from ._base import SimpleTag
from django.contrib.staticfiles.storage import staticfiles_storage
from django.conf import settings as dj_settings
class StaticExtension(SimpleTag):
    """Django-like ``{% static %}`` tag for Jinja2 templates."""

    # Tag names this extension registers with the Jinja2 environment.
    tags = {'static'}

    def get_url(self, path, prefix=None):
        """Resolve *path* through the staticfiles storage.

        Absolute paths (leading ``/`` or with a URL scheme in the first
        segment) are returned unchanged; relative paths are optionally
        prepended with *prefix* and resolved via ``staticfiles_storage``.
        """
        is_absolute = path.startswith('/') or ':' in path.split('/')[0]
        if is_absolute:
            return path
        if prefix:
            path = '%s/%s' % (prefix, path)
        return staticfiles_storage.url(path)

    def tag_func(self, path):
        """Render the tag: return the resolved static URL for *path*."""
        return self.get_url(path)
class StaticCSSExtension(StaticExtension):
    """``{% css %}`` tag: render a ``<link>`` element for a static stylesheet."""

    tags = {'css'}

    def __init__(self, environment):
        super(StaticCSSExtension, self).__init__(environment)
        # Directory (under the static root) where stylesheets live;
        # overridable via the JINJA2_STATIC_CSS Django setting.
        self.environment.css_dir = \
            getattr(dj_settings, 'JINJA2_STATIC_CSS', 'css')

    def tag_func(self, path):
        """Return a full ``<link rel="stylesheet">`` element for *path*."""
        return '<link rel="stylesheet" type="text/css" href="%s">' % \
            self.get_url(path, self.environment.css_dir)
class StaticJSExtension(StaticExtension):
    """``{% js %}`` tag: render a ``<script>`` element for a static script."""

    tags = {'js'}

    def __init__(self, environment):
        super(StaticJSExtension, self).__init__(environment)
        # Directory (under the static root) where scripts live;
        # overridable via the JINJA2_STATIC_JS Django setting.
        self.environment.js_dir = \
            getattr(dj_settings, 'JINJA2_STATIC_JS', 'js')

    def tag_func(self, path):
        """Return a full ``<script src=...>`` element for *path*."""
        return '<script type="text/javascript" src="%s"></script>' % \
            self.get_url(path, self.environment.js_dir)
class MediaExtension(SimpleTag):
    """``{% media %}`` tag: prefix relative paths with ``MEDIA_URL``."""

    tags = {'media'}

    def tag_func(self, path):
        """Return *path* unchanged if absolute, else prefix with MEDIA_URL."""
        is_absolute = path.startswith('/') or ':' in path.split('/')[0]
        if is_absolute:
            return path
        return dj_settings.MEDIA_URL + path
|
import logging
def configure_logging(cfg):
    """
    Initialize the root logger from a HayrackConfiguration object.

    Sets the root logger level from ``cfg.logging.verbosity`` and attaches a
    FileHandler and/or StreamHandler depending on ``cfg.logging.logfile`` and
    ``cfg.logging.console``.

    Note: each call appends new handlers; calling this more than once will
    duplicate log output.
    """
    logger = logging.getLogger()
    logger.setLevel(cfg.logging.verbosity)
    if cfg.logging.logfile:
        logger.addHandler(logging.FileHandler(cfg.logging.logfile))
    # Truthiness is sufficient here; the redundant bool() wrapper was removed.
    if cfg.logging.console:
        logger.addHandler(logging.StreamHandler())
|
from appdirs import AppDirs
import unittest
import radiam
import os
import tempfile
import shutil
from radiam_api import RadiamAPI
# copied this from radiam_tray, might not all be necessary for testing
# copied this from radiam_tray, might not all be necessary for testing
# Per-user application data directory for the radiam agent.
dirs = AppDirs("radiam-agent", "Compute Canada")
os.makedirs(dirs.user_data_dir, exist_ok=True)
tokenfile = os.path.join(dirs.user_data_dir, "token")
dir_path = os.getcwd()
configjson = os.path.join(dir_path, "configsetting.json")
projectsjson = os.path.join(dir_path, "projects.json")
resumefile = os.path.join(dirs.user_data_dir, "resume")
logger = radiam.log_setup("info")
auth = {}
# Command-line-style arguments consumed by radiam.load_config; keys mirror
# the agent's docopt options. Points at a local dev API instance.
arguments = {'--hostname': "http://127.0.0.1:8100",
             '--minsize': 0,
             '--mtime': 0,
             '--password': "admin",
             '--rootdir': None,
             '--username': "admin",
             '--projectname': "testproject",
             '--quitafter': True
             }
class TestRadiam(unittest.TestCase):
    """Integration-style tests for the radiam agent against a local dev API.

    Shared module-level fixtures (logger, dirs, arguments, resumefile) are
    bound onto the instance so individual tests can mutate them.
    """

    def __init__(self, *args, **kwargs):
        super(TestRadiam, self).__init__(*args, **kwargs)
        self.logger = logger
        self.dirs = dirs
        self.arguments = arguments
        self.resumefile = resumefile
        self.tray_options = {}

    def _load_config(self):
        """Load (config, status) via radiam.load_config and store on self."""
        self.config, self.load_config_status = radiam.load_config(
            self.dirs.user_data_dir, self.arguments, self.logger, self.tray_options)

    def _make_temp_project(self):
        """Create a temp dir, point --rootdir at it and reload the config.

        Returns the TemporaryDirectory; the caller must call .cleanup().
        """
        fp = tempfile.TemporaryDirectory()
        self.arguments['--rootdir'] = fp.name
        self._load_config()
        return fp

    @staticmethod
    def _write_sample_file(dirname):
        """Write a small text file into *dirname* and return its path."""
        temppath = os.path.join(dirname, "radiamtemp.txt")
        with open(temppath, "w") as textfile:
            textfile.write("testing")
        return temppath

    def test_load_config(self):
        self._load_config()
        self.assertIsNotNone(self.config)

    def test_index_file(self):
        self._load_config()
        fp = tempfile.TemporaryDirectory()
        project = self.config['projects']['project_list'][0]
        file_list = radiam.get_list_of_files(fp.name, self.config[project])
        self.assertIsNotNone(file_list)
        fp.cleanup()

    def test_crawl(self):
        fp = self._make_temp_project()
        agent_config = {
            "tokenfile": tokenfile,
            "baseurl": self.config['api']['host'],
            "logger": logger
        }
        API = RadiamAPI(**agent_config)
        self._write_sample_file(fp.name)
        crawler = radiam.crawl(self.dirs, self.arguments, self.logger, self.config, API, self.tray_options)
        self.assertIsNone(crawler)
        fp.cleanup()

    def test_get_dir_meta(self):
        fp = self._make_temp_project()
        project_key = self.config['projects']['project_list'][0]
        self._write_sample_file(fp.name)
        dir_meta = radiam.get_dir_meta(fp.name, self.config, project_key)
        self.assertIsNotNone(dir_meta)
        fp.cleanup()

    def test_get_file_meta(self):
        fp = self._make_temp_project()
        project_key = self.config['projects']['project_list'][0]
        temppath = self._write_sample_file(fp.name)
        file_meta = radiam.get_file_meta(temppath, self.config, project_key)
        self.assertIsNotNone(file_meta)
        fp.cleanup()

    def test_file_excluded(self):
        fp = self._make_temp_project()
        project_key = self.config['projects']['project_list'][0]
        temppath = self._write_sample_file(fp.name)
        is_file_excluded = radiam.file_excluded(temppath, self.config[project_key])
        self.assertFalse(is_file_excluded)
        fp.cleanup()
if __name__ == '__main__':
    # unittest.main()'s positional parameters are (module, defaultTest,
    # argv, ...). Passing the logger/fixtures positionally makes unittest
    # treat them as those parameters and crash the runner — the fixtures
    # are module-level globals and need not be passed at all.
    unittest.main()
#!/usr/bin/env python
from tools.load import LoadMatrix
# Load DNA training/test sequences and their binary labels from disk.
lm=LoadMatrix()
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
label_traindat = lm.load_labels('../data/label_train_dna.dat')
# Each entry: [train, test, labels, order, ppseudo_count, npseudo_count].
parameter_list=[[traindat,testdat,label_traindat,1,1e1, 1e0],[traindat,testdat,label_traindat,1,1e4,1e4]]
def kernel_histogram_word_string_modular (fm_train_dna=traindat,fm_test_dna=testdat,label_train_dna=label_traindat,order=3,ppseudo_count=1,npseudo_count=1):
    """Train a PluginEstimate on word features of DNA strings and build a
    HistogramWordString kernel.

    Returns (km_train, km_test, kernel): the train/train and train/test
    kernel matrices plus the kernel object itself.
    """
    from modshogun import StringCharFeatures, StringWordFeatures, DNA, BinaryLabels
    from modshogun import HistogramWordStringKernel, AvgDiagKernelNormalizer
    from modshogun import PluginEstimate#, MSG_DEBUG

    # Convert raw char sequences to order-k word features (training set).
    charfeat=StringCharFeatures(DNA)
    #charfeat.io.set_loglevel(MSG_DEBUG)
    charfeat.set_features(fm_train_dna)
    feats_train=StringWordFeatures(charfeat.get_alphabet())
    feats_train.obtain_from_char(charfeat, order-1, order, 0, False)

    # Same conversion for the test set.
    charfeat=StringCharFeatures(DNA)
    charfeat.set_features(fm_test_dna)
    feats_test=StringWordFeatures(charfeat.get_alphabet())
    feats_test.obtain_from_char(charfeat, order-1, order, 0, False)

    # Train the plug-in estimator on the labelled training features.
    pie=PluginEstimate(ppseudo_count,npseudo_count)
    labels=BinaryLabels(label_train_dna)
    pie.set_labels(labels)
    pie.set_features(feats_train)
    pie.train()

    # Kernel matrices: train/train first, then train/test after re-init.
    kernel=HistogramWordStringKernel(feats_train, feats_train, pie)
    km_train=kernel.get_kernel_matrix()
    kernel.init(feats_train, feats_test)
    pie.set_features(feats_test)
    pie.apply().get_labels()
    km_test=kernel.get_kernel_matrix()
    return km_train,km_test,kernel

if __name__=='__main__':
    # Run the first parameter set as a demo.
    print('PluginEstimate w/ HistogramWord')
    kernel_histogram_word_string_modular(*parameter_list[0])
|
from .LocallyConnected1d import LocallyConnected1d
from .LocallyConnected2d import LocallyConnected2d |
#
# This file is part of BRANCHPRO
# (https://github.com/SABS-R3-Epidemiology/branchpro.git) which is released
# under the BSD 3-clause license. See accompanying LICENSE.md for copyright
# notice and full license details.
#
"""Processing script for Hainan, China data from [1]_.
It rewrites the data file into the format expected by our app.
References
----------
.. [1] National Risk Management Area Classification and Prevention and Control
Measures, (updated February 01 2022). http://wst.hainan.gov.cn/yqfk/
"""
import datetime
import os
import pandas
from collections import defaultdict
def main():
    """
    Rewrite a new csv file for the data in the desired format.

    We combine the daily import-related and imported cases as the imported
    case, and we add the daily under investigation to the daily locally
    acquired cases (with unknown source and epidemiologically linked).
    """
    # Read the original data
    data = pandas.read_csv(
        os.path.join(os.path.dirname(__file__), 'cases.csv'))

    # Accumulate [local, imported] per ISO date; dates arrive as d/m/Y.
    new_data = defaultdict(lambda: [0, 0])
    for _, row in data.iterrows():
        day, month, year = row['date'].split('/')
        date = '{}-{:02d}-{:02d}'.format(year, int(month), int(day))
        # Select imported cases
        new_data[date][1] += row['imported']
        # Select locally acquired cases
        new_data[date][0] += row['local']

    all_dates = sorted(new_data)

    # Create a pandas DataFrame for the data
    data = pandas.DataFrame()
    data['Incidence Number'] = [new_data[d][0] for d in all_dates]
    data['Imported Cases'] = [new_data[d][1] for d in all_dates]
    data['date'] = all_dates

    # 1-based day offset from the first recorded date.
    start = datetime.date.fromisoformat(all_dates[0])
    data['Time'] = [(datetime.date.fromisoformat(x) - start).days + 1
                    for x in data['date']]

    # Order the columns
    data = data[['Time', 'Incidence Number', 'Imported Cases', 'date']]

    # Convert the file to csv
    data.to_csv(
        os.path.join(os.path.dirname(__file__), 'HN.csv'),
        index=False)


if __name__ == '__main__':
    main()
|
import sys, os, glob
from glbase3 import *
sys.path.append('../../')
import shared
# Remove stale outputs from previous runs. Plain for-loops instead of
# list comprehensions used purely for side effects.
for stale in glob.glob('*.tsv'):
    os.remove(stale)
for stale in glob.glob('*.glb'):
    os.remove(stale)
def qcollide(al, ar, bl, br):
    """Return True when closed interval [al, ar] overlaps [bl, br]."""
    # Two intervals overlap unless one lies entirely to the left of the other.
    return not (ar < bl or br < al)
def contained(al, ar, bl, br):
    """Return True when interval (al, ar) lies strictly inside (bl, br)."""
    return bl < al and ar < br
# These have the official GENCODE CDS, and the predicted (about ~80% accurate)
canonical = glload('../../gencode/hg38_gencode_v32.pc.glb')
gencode_cds = glload('../../transcript_assembly/get_CDS/gencode_cds.glb')
print(gencode_cds)
canonical_all = canonical.map(genelist=gencode_cds, key='enst')
canonical = {} # convert to a quick look up for speed
# Group canonical transcripts by gene name, mirroring the CDS keys that
# novel transcripts carry so both can be compared uniformly later.
for gene in canonical_all:
    if gene['name'] not in canonical:
        canonical[gene['name']] = []
    gene['cds_info'] = True
    gene['cds_gencode_loc'] = gene['cds_loc']
    gene['cds_local_to_genome'] = gene['cds_loc']
    canonical[gene['name']].append(gene)

# get sequences for extra string literal matching;
gencode_peptide_fastas = genelist('../../transcript_assembly/get_CDS/gencode.v32.pc_translations.fa.gz', format=format.fasta, gzip=True)
#print(gencode_peptide_fastas)
# Map gene symbol (7th '|'-separated field of the FASTA header) -> peptide seqs.
gencode_peptide_fastas_lookup = {}
for gene in gencode_peptide_fastas:
    name = gene['name'].split('|')[6]
    if name not in gencode_peptide_fastas_lookup:
        gencode_peptide_fastas_lookup[name] = []
    gencode_peptide_fastas_lookup[name].append(gene['seq'])

all_transcripts = glload('../../transcript_assembly/packed/all_genes.glb')
# transcript_id -> CDS record lookup.
cds = glload('../../transcript_assembly/get_CDS/coding_genes_with_local_CDS-corrected.glb')
cds = {gene['transcript_id']: gene for gene in cds}
#print(cds)

tes = glload('../te_transcripts/transcript_table_merged.mapped.glb') # yes, all as I am measuring PC -> ncRNA as well;
tes = {gene['transcript_id']: gene for gene in tes}
def check_sequence_versus_gencode(seq, gencode_peptide_fastas_lookup):
    # NOTE(review): this function references `name`, `gene`, `aa` and assigns
    # `found`, none of which are parameters or locals — it can only work by
    # accident through module-level globals, and the `seq` parameter is
    # immediately shadowed by the loop variable. It does not appear to be
    # called anywhere in this script; confirm intent before relying on it.
    if name in gencode_peptide_fastas_lookup:
        for seq in gencode_peptide_fastas_lookup[gene['name'].split(' ')[0]]:
            if seq == aa:
                print(name)
                print(seq)
                print(aa)
                print()
                found = True
                return True
    return False
# First I need to bundle them up by their name;
# Each bundle collects all transcripts sharing a gene symbol, annotated with
# their TE domains (if any) and CDS coordinates (if coding).
bundles = {}
for transcript in all_transcripts:
    symbol = transcript['name'].split(' ')[0]

    if symbol not in bundles:
        bundles[symbol] = []

    if transcript['transcript_id'] in tes:
        transcript['doms'] = tes[transcript['transcript_id']]['doms']
        transcript['TEs'] = True
    else:
        transcript['TEs'] = False
        transcript['doms'] = []

    # Get the CDS info:
    if transcript['transcript_id'] in cds:
        transcript['cds_info'] = True # and presumably 'coding' == True?
        transcript['cds_local_locs'] = cds[transcript['transcript_id']]['cds_local_locs']
        transcript['cds_gencode_loc'] = cds[transcript['transcript_id']]['cds_gencode_loc']
        transcript['cds_local_to_genome'] = cds[transcript['transcript_id']]['cds_local_to_genome']
    else:
        # Probably non-coding;
        transcript['cds_info'] = False
        transcript['cds_local_locs'] = (-1,-1)
        transcript['cds_gencode_loc'] = None
        transcript['cds_local_to_genome'] = None

    bundles[symbol].append(transcript)

print('Found {:,} bundles of genes'.format(len(bundles)))

# Classification buckets filled by the main loop below.
res = {'inframe_insertion': [], # Class 1 # numbers are per-transcript;
    'frameshift_insertion': [],
    'noncoding_to_coding_withTE': [],
    'noncoding_to_coding_noTE': [],
    'new_STOP': [], # Class 2
    'new_ATG': [],
    'coding_to_noncoding': [], # Class 3
    'insertion_alternate_cds': [], # Class 4
    'no_disruption_5prime': [], # Class 5
    'no_disruption_3prime': [], # Class 6
    'no_disruption_5_3prime': [],
    'class_not_found': [],
    'variant_coding_but_noTE': [],
    'no_coding': []}
total = 0
no_variants = 0
canonical_not_found = 0
# Main classification loop: for every gene bundle, compare the novel ('~')
# transcripts against the known/canonical ('=') ones and assign each novel
# transcript to exactly one bucket in `res`.
for idx, gene_name in enumerate(bundles):
    if '-' in gene_name:
        continue # Skip these

    total += len(bundles[gene_name])
    all_types = [i['tags'][-1] for i in bundles[gene_name]]
    if '~' not in all_types: # No variants here
        no_variants += len(bundles[gene_name])
        continue

    if len(bundles[gene_name]) == 1 and all_types[0] == '=': # Only the canonical one was found, skip;
        continue

    # Always add the canonical transcripts from GENCODE, otherwise you just end up guessing existing CDS that have a slightly different transcript
    if gene_name in canonical:
        can = None
        can = canonical[gene_name]
        #can = canonical.getRowsByKey(key='name', values=gene_name, silent=True)
        if can:
            for i in can:
                i['tags'] = '='
                i['coding'] = 'coding'
                bundles[gene_name].append(i)
            #print(bundles[gene_name])
    else:
        #print(gene_name)
        canonical_not_found += 1 # probably non-coding
        continue

    # Don't worry about duplicate removal as we only care about the ~ transcripts anyways

    # check there is at least 1 coding in there, coming from either the GENCODE canonical, or internally
    if 'coding' not in [i['coding'] for i in bundles[gene_name]]:
        res['no_coding'] += bundles[gene_name]
        continue

    # Add the doms key to all transcripts;
    for transcript in bundles[gene_name]:
        if transcript['transcript_id'] in tes:
            te = tes[transcript['transcript_id']]
            transcript['doms'] = te['doms']
        else:
            transcript['doms'] = []

    # divide into known and novel:
    known = [t for t in bundles[gene_name] if '=' in t['tags']]
    novel = [t for t in bundles[gene_name] if '~' in t['tags']]
    #print(known, novel)

    # The positions are often slightly off by -2, -1, 1 and 2 bp;
    # so collect a tolerance set of acceptable CDS lengths around each known one.
    cds_lengths = []
    cds_lengths = [(i['cds_local_locs'][1]-i['cds_local_locs'][0]) for i in known if i['coding'] == 'coding']
    cds_inframe = set(cds_lengths)
    cds_lengths += [l-2 for l in cds_lengths]
    cds_lengths += [l-1 for l in cds_lengths]
    cds_lengths += [l+1 for l in cds_lengths]
    cds_lengths += [l+2 for l in cds_lengths]
    cds_lengths = set(cds_lengths)

    # Collect genome-coordinate CDS start (or stop, on '-' strand) edges
    # (+/- 2 bp tolerance) from the known coding transcripts.
    canonical_cds_edges_genome = []
    for t in known:
        #print('\n',t)
        if t['coding'] != 'coding': continue
        if not t['cds_info']: continue
        if 'cds_gencode_loc' not in t: continue
        # NOTE(review): `transcript` here is the leftover variable from the
        # doms loop above, not `t` — possibly t['strand'] was intended; confirm.
        if transcript['strand'] == '+':
            canonical_cds_edges_genome = []
            if t['cds_gencode_loc']:
                edge = t['cds_gencode_loc']['left']
                canonical_cds_edges_genome += [edge, edge-1, edge+1, edge+2]
            edge = t['cds_local_to_genome']['left']
            canonical_cds_edges_genome += [edge, edge-1, edge+1, edge+2]
        else:
            canonical_cds_edges_genome = []
            if t['cds_gencode_loc']:
                edge = t['cds_gencode_loc']['right']
                canonical_cds_edges_genome += [edge, edge-1, edge+1, edge+2]
            edge = t['cds_local_to_genome']['right']
            canonical_cds_edges_genome += [edge, edge-1, edge+1, edge+2]
    canonical_cds_edges_genome = set(canonical_cds_edges_genome)

    for transcript in novel:
        # calls: one boolean flag per candidate class; resolved by the
        # priority ladder at the end of this loop body.
        inframe_insertion = False
        frameshift_insertion = False
        insertion_alternate_cds = False
        new_STOP = False
        new_ATG = False
        no_disruption_5prime = False
        no_disruption_3prime = False
        noncoding_to_coding_withTE = False
        noncoding_to_coding_noTE = False
        variant_coding_but_noTE = False

        # noncoding to coding
        known_coding_status = set([t['coding'] for t in bundles[gene_name] if '=' in t['tags']])
        novel_coding_status = transcript['coding']

        te = None
        if transcript['transcript_id'] in tes:
            te = tes[transcript['transcript_id']]

        if transcript['coding'] == 'noncoding':
            res['coding_to_noncoding'].append(transcript)
            continue

        if te:
            # find out if a TE overlaps the CDS:
            for t in te['doms']:
                # Collect the parameters:
                #print(t, '\n', transcript)
                colliding = qcollide(t['span'][0], t['span'][1], transcript['cds_local_locs'][0], transcript['cds_local_locs'][1])
                enclosed = contained(t['span'][0], t['span'][1], transcript['cds_local_locs'][0], transcript['cds_local_locs'][1])
                te_length = (t['span'][1] - t['span'][0])
                expected_cds_length = (transcript['cds_local_locs'][1] - transcript['cds_local_locs'][0])

                # cut the te for partially translated TEs:
                # NOTE(review): `i` below is a leaked loop variable from the
                # canonical loop far above, not this transcript — looks like
                # `transcript['cds_local_locs']` was intended; confirm.
                te_edges = (max(t['span'][0], i['cds_local_locs'][0]), min(t['span'][1], i['cds_local_locs'][1]))
                te_span = te_edges[1] - te_edges[0]
                cds_lengths_plus_te = [l+te_span for l in cds_inframe]

                cds_edges = [i['cds_local_locs'][0] for i in known if i['coding'] == 'coding']
                cds_edges += [i['cds_local_locs'][1] for i in known if i['coding'] == 'coding']

                if colliding: # with this CDS;
                    if 'coding' in novel_coding_status and 'coding' not in known_coding_status:
                        noncoding_to_coding_withTE = True # This will override all classes

                    if enclosed: # TE is entirely contained in the transcript
                        if expected_cds_length in cds_lengths_plus_te: # TE is most likely in frame inserted:
                            inframe_insertion = True
                        elif expected_cds_length in cds_lengths: # It's probably already annotated as contained, and has no effect on the CDS
                            frameshift_insertion = True# This will almost certainly get trimmed in the BLAST step so it's safe to leave it in this class;
                            #pass # I suppose it could get here by chance, but chances are less than 1 in 1e5 assuming ~1000 bp for both transcript and TE.
                        else: # probably a new CDS
                            frameshift_insertion = True

                    else: # It is colliding, but extends past the CDS, i.e. the STOP is inside the TE.
                        if t['span'][1] >= transcript['cds_local_locs'][1]: # It's STOP the CDS
                            if expected_cds_length in cds_lengths: # It's probably already annotated as contained, and has no effect on the CDS
                                pass
                            else: # probably a novel truncation
                                new_STOP = True
                        elif t['span'][0] <= transcript['cds_local_locs'][0]: # It's at the START;
                            if expected_cds_length in cds_lengths: # It's probably already annotated as contained, and has no effect on the CDS
                                pass
                            else: # probably a novel truncation
                                new_ATG = True
                        else: # I can't ID it;
                            1/0 # Never gets here;
                            frameshift_insertion = True

                else: # No collision with this CDS; check it's 5' or 3':
                    # Check it against the canonical CDSs;
                    if expected_cds_length in cds_lengths: # It's a simple insertion 5' or 3':
                        # I know it's not a collision, so just test the edge:
                        if t['span'][1] < transcript['cds_local_locs'][0]: # 5'
                            no_disruption_5prime = True
                        elif t['span'][0] > transcript['cds_local_locs'][1]:
                            no_disruption_3prime = True
                        else:
                            frameshift_insertion = True # It would just get trimmed in the BLAST step;
                    elif transcript['cds_local_to_genome']['left'] in canonical_cds_edges_genome and transcript['cds_local_to_genome']['right'] in canonical_cds_edges_genome:
                        no_disruption_5prime = True # No disruption to CDS
                        no_disruption_3prime = True
                    else:
                        if 'coding' in novel_coding_status and 'coding' not in known_coding_status:
                            noncoding_to_coding_noTE = True # No TE IN CDS! This will override all classes
                            #print('\n', transcript)

                        # see if one of the cds edges perfectly matches a canonical edge: Most likey a mid_CDS_insertion, that results in a STOP before the TE (hence no collision)'
                        if transcript['cds_local_locs'][0] in cds_edges or transcript['cds_local_locs'][1] in cds_edges:
                            insertion_alternate_cds = True
                        if transcript['cds_local_to_genome']['left'] in canonical_cds_edges_genome or transcript['cds_local_to_genome']['right'] in canonical_cds_edges_genome:
                            insertion_alternate_cds = True
                        else:
                            insertion_alternate_cds = True
                            # I find this category to be a bit dubious, and seems to have too many False+

        else: # No TE
            if 'coding' in novel_coding_status and 'coding' not in known_coding_status:
                noncoding_to_coding_noTE = True # This will override all classes
            else:
                variant_coding_but_noTE = True

        # transcripts only get called once. Add it here based ona hierarchy:
        # Use the calls above to assign to the preferred classes:
        if noncoding_to_coding_withTE: res['noncoding_to_coding_withTE'].append(transcript)
        elif noncoding_to_coding_noTE: res['noncoding_to_coding_noTE'].append(transcript)
        elif inframe_insertion: res['inframe_insertion'].append(transcript)
        elif new_ATG: res['new_ATG'].append(transcript)
        elif new_STOP: res['new_STOP'].append(transcript)
        elif frameshift_insertion: res['frameshift_insertion'].append(transcript)
        elif no_disruption_5prime and no_disruption_3prime: res['no_disruption_5_3prime'].append(transcript)
        elif no_disruption_5prime: res['no_disruption_5prime'].append(transcript)
        elif no_disruption_3prime: res['no_disruption_3prime'].append(transcript)
        elif insertion_alternate_cds: res['insertion_alternate_cds'].append(transcript)
        elif variant_coding_but_noTE: res['variant_coding_but_noTE'].append(transcript)
        else:
            res['class_not_found'].append(transcript)
            #print(transcript)

    #if idx > 5000:
    #    break
# Persist each non-empty classification bucket as a TSV + glb pair,
# then print a per-class summary. The genelist is only constructed when
# there is something to save.
for k in res:
    if res[k]:
        gl = genelist()
        gl.load_list(res[k])
        gl.saveTSV('table_{}.tsv'.format(k))
        gl.save('table_{}.glb'.format(k))

print()
for k in res:
    print(k, len(res[k]))
print('No variants', no_variants)
print('Total', total)
print('canonical_not_found', canonical_not_found)
|
import re
import numpy as NP
from matplotlib import pyplot as PLT
import sys, getopt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import os
import statistics
SERVICE_NUMBERS = [0, 1, 2, 4, 8, 16]
path = ""
class FileParser:
    """Extracts throughput results from cbench log files."""

    def __init__(self):
        # NOTE(review): never populated or read in this script.
        self.cbench_filenames = []

    def getAllResults(self):
        """Parse cbench_throughput_<i>.log for each service count.

        Returns a dict mapping str(i) -> [avg, stdev] (both truncated to int).
        """
        # Captures min/max/avg/stdev (responses/s) from the RESULT line.
        # NOTE(review): '\d switches' matches a single digit only — confirm
        # the switch count never exceeds 9 in these logs.
        regex = "^RESULT: \d switches \d+ tests min\/max\/avg\/stdev = ([0-9\.]*)\/([0-9\.]*)\/([0-9\.]*)\/([0-9\.]*) responses\/s$"
        result = {}
        for i in SERVICE_NUMBERS:
            with open('cbench_throughput_{}.log'.format(i)) as currentFile:
                lines = currentFile.read()
                match = re.search(regex, lines, re.MULTILINE)
                res = [float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))]
                # Keep only avg and stdev.
                result[str(i)] = [int(res[2]), int(res[3])]
        return result
class Plotter:
    """Renders the throughput results as an errorbar plot saved to PDF."""

    def __init__(self, data):
        # data: dict mapping str(service count) -> [avg, stdev].
        self.data = data
        #font = {'family': 'normal', 'size': 18}
        #PLT.rc('font', **font)
        #PLT.rcParams['hatch.linewidth'] = 1.5

    def getDataWithIndex(self, i, x):
        # NOTE(review): indexes self.data as a list of subscriptables, but
        # the constructor receives a dict of lists — appears unused/broken;
        # confirm before calling.
        return NP.array(list(map(lambda resultSet: resultSet[i][x], self.data)))

    def plot(self, outputfileName):
        """Plot mean +/- stdev per service count and save under `path`."""
        fig = PLT.figure(figsize=(3.5, 3))
        PLT.xlabel('Number of GCMI Apps')
        PLT.ylabel('Responses per s')
        # y axis between 0 and 4000
        #PLT.yticks(NP.arange(0, 60000, 10000.0))
        PLT.gca().set_ylim([0, 65000])
        blue = "#1f77b4"
        orange = "#ff7f0e"
        y_values = []
        x_values = NP.arange(len(SERVICE_NUMBERS))
        x = 0
        # One errorbar + marker per service count, at evenly spaced x ticks.
        for i in SERVICE_NUMBERS:
            avg = self.data[str(i)][0]
            stdev = self.data[str(i)][1]
            PLT.errorbar(x, avg, stdev, fmt='', color=orange, elinewidth=4)
            PLT.plot(x, avg, color=blue, marker='o', markersize=4, linestyle='-')
            y_values.append(avg)
            x += 1
        PLT.gca().set_xticks(x_values)
        PLT.gca().set_xticklabels(['direct', 1, 2, 4, 8, 16])
        PLT.gca().yaxis.grid(True)
        # Connect the means with a line.
        PLT.plot(x_values, y_values, linewidth=2)
        # Draw legend
        PLT.rcParams['legend.fontsize'] = '10'
        blue_line = mlines.Line2D([], [], color=blue, marker='o', markersize=4, label='mean')
        orange_error = mpatches.Patch(color=orange, label='standard deviation', linewidth=1)
        PLT.legend(handles=[blue_line, orange_error], loc='upper right')
        PLT.tight_layout()
        outputfileName = "" + outputfileName
        fig.savefig(path + "/" + outputfileName, bbox_inches='tight')
        print("saved plot to " + outputfileName)
def main(argv):
    """Parse -p/--path, read the cbench logs and render the throughput plot."""
    try:
        opts, args = getopt.getopt(argv, "hp:", ["path="])
    except getopt.GetoptError:
        print('plot_throughput_results.py -p <path>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print('plot_throughput_results.py -p <path>')
            sys.exit()
        elif opt in ("-p", "--path"):
            # Output directory used by Plotter.plot via the module global.
            global path
            path = arg
    outputfile = "throughput_plot.pdf"
    fileParser = FileParser()
    data = fileParser.getAllResults()
    plotter = Plotter(data)
    plotter.plot(outputfile)

if __name__ == "__main__":
    main(sys.argv[1:])
|
#!/usr/bin/env python
from _winreg import *
from subprocess import check_output
# NOTE(review): `_winreg` is the Python 2 module name (winreg in Python 3);
# this script is Windows-only and must run with admin rights to write HKLM.
# Each entry: [subkey path, value name, value type, value data].
regKeys = []
regKeys.append(['SOFTWARE\\Policies\\Microsoft\\Windows\\WindowsUpdate', '',REG_SZ, None])
regKeys.append(['SOFTWARE\\Policies\\Microsoft\\Windows\\WindowsUpdate\\AU', '',REG_SZ, None])
# Prevent automatic reboot while users are logged on.
regKeys.append(['SOFTWARE\\Policies\\Microsoft\\Windows\\WindowsUpdate\\AU', 'NoAutoRebootWithLoggedOnUsers',REG_DWORD, 1])
for key,name,key_type,val in regKeys:
    print("writing key %s to registry" % key)
    # CreateKey opens the key, creating it if it does not yet exist.
    registryKey = CreateKey(HKEY_LOCAL_MACHINE, key)
    SetValueEx(registryKey, name, 0, key_type, val)
    CloseKey(registryKey)
print("forcing reload of Windows Update configuration...")
check_output("gpupdate /force", shell=True)
print("done.")
|
from .mongo_connector import db
def get_client():
    """Return the shared database handle exposed by mongo_connector."""
    return db
|
"""
Running example 6.9 from Ang & Tang 1984
"""
import paransys
import numpy as np
# Call ParAnsys
mc = paransys.MonteCarlo()

# Console log On
mc.Info(True)

# Create random variables (Gaussian, specified by mean + cv or std).
mc.CreateVar('y', 'gauss', 40, cv=0.125)
mc.CreateVar('z', 'gauss', 50, cv=0.050)
mc.CreateVar('m', 'gauss', 1000, std=0.200*1000)
# An Python function that will be the Implicit function
def myFunc(y, z, m):
    """Implicit limit-state function g = y*z - m, with m capped at 1e27."""
    # Just to show that we can do a lot of things here...
    # Cap the load term (demonstrates arbitrary conditional logic in the LS).
    capped_m = min(m, 1e27)
    # Determine the result
    outcome = y * z - capped_m
    # Print the result externally to PARANSYS
    print('~~~The LS result is: {}'.format(outcome))
    return outcome
# There are two ways to use implicit functions:
# Setting a Python function in the place of the LS string, as next line:
mc.CreateLimState(myFunc)
# Or setting the Python function as userf and using it inside LS string:
#mc.CreateLimState("userf(y=y, z=z, m=m)", userf=myFunc)

# Define correlation
mc.SetCorrel('y', 'z', 0.40)

# Sampling for first limit state (0)
# Shifted sampling distributions (importance sampling): it creates failures earlier =)
k = 2
# For GAUSS and GUMBEL is better to use STD and for LOGN CV ;)
mc.SetRandomVarSampl('y', 0, 'gauss', 40*(1-k*0.125), std=0.125*40)
mc.SetRandomVarSampl('z', 0, 'gauss', 50*(1-k*0.050), std=0.050*50)
mc.SetRandomVarSampl('m', 0, 'gauss', 1000*(1+k*0.200), std=0.200*1000)

# Running
values = mc.Run(100, 1000, 0.05, 0.005)

# Export
#mc.ExportDataCSV('AngTang-MCAI-69', 'Comentarios?')

# Figures
#mc.Graph(['N_Pf', 'N_Beta', 'N_CVPf'], show=True)
|
# -*- coding: utf-8 -*-
# @Time : 2020-10-07 13:33
# @Author : Zhiwei Yang
|
import random
class Counter:
    """Exact event counter: every increment() adds exactly one."""

    def __init__(self):
        self.reset()

    def get(self):
        """Return the current count."""
        return self.count

    def increment(self):
        """Record one event."""
        self.count = self.count + 1

    def reset(self):
        """Set the count back to zero."""
        self.count = 0
class ProbabilisticCounter(Counter):
    """Approximate counter that only counts a 1/a fraction of events.

    Each increment() advances the stored count with probability 1/a; get()
    compensates by scaling the stored count back up by a.
    """

    def __init__(self, a=2):
        super().__init__()
        self._a = a
        self._probability = 1 / a

    def get(self):
        """Return the estimated number of events (stored count times a)."""
        return self._a * super().get()

    def increment(self):
        """Flip a biased coin and count the event with probability 1/a."""
        if random.random() < self._probability:
            self.count += 1
class LogarithmicCounter(ProbabilisticCounter):
    """Morris-style counter: the stored value grows logarithmically with events.

    The probability of advancing decays geometrically with the stored
    exponent, so huge event counts fit in a tiny register.
    """

    def get(self):
        """Return the estimate (a**count - (a-1)) / (a-1) for stored exponent count."""
        denom = self._a - 1
        return (self._a ** self.count - denom) / denom

    def increment(self):
        """Advance the exponent with probability (1/a)**count."""
        if random.random() < self._probability ** self.count:
            self.count += 1
class Colors:
    """Named RGB color constants as (red, green, blue) tuples in 0-255 range."""
    BLACK_COLOR = (0, 0, 0)
    WHITE_COLOR = (255, 255, 255)
    DARK_BLUE = (0, 0, 100)
    DARK_GRAY = (75, 75, 75)
    TROLL_GREEN = (100, 180, 150)
    ORC_GREEN = (150, 250, 230)
    BLOOD_RED = (255, 50, 50)
# Copyright 2020-2021 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IUT provider module."""
import logging
import time
from environment_provider.iut.list import List
from environment_provider.iut.checkout import Checkout
from environment_provider.iut.checkin import Checkin
from environment_provider.iut.prepare import Prepare
from .exceptions import (
NoIutFound,
IutNotAvailable,
IutCheckoutFailed,
NotEnoughIutsAvailable,
)
class IutProvider:
    """Item under test (IUT) provider."""
    logger = logging.getLogger("IUTProvider")
    def __init__(self, etos, jsontas, ruleset):
        """Initialize IUT provider.
        :param etos: ETOS library instance.
        :type etos: :obj:`etos_lib.etos.Etos`
        :param jsontas: JSONTas instance used to evaluate the rulesets.
        :type jsontas: :obj:`jsontas.jsontas.JsonTas`
        :param ruleset: JSONTas ruleset for handling IUTs.
        :type ruleset: dict
        """
        self.etos = etos
        # Start with an empty list of checked-out IUTs in the shared ETOS config.
        self.etos.config.set("iuts", [])
        self.jsontas = jsontas
        self.ruleset = ruleset
        self.id = self.ruleset.get("id") # pylint:disable=invalid-name
        self.logger.info("Initialized IUT provider %r", self.id)
    @property
    def identity(self):
        """IUT Identity.
        :return: IUT identity as PURL object.
        :rtype: :obj:`packageurl.PackageURL`
        """
        return self.jsontas.dataset.get("identity")
    def checkout(self, available_iuts):
        """Checkout a number of IUTs from an IUT provider.
        :param available_iuts: IUTs to checkout.
        :type available_iuts: list
        :return: Checked out IUTs.
        :rtype: list
        """
        checkout_iuts = Checkout(self.jsontas, self.ruleset.get("checkout"))
        return checkout_iuts.checkout(available_iuts)
    def list(self, amount):
        """List IUTs in order to find out which are available or not.
        :param amount: Number of IUTs to list.
        :type amount: int
        :return: Available IUTs in the IUT provider.
        :rtype: list
        """
        list_iuts = List(self.id, self.jsontas, self.ruleset.get("list"))
        return list_iuts.list(self.identity, amount)
    def checkin_all(self):
        """Check in all checked out IUTs."""
        checkin_iuts = Checkin(self.jsontas, self.ruleset.get("checkin"))
        checkin_iuts.checkin_all()
    def checkin(self, iut):
        """Check in a single IUT, returning it to the IUT provider.
        :param iut: IUT to checkin.
        :type iut: :obj:`environment_provider.iut.iut.Iut`
        """
        checkin_iuts = Checkin(self.jsontas, self.ruleset.get("checkin"))
        checkin_iuts.checkin(iut)
    def prepare(self, iuts):
        """Prepare all IUTs in the IUT provider.
        :param iuts: IUTs to prepare.
        :type iuts: list
        :return: Prepared IUTs
        :rtype: list
        """
        prepare_iuts = Prepare(self.jsontas, self.ruleset.get("prepare"))
        return prepare_iuts.prepare(iuts)
    def _fail_message(self, last_exception):
        """Generate a fail message for IUT provider.
        :param last_exception: Latest exception that was raised within the wait method.
        :type last_exception: :obj:`BaseException`
        :return: A failure reason.
        :rtype: str
        """
        timeout = self.etos.config.get("WAIT_FOR_IUT_TIMEOUT")
        # Fallback reason when the exception type is not one of the known ones.
        fail_reason = "Unknown"
        if isinstance(last_exception, NoIutFound):
            fail_reason = f"IUT not found using IUT provider '{self.id}'"
        elif isinstance(last_exception, IutNotAvailable):
            fail_reason = f"No IUT became available within {timeout}s."
        elif isinstance(last_exception, IutCheckoutFailed):
            fail_reason = str(last_exception)
        return f"Failed to checkout {self.identity.to_string()}. Reason: {fail_reason}"
    # pylint: disable=too-many-branches
    def wait_for_and_checkout_iuts(self, minimum_amount=0, maximum_amount=100):
        """Wait for and checkout IUTs from an IUT provider.
        :raises: IutNotAvailable: If there are no available IUTs after timeout.
        :param minimum_amount: Minimum amount of IUTs to checkout.
        :type minimum_amount: int
        :param maximum_amount: Maximum amount of IUTs to checkout.
        :type maximum_amount: int
        :return: List of checked out IUTs.
        :rtype: list
        """
        timeout = time.time() + self.etos.config.get("WAIT_FOR_IUT_TIMEOUT")
        last_exception = None
        prepared_iuts = []
        # Retry the list -> checkout -> prepare cycle until enough IUTs are
        # ready or the timeout expires. Fatal errors break out early.
        while time.time() < timeout:
            time.sleep(5)
            try:
                available_iuts = self.list(maximum_amount)
                self.logger.info("Available IUTs:")
                for iut in available_iuts:
                    self.logger.info(iut)
                if len(available_iuts) < minimum_amount:
                    self.logger.critical(
                        "Not enough available IUTs %r in the IUT provider!",
                        self.identity.to_string(),
                    )
                    # NOTE: not caught below, so this propagates to the caller.
                    raise NotEnoughIutsAvailable(self.identity.to_string())
                checked_out_iuts = self.checkout(available_iuts)
                self.logger.info("Checked out IUTs:")
                for iut in checked_out_iuts:
                    self.logger.info(iut)
                if len(checked_out_iuts) < minimum_amount:
                    raise IutNotAvailable(self.identity.to_string())
                prepared_iuts, unprepared_iuts = self.prepare(checked_out_iuts)
                # Return any IUT that failed preparation to the provider.
                for iut in unprepared_iuts:
                    self.checkin(iut)
                self.logger.info("Prepared IUTs:")
                for iut in prepared_iuts:
                    self.logger.info(iut)
                if len(prepared_iuts) < minimum_amount:
                    raise IutNotAvailable(
                        f"Preparation of {self.identity.to_string()} failed"
                    )
                # Success: enough prepared IUTs; leave the retry loop.
                break
            except NoIutFound as not_found:
                self.logger.critical(
                    "%r does not exist in the IUT provider!", self.identity.to_string()
                )
                prepared_iuts = []
                last_exception = not_found
                # Fatal: identity unknown to the provider; stop retrying.
                break
            except IutNotAvailable as not_available:
                self.logger.warning("IUT %r is not available yet.", self.identity)
                last_exception = not_available
                # Transient: keep waiting until the timeout expires.
                continue
            except IutCheckoutFailed as checkout_failed:
                self.logger.critical(
                    "Checkout of %r failed with reason %r!",
                    self.identity.to_string(),
                    checkout_failed,
                )
                self.checkin_all()
                prepared_iuts = []
                last_exception = checkout_failed
                # Fatal: checkout failed outright; stop retrying.
                break
        else:
            # while-else: only runs when the loop ended by timeout (no break).
            self.logger.error(
                "IUT %r did not become available in %rs",
                self.identity.to_string(),
                self.etos.config.get("WAIT_FOR_IUT_TIMEOUT"),
            )
            prepared_iuts = []
        if len(prepared_iuts) < minimum_amount:
            raise IutNotAvailable(self._fail_message(last_exception))
        return prepared_iuts
|
# Pizza orders to print: one dict per customer.
orders = [
    {'name': 'Mario', 'flavor': 'pepperoni'},
    {'name': 'Marcolino', 'flavor': 'barbecue'},
]
# Print one formatted line per order.
template = 'Name: {0}, Flavor: {1}'
for entry in orders:
    print(template.format(entry['name'], entry['flavor']))
|
"""
jupylet/audio/__init__.py
Copyright (c) 2020, Nir Aides - nir@winpdb.org
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import asyncio
import pathlib
import time
import os
from ..utils import callerframe, callerpath
def sonic_py(resource_dir='.'):
    """Create a 32x32 App rooted at the caller's directory and start it.

    Args:
        resource_dir (str): Resource directory, relative to the calling
            module's location.

    Returns:
        App: The application instance after run(0) returns.
    """
    from ..app import App
    resources = pathlib.Path(os.path.join(callerpath(), resource_dir)).absolute()
    app = App(32, 32, resource_dir=str(resources))
    app.run(0)
    return app
# Audio sample rate in frames per second.
FPS = 44100
def t2frames(t):
    """Convert a duration in seconds to a whole number of audio frames.

    Args:
        t (float): The time duration in seconds.

    Returns:
        int: The number of frames at 44100 frames per second (truncated).
    """
    return int(t * FPS)
def frames2t(frames):
    """Convert a number of audio frames to a duration in seconds.

    Args:
        frames (int): The number of frames at 44100 frames per second.

    Returns:
        float: The time duration in seconds.
    """
    seconds = frames / FPS
    return seconds
def get_time():
    """Return the current wall-clock time in seconds (time.time())."""
    return time.time()
# Global note value: 4 means a quarter note is the beat unit.
_note_value = 4
def set_note_value(v=4):
    """Set the global note value (default 4)."""
    global _note_value
    _note_value = v
def get_note_value():
    """Return the global note value."""
    return _note_value
# Global tempo in beats per minute.
_bpm = 240
def set_bpm(bpm=4):
    """Set the global tempo in beats per minute.

    NOTE(review): the default of 4 looks like a copy-paste from
    set_note_value (the module-level default is 240) — confirm intent
    before relying on a no-argument call.
    """
    global _bpm
    _bpm = bpm
def get_bpm():
    """Return the global tempo in beats per minute."""
    return _bpm
# dtd: per-caller scheduled play times; syd: per-caller active synth.
dtd = {}
syd = {}
def use(synth, **kwargs):
    """Select *synth* as the active synth for the calling function/module.

    Keyword arguments are applied to a copy of the synth, leaving the
    original object untouched.
    """
    if kwargs:
        synth = synth.copy().set(**kwargs)
    cf = callerframe()
    cn = cf.f_code.co_name
    # Module-level calls share one slot keyed '<module>'; calls from inside
    # a function are keyed by the hash of that function's frame.
    hh = cn if cn == '<module>' else hash(cf)
    syd[hh] = synth
# Extra latency (seconds) added to the scheduled play time — presumably to
# give the audio device headroom to schedule the note; confirm against the
# device module.
PLAY_EXTRA_LATENCY = 0.150
def play(note, *args, **kwargs):
    """Play *note* on the synth previously selected with use() by this caller.

    Raises KeyError if the caller never called use().
    """
    cf = callerframe()
    cn = cf.f_code.co_name
    # Same caller-keying scheme as use()/sleep().
    hh = cn if cn == '<module>' else hash(cf)
    sy = syd[hh]
    # Continue from the caller's last scheduled time if any, else from now.
    tt = dtd.get(hh) or get_time()
    tt += PLAY_EXTRA_LATENCY
    return sy.play_new(note, t=tt, *args, **kwargs)
def sleep(dt=0):
    """Musical sleep: return an awaitable pausing *dt* note-value units.

    Advances the caller's scheduled time (dtd) so consecutive play()/sleep()
    calls stay on the musical grid. The caller must await the result.
    """
    tt = get_time()
    cf = callerframe()
    cn = cf.f_code.co_name
    # Same caller-keying scheme as use()/play().
    hh = cn if cn == '<module>' else hash(cf)
    sy = syd.get(hh)
    if sy is not None:
        # Convert note-value units to seconds using the global tempo.
        dt = dt * get_note_value() * 60 / get_bpm()
    t0 = dtd.get(hh)
    # Re-anchor the schedule if this caller has none, or it is >1s stale.
    if not t0 or t0 + 1 < tt:
        t0 = tt
    # Never schedule into the past.
    t1 = dtd[hh] = max(t0 + dt, tt)
    return asyncio.sleep(t1 - tt)
def stop():
    """Stop all sound output via the audio device module."""
    # Imported lazily to avoid pulling in the audio device at module load.
    from .device import stop_sound
    stop_sound()
|
from setuptools import setup, find_packages

# Use the README as the long description shown on PyPI.
with open("README.md", "rt", encoding="utf8") as f:
    readme = f.read()
setup(
    name="eth2_api_testgen",
    description="Eth2 API test gen",
    version="0.0.1",
    long_description=readme,
    # Fixed: PyPI only accepts "text/plain", "text/x-rst" or "text/markdown";
    # "text/x-markdown" is invalid metadata and gets rejected/rendered wrong.
    long_description_content_type="text/markdown",
    author="protolambda",
    author_email="proto+pip@protolambda.com",
    url="https://github.com/protolambda/eth2-api-testgen",
    python_requires=">=3.8, <4",
    license="MIT",
    packages=find_packages(),
    tests_require=[],
    install_requires=[
        "requests>=2.25.0"
    ],
    include_package_data=True,
    keywords=["eth2", "ethereum", "serenity", "api", "test"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Operating System :: OS Independent",
    ],
)
|
# tests/test_basic.py
import unittest
from base import BaseTestCase
from app import db
from app.models import Command
from app.core.helpers import getUserId, getGroupId, objectToJson
class DashboardTests(BaseTestCase):
    """Integration tests for the /dashboard view: login flow, access control
    and command listing."""
    # A logged-in user sees the dashboard with the Global Commands section.
    def test_normal_behavior(self):
        with self.client:
            self.client.post(
                '/login',
                data=dict(username="ghooo", password="ghooo"),
                follow_redirects=True
            )
            response = self.client.get(
                '/dashboard'
            )
            self.assertIn(b'Global Commands', response.data)
            self.assertEqual(response.status_code, 200)
    # The dashboard is only accessible to logged-in users.
    def test_unauthenticated_access(self):
        response = self.client.get('/dashboard', follow_redirects=True)
        self.assertIn(b'Please log in to access this page.', response.data)
    # Commands inserted directly into the DB show up in the rendered page,
    # split into global and user sections.
    def test_commands_displayed(self):
        with self.client:
            self.client.post(
                '/login',
                data=dict(username="ghooo", password="ghooo"),
                follow_redirects=True
            )
            db.session.add(Command(
                "s",
                "http://www.google.com/search?q=%s",
                "Google Search",
                getGroupId("global"),
                getUserId("ghooo")
            ))
            db.session.add(Command(
                "p",
                "http://pastie.org/",
                "Pastie",
                getUserId("ghooo"),
                getUserId("ghooo")
            ))
            db.session.commit()
            response = self.client.get(
                '/dashboard'
            )
            self.assertIn(b'Global Commands', response.data)
            self.assertIn(b'User Commands', response.data)
            self.assertIn(b'Google Search', response.data)
class DatabaseURLCommandsTests(BaseTestCase):
    """Tests for the dashboard's AJAX command endpoints
    (add/update/delete/load user commands)."""
    def basic_check(self, test_url, cmd_dict, missing_error_msg):
        """Shared checks: GET not allowed, auth required, missing-value errors.

        ``cmd_dict`` is a complete, valid POST payload; each key in turn is
        blanked ('' and None) to verify the endpoint rejects incomplete input.
        """
        # get request not available
        response = self.client.get(test_url, follow_redirects=True)
        self.assertEqual(response.status_code, 405)
        # unauthenticated access
        response = self.client.post(test_url, follow_redirects=True)
        self.assertIn(b'Please log in to access this page.', response.data)
        with self.client:
            self.client.post(
                '/login',
                data=dict(username="ghooo", password="ghooo"),
                follow_redirects=True
            )
            # missing values
            for key in cmd_dict:
                for new_value in ['',None]:
                    # Fixed: copy the payload so exactly ONE key is blanked
                    # per request. The previous `tmp_dict = cmd_dict` aliased
                    # the dict, permanently clobbering keys for all later
                    # iterations.
                    tmp_dict = dict(cmd_dict)
                    tmp_dict[key] = new_value
                    response = self.client.post(
                        test_url,
                        data=tmp_dict
                    )
                    self.assertEqual(response.status_code, 400)
                    self.assertEqual(response.data,missing_error_msg)
    # ensure addusercommand working according to expectations
    def test_addusercommand(self):
        test_url = '/dashboard/_addusercommand'
        self.basic_check(
            test_url,
            dict(cmd_id='p',url="http://pastie.org",name="Pastie"),
            'A value is missing'
        )
        with self.client:
            self.client.post(
                '/login',
                data=dict(username="ghooo", password="ghooo"),
                follow_redirects=True
            )
            # normal behavior
            response = self.client.post(
                test_url,
                data=dict(cmd_id='g',url="http://www.google.com/search?q=%s",
                          name="Google Search")
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.data,'{"name": "Google Search", '
                '"creator": 2, "url": "http://www.google.com/search?q=%s",'
                ' "cmd_id": "g", "owner": 2, "id": 1}')
            # inserting duplicates
            response = self.client.post(
                test_url,
                data=dict(cmd_id='g',url="http://www.google.com/search?q=%s",
                          name="Google Search")
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(response.data,'Command ID already exists')
    # ensure updateusercommand working according to expectations
    def test_updateusercommand(self):
        test_url = '/dashboard/_updateusercommand'
        self.basic_check(
            test_url,
            dict(cmd_id='p'),
            'A value is missing'
        )
        with self.client:
            self.client.post(
                '/login',
                data=dict(username="ghooo", password="ghooo"),
                follow_redirects=True
            )
            # updating non-existent command
            response = self.client.post(
                test_url,
                data=dict(cmd_id='g',url="http://www.google.com/search?q=%s",
                          name="Google Search")
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(response.data,'Unexpected Error')
            # normal behavior
            response = self.client.post(
                'dashboard/_addusercommand',
                data=dict(cmd_id='g',url="http://www.google.com/search?q=%s",
                          name="Google Search")
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.data,'{"name": "Google Search", '
                '"creator": 2, "url": "http://www.google.com/search?q=%s",'
                ' "cmd_id": "g", "owner": 2, "id": 1}')
            response = self.client.post(
                test_url,
                data=dict(cmd_id='g',url="http://www.bing.com/search?q=%s",
                          name="Trolling")
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.data,'{"name": "Trolling", "creator": 2, '
                '"url": "http://www.bing.com/search?q=%s", "cmd_id": "g", '
                '"owner": 2, "id": 1}')
            # The response must match the command actually stored in the DB.
            self.assertEqual(response.data,
                objectToJson(Command.query.filter_by().first()))
    # ensure deleteusercommand working according to expectations
    def test_deleteusercommand(self):
        test_url = '/dashboard/_deleteusercommand'
        self.basic_check(
            test_url,
            dict(cmd_id='p'),
            'Command ID is missing.'
        )
        with self.client:
            self.client.post(
                '/login',
                data=dict(username="ghooo", password="ghooo"),
                follow_redirects=True
            )
            # deleting non-existent command (endpoint is idempotent)
            response = self.client.post(
                test_url,
                data=dict(cmd_id='g',url="http://www.google.com/search?q=%s",
                          name="Google Search")
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.data,'success')
            # normal behavior
            response = self.client.post(
                'dashboard/_addusercommand',
                data=dict(cmd_id='g',url="http://www.google.com/search?q=%s",
                          name="Google Search")
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.data,'{"name": "Google Search", '
                '"creator": 2, "url": "http://www.google.com/search?q=%s",'
                ' "cmd_id": "g", "owner": 2, "id": 1}')
            response = self.client.post(
                test_url,
                data=dict(cmd_id='g')
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.data,'success')
    # ensure loadusercommand working according to expectations
    def test_loadusercommands(self):
        test_url = '/dashboard/_loadusercommands'
        # unauthenticated access
        response = self.client.get(test_url, follow_redirects=True)
        self.assertIn(b'Please log in to access this page.', response.data)
        with self.client:
            self.client.post(
                '/login',
                data=dict(username="ghooo", password="ghooo"),
                follow_redirects=True
            )
            # normal behavior
            self.client.post(
                'dashboard/_addusercommand',
                data=dict(cmd_id='g',url="http://www.google.com/search?q=%s",
                          name="Google Search")
            )
            self.client.post(
                'dashboard/_addusercommand',
                data=dict(cmd_id='p',url="http://pastie.org",
                          name="Pastie")
            )
            self.client.post(
                'dashboard/_addusercommand',
                data=dict(cmd_id='map',
                          url="https://www.google.com.eg/maps/search/%s",
                          name="Google Maps")
            )
            response = self.client.get(
                test_url,
                data=dict()
            )
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.data,
                '[{"name": "Google Search", "creator": 2, '
                '"url": "http://www.google.com/search?q=%s", "cmd_id": "g", '
                '"owner": 2, "id": 1}, {"name": "Pastie", "creator": 2, '
                '"url": "http://pastie.org", "cmd_id": "p", "owner": 2, '
                '"id": 2}, {"name": "Google Maps", "creator": 2, '
                '"url": "https://www.google.com.eg/maps/search/%s", '
                '"cmd_id": "map", "owner": 2, "id": 3}]')
# Allow running this test module directly with `python test_basic.py`.
if __name__ == '__main__':
    unittest.main()
|
from dataclasses import dataclass
from functools import lru_cache
from pathlib import Path
from typing import List, Set
@dataclass(frozen=True)
class Location:
    """An immutable (x, y) grid coordinate."""
    x: int
    y: int

    def __add__(self, other: "Location") -> "Location":
        """Component-wise vector addition."""
        return Location(self.x + other.x, self.y + other.y)

    def neighbours(self) -> Set["Location"]:
        """Return the four orthogonally adjacent locations."""
        return {self + step for step in DIRECTIONS}
# The four orthogonal unit steps (up, down, right, left) used for
# neighbourhood checks.
DIRECTIONS = (
    Location(0, 1),
    Location(0, -1),
    Location(1, 0),
    Location(-1, 0),
)
@dataclass(frozen=True)
class Grid:
    """A rectangular height map of single-digit cells (AoC 2021 day 9)."""
    grid: List[List[int]]

    def get(self, location: Location) -> int:
        """Return the height stored at *location*."""
        row = self.grid[location.y]
        return row[location.x]

    @property
    def height(self) -> int:
        """Number of rows."""
        return len(self.grid)

    @property
    def width(self) -> int:
        """Number of columns (assumes a rectangular grid)."""
        return len(self.grid[0])

    def sum_risk_levels(self):
        """Total risk level (height + 1) over all low points."""
        return sum(
            [self.get_risk_level(point) for point in self.low_points()]
        )

    def get_risk_level(self, location: Location) -> int:
        """Risk level of a cell: its height plus one."""
        return self.get(location) + 1

    def low_points(self):
        """All locations strictly lower than every in-bounds neighbour."""
        points = []
        for x in range(self.width):
            for y in range(self.height):
                candidate = Location(x, y)
                if self.is_low_point(candidate):
                    points.append(candidate)
        return points

    def is_low_point(self, location: Location) -> bool:
        """True if no in-bounds neighbour is as low as *location*."""
        height_here = self.get(location)
        for step in DIRECTIONS:
            neighbour = location + step
            if not self.is_inside(neighbour):
                continue
            if self.get(neighbour) <= height_here:
                return False
        return True

    def is_inside(self, location: Location) -> bool:
        """True if *location* lies within the grid bounds."""
        return 0 <= location.x < self.width and 0 <= location.y < self.height

    @staticmethod
    def parse(description: str) -> "Grid":
        """Build a Grid from newline-separated rows of digits."""
        rows = [
            [int(digit) for digit in line]
            for line in description.split("\n")
        ]
        return Grid(rows)

    def find_basin_from_seed(self, location: Location) -> Set[Location]:
        """Flood-fill the basin (connected non-9 cells) containing *location*."""
        frontier = {location}
        basin = {location}
        while frontier:
            current = frontier.pop()
            # Neighbours that belong to a basin (height != 9).
            open_neighbours = self.non_9_locations() & current.neighbours()
            frontier |= open_neighbours - basin
            basin |= open_neighbours
        return basin

    def non_9_locations(self) -> Set[Location]:
        """All locations whose height is not 9 (i.e. part of some basin)."""
        members = set()
        for x in range(self.width):
            for y in range(self.height):
                spot = Location(x, y)
                if self.get(spot) != 9:
                    members.add(spot)
        return members
def silver(input_file_path: Path) -> int:
    """Part 1: total risk level of all low points in the input grid."""
    return Grid.parse(input_file_path.read_text()).sum_risk_levels()
def gold(input_file_path: Path) -> int:
    """Part 2: product of the sizes of the three largest basins."""
    grid = Grid.parse(input_file_path.read_text())
    basins = []
    # Every non-9 cell belongs to exactly one basin; grow a basin from any
    # remaining seed and remove its cells until none are left.
    remaining = grid.non_9_locations()
    while remaining:
        seed = remaining.pop()
        basin = grid.find_basin_from_seed(seed)
        basins.append(basin)
        remaining -= basin
    sizes = sorted(len(basin) for basin in basins)
    return sizes[-1] * sizes[-2] * sizes[-3]
|
import setuptools
import yaml

# Read package metadata from the conda-style meta.yaml. Use a context
# manager so the file handle is closed deterministically (the original
# left the open() result unclosed).
with open('./meta.yaml') as meta_file:
    meta = yaml.load(meta_file, Loader=yaml.FullLoader)
setuptools.setup(
    name=meta['package']['name'],
    author='aster',
    author_email='galaster@foxmail.com',
    url=meta['source']['url'],
    version=meta['package']['version'],
    description='',
    packages=['sgan'],
    install_requires=[
        # no pytorch
        'matplotlib',
        'numpy',
        'wolframclient'
    ],
)
|
"""All functions in this script are scraped via the :py:mod:`inspect` module when checking the arguments given in the config under the 'constraints' object/subdict.
A single value should be returned for any constraint.
"""
import numpy as np
import hawks.objectives
def overlap(indiv):
    """Calculate the amount of overlap (the percentage of points whose nearest neighbour is in a different cluster).
    Args:
        indiv (:class:`~hawks.genotype.Genotype`): A single individual (i.e. a dataset).
    Returns:
        float: The percentage of overlap between clusters.
    """
    # Calculate the distances
    # They may be there already, but in some cases need recalculation
    # More sophisticated checks may be able to streamline this?
    indiv.distances = hawks.objectives.Silhouette.calc_distances(indiv.all_values)
    # Use a masked array to ignore the 0s on the diagonal (but retain shape);
    # argmin per row then yields the index of each point's nearest *other*
    # point (self-distances are masked out).
    masked_dists = np.ma.argmin(
        np.ma.MaskedArray(indiv.distances, mask=indiv.distances == 0), axis=1)
    # Sum the number of Trues we get for this condition (nearest neighbour
    # shares the point's label). The Falses are the overlaps (cluster numbers
    # are different), so return 1 minus the fraction of Trues.
    return 1 - (np.sum(indiv.labels == indiv.labels[masked_dists])/len(indiv.labels))
def eigenval_ratio(indiv):
    """Calculate the eigenvalue ratio (or amount of eccentricity). This is ratio between the largest and smallest eigenvalues of the diagonal covariance matrix.
    Args:
        indiv (:class:`~hawks.genotype.Genotype`): A single individual (i.e. a dataset).
    Returns:
        float: The ratio of the largest to smallest eigenvalues.
    """
    # For a diagonal covariance matrix the eigenvalues are the diagonal
    # entries, so each cluster's ratio is max(diag)/min(diag); report the
    # cluster with the highest ratio.
    ratios = []
    for clust in indiv:
        eigenvalues = np.diag(clust.cov)
        ratios.append(np.max(eigenvalues) / np.min(eigenvalues))
    return np.max(ratios)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Graph
Contains methods relating to graph image exporting
"""
import urllib3
import requests
import os
import logging
from datetime import datetime
from requests import Response
from pathlib import PurePath
from pybix.api import ZabbixAPI
logger = logging.getLogger(__name__)
class GraphImage(object):
    """Class that handles getting/saving Zabbix Graph Images directly
    Note: This is not a Zabbix API object
    """
    def __init__(self,
                 url: str = None,
                 username: str = None,
                 password: str = None,
                 ssl_verify: bool = True):
        """Initialise the GraphImage session (including login)
        Arguments:
            url {str} -- Base URL to Zabbix (default: ZABBIX_SERVER environment variable or
                         https://localhost/zabbix)
            username {str} -- Zabbix Username (default: ZABBIX_USER environment variable or 'Admin')
            password {str} -- Zabbix Password (default: ZABBIX_PASSWORD environment variable or 'zabbix')
            ssl_verify {bool} -- Whether to attempt SSL verification during call (default: True)
        """
        url = url or os.environ.get(
            'ZABBIX_SERVER') or 'http://localhost/zabbix'
        # Strip the API endpoint suffix so BASE_URL is always the web-UI root.
        # (Fixed: the condition was inverted — it kept '/api_jsonrpc.php' in
        # BASE_URL whenever the URL ended with it, breaking the '/index.php'
        # and chart URLs built from BASE_URL below.)
        self.BASE_URL = url.replace(
            "/api_jsonrpc.php",
            "") if url.endswith('/api_jsonrpc.php') else url
        payload = {
            'name': username or os.environ.get('ZABBIX_USER') or 'Admin',
            'password': password or os.environ.get('ZABBIX_PASSWORD')
            or 'zabbix',  # noqa: W503
            'enter': 'Sign in'
        }
        self.SESSION = requests.Session()
        self.SSL_VERIFY = ssl_verify
        # Perform Login (note: not via Zabbix API since it doesn't
        # expose graph exports, only configuration)
        logger.debug(
            f"GraphImage: Attempting to login to Zabbix server at {self.BASE_URL}/index.php"
        )
        if not self.SSL_VERIFY:
            # Suppress noisy warnings when the caller opted out of TLS checks.
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        self.SESSION.post(f"{self.BASE_URL}/index.php",
                          data=payload,
                          verify=self.SSL_VERIFY)
    def _get_by_graph_id(self,
                         graph_id: str,
                         from_date: str = "now-1d",
                         to_date: str = "now",
                         width: str = "1782",
                         height: str = "452",
                         output_path: str = None) -> str:
        """Gets the Zabbix Graph by Graph ID and save to file based on output_path
        Arguments:
            graph_id {str} -- Zabbix Graph object ID
            from_date {str} -- Time to graph from like "now-x", "2019-08-03 16:20:04" etc (default: now-1d)
            to_date {str} -- Time to graph until like "now", "2019-08-03 16:20:04" etc (default: now)
            width {str} -- Width of graph (default: 1782)
            height {str} -- Height of graph (default: 452)
            output_path {str} -- (default: os.getcwd())
        Returns:
            file_name {str} -- The name of the saved graph image
        """
        # TODO provide some input validation
        # chart2.php renders a saved (configured) graph by its graph ID.
        with self.SESSION.get(
                f"{self.BASE_URL}/chart2.php?graphid={graph_id}&from={from_date}&to={to_date}"
                f"&profileIdx=web.graphs.filter&width={width}&height={height}",
                stream=True) as image:
            file_name = self._save(
                image, f"graph-{graph_id}-from-{from_date}-to-{to_date}",
                output_path)
        return file_name
    def _get_by_item_ids(self,
                         item_ids: list,
                         from_date: str = "now-1d",
                         to_date: str = "now",
                         width: str = "1782",
                         height: str = "452",
                         batch: str = "1",
                         graph_type: str = "0",
                         output_path: str = None) -> str:
        """Gets the Zabbix adhoc Graph by Item ID(s) and save to file based on output_path
        Arguments:
            item_ids {list(str)} -- Zabbix Item object ID(s)
            from_date {str} -- Time to graph from like "now-x", "2019-08-03 16:20:04" etc (default: now-1d)
            to_date {str} -- Time to graph until like "now", "2019-08-03 16:20:04" etc (default: now)
            width {str} -- Width of graph (default: 1782)
            height {str} -- Height of graph (default: 452)
            batch {str} -- Whether to get all values (0) or averages (1) (default: 1)
            type {str} -- Whether to get normal overlay graph (0) or stacked graph (1) (default: 0)
            output_path {str} -- Path to save to (default: None)
        Returns:
            file_name {str} -- The name of the saved graph image
        """
        # TODO provide some input validation
        # chart.php expects itemids as an URL-encoded map: itemids[<id>]=<id>.
        encoded_itemids = "&".join(
            [f"itemids%5B{item_id}%5D={item_id}" for item_id in item_ids])
        formatted_itemids = "-".join(item_ids)
        with self.SESSION.get(
                f"{self.BASE_URL}/chart.php?from={from_date}&to={to_date}&{encoded_itemids}"
                f"&type={graph_type}&batch={batch}&profileIdx=web.graphs.filter&width={width}&height={height}"
                f"",
                stream=True) as image:
            file_name = self._save(
                image,
                f"items-{formatted_itemids}-from-{from_date}-to-{to_date}",
                output_path)
        return file_name
    def _save(self,
              image: Response,
              graph_details: str,
              output_path: str = None) -> str:
        """Saves Image to file in format 'graphimage-<graph_details>-<<yearmonthday>.png'
        Arguments:
            image {Response} -- Binary stream representing image to be saved
            graph_details {str} -- Either Zabbix Graph or Item ID
            output_path {str} -- Path to save to (default: os.getcwd())
        Returns:
            file_name {str} -- The name of the saved graph image, or "" on failure
        """
        output_path = output_path or os.getcwd()
        file_name = PurePath(
            output_path,
            f"zabbix_{graph_details}_{datetime.now().strftime('%Y%m%d-%H%M%S')}.png"
        ).__str__()
        try:
            with open(file_name, 'wb') as f:
                for chunk in image.iter_content(chunk_size=8192):
                    if chunk:  # Filter out keep-alive new chunks
                        f.write(chunk)
        except FileNotFoundError as ex:
            # Bad output_path: report and signal failure with "".
            logger.error(
                f"_save(): Unable to save to output_path:{output_path}")
            logger.error(f"         Exception:{ex}")
            return ""
        logger.debug(f"_save(): Saved GraphImage to {file_name}")
        return file_name
class GraphImageAPI(GraphImage):
"""Helper class for easier Zabbix Graph Image calls"""
    def __init__(self,
                 url: str = None,
                 user: str = None,
                 password: str = None,
                 output_path: str = None,
                 ssl_verify: bool = True):
        """Initialise the GraphImage session (including login)
        Arguments:
            url {str} -- Base URL to Zabbix (default: ZABBIX_SERVER environment variable or
                         https://localhost/zabbix)
            user {str} -- Zabbix Username (default: ZABBIX_USER environment variable or 'Admin')
            password {str} -- Zabbix Password (default: ZABBIX_PASSWORD environment variable or 'zabbix')
            output_path {str} -- Path of directory to save to (default: os.getcwd())
            ssl_verify {bool} -- Whether to attempt SSL verification during call (default: True)
        """
        # Web-UI session login (for downloading graph images).
        super().__init__(url, user, password, ssl_verify=ssl_verify)
        # Separate API session used to resolve graph/item names and keys to IDs.
        self.ZAPI = ZabbixAPI(url, ssl_verify=ssl_verify)
        self.ZAPI.login(user, password)
        self.OUTPUT_PATH = output_path
def get(self, search_type, **kwargs):
"""Pass through method that calls appropriate get based on search type
Arguments:
kwargs {dict} -- Key/values to pass through as parameters
Returns:
file_name {str} -- The name of the saved graph image
"""
logger.debug(f"type: {search_type} - kwargs: {kwargs}")
if search_type == "graph_id":
return self.get_by_graph_id(**kwargs)
elif search_type == "graph_name":
return self.get_by_graph_name(**kwargs)
elif search_type == "item_names":
return self.get_by_item_names(**kwargs)
elif search_type == "item_keys":
return self.get_by_item_keys(**kwargs)
elif search_type == "item_ids":
return self.get_by_item_ids(**kwargs)
else:
raise ValueError("Invalid search type. Expecting (graph_id, graph_name, item_names, "
"item_keys, item_ids")
def get_by_graph_id(self,
graph_id: str,
from_date: str = "now-1d",
to_date: str = "now",
width: str = "1782",
height: str = "452") -> str:
"""Get by Zabbix Graph ID and save to file based on output_path
Arguments:
graph_id {str} -- Zabbix Graph object ID
from_date {str} -- Time to graph from like "now-x", "2019-08-03 16:20:04" etc (default: now-1d)
to_date {str} -- Time to graph until like "now", "2019-08-03 16:20:04" etc (default: now)
width {str} -- Width of graph (default: 1782)
height {str} -- Height of graph (default: 452)
Returns:
file_name {str} -- The name of the saved graph image
"""
return self._get_by_graph_id(graph_id=graph_id,
from_date=from_date,
to_date=to_date,
width=width,
height=height,
output_path=self.OUTPUT_PATH)
    def get_by_item_ids(self,
                        item_ids: list,
                        host_names: list = None,
                        from_date: str = "now-1d",
                        to_date: str = "now",
                        width: str = "1782",
                        height: str = "452",
                        batch: str = "1",
                        graph_type: str = "0"):
        """Gets the Zabbix adhoc Graph by Item ID(s) and save to file based on output_path
        Arguments:
            item_ids {list(str)} -- Zabbix Item object ID(s)
            host_names {list(str)} -- NOTE(review): accepted but never used in
                                      this method — confirm whether it should
                                      filter item_ids or be removed.
            from_date {str} -- Time to graph from like "now-x", "2019-08-03 16:20:04" etc (default: now-1d)
            to_date {str} -- Time to graph until like "now", "2019-08-03 16:20:04" etc (default: now)
            width {str} -- Width of graph (default: 1782)
            height {str} -- Height of graph (default: 452)
            batch {str} -- Whether to get all values (0) or averages (1) (default: 1)
            type {str} -- Whether to get normal overlay graph (0) or stacked graph (1) (default: 0)
        Returns:
            file_name {str} -- The name of the saved graph image
        """
        return self._get_by_item_ids(item_ids=item_ids,
                                     from_date=from_date,
                                     to_date=to_date,
                                     width=width,
                                     height=height,
                                     batch=batch,
                                     graph_type=graph_type,
                                     output_path=self.OUTPUT_PATH)
def get_by_item_keys(self,
item_keys: list,
host_names: list = None,
from_date: str = "now-1d",
to_date: str = "now",
width: str = "1782",
height: str = "452",
graph_type: str = "0"):
"""Gets the Zabbix Graph by Item key(s) and save to file based on output_path. E.g. 'agent.ping'
Arguments:
item_keys {list(str)} -- Zabbix Item object key(s)
from_date {str} -- Time to graph from like "now-x", "2019-08-03 16:20:04" etc (default: now-1d)
to_date {str} -- Time to graph until like "now", "2019-08-03 16:20:04" etc (default: now)
width {str} -- Width of graph (default: 1782)
height {str} -- Height of graph (default: 452)
batch {str} -- Whether to get all values (0) or averages (1) (default: 1)
type {str} -- Whether to get normal overlay graph (0) or stacked graph (1) (default: 0)
Returns:
file_name {str} -- The name of the saved graph image
"""
if not item_keys:
raise ValueError("item_keys cannot be an empty string")
if host_names:
host_ids = [
host['hostid']
for host in self.ZAPI.host.get(filter={'host': host_names})
]
items = [
item for item in self.ZAPI.item.get(hostids=host_ids,
filter={'key_': item_keys})
]
else:
items = [
item for item in self.ZAPI.item.get(search={'key_': item_keys})
]
if not items:
logger.warn("get_by_graphname: No graphs returned")
return [""]
else:
return self.get_by_item_ids(
item_ids=[item['itemid'] for item in items],
from_date=from_date,
to_date=to_date,
width=width,
height=height,
graph_type=graph_type)
def get_by_item_names(self,
item_names: list,
host_names: list = None,
from_date: str = "now-1d",
to_date: str = "now",
width: str = "1782",
height: str = "452",
batch: str = "1",
graph_type: str = "0"):
"""Gets the Zabbix Graph by Item name(s) and save to file based on output_path. E.g. 'CPU'
Arguments:
item_names {list(str)} -- Zabbix Item object name(s)
from_date {str} -- Time to graph from like "now-x", "2019-08-03 16:20:04" etc (default: now-1d)
to_date {str} -- Time to graph until like "now", "2019-08-03 16:20:04" etc (default: now)
width {str} -- Width of graph (default: 1782)
height {str} -- Height of graph (default: 452)
batch {str} -- Whether to get all values (0) or averages (1) (default: 1)
type {str} -- Whether to get normal overlay graph (0) or stacked graph (1) (default: 0)
Returns:
file_name {str} -- The name of the saved graph image
"""
if not item_names:
raise ValueError("item_names cannot be an empty string")
if host_names:
host_ids = [
host['hostid']
for host in self.ZAPI.host.get(filter={'host': host_names})
]
items = [
item
for item in self.ZAPI.item.get(hostids=host_ids,
search={'name': item_names})
]
else:
items = [
item
for item in self.ZAPI.item.get(search={'name': item_names})
]
if not items:
logger.warn("get_by_graphname: No graphs returned")
return [""]
else:
return self.get_by_item_ids(
item_ids=[item['itemid'] for item in items],
from_date=from_date,
to_date=to_date,
width=width,
height=height,
graph_type=graph_type)
def get_by_graph_name(self,
graph_name: str,
host_names: list = None,
from_date: str = "now-1d",
to_date: str = "now",
width: str = "1782",
height: str = "452") -> list:
"""Get graph images by graph name (e.g. 'CPU')
Arguments:
graph_name {str} == filter by graph name
host_names {list} == filter by host names (default: None, so get for ALL hosts),
from_date {str} -- Time to graph from like "now-x", "2019-08-03 16:20:04" etc (default: now-1d)
to_date {str} -- Time to graph until like "now", "2019-08-03 16:20:04" etc (default: now)
width {str} -- Width of graph (default: 1782)
height {str} -- Height of graph (default: 452)
"""
if not graph_name:
raise ValueError("graph_name cannot be an empty string")
if host_names:
host_ids = [
host['hostid']
for host in self.ZAPI.host.get(filter={'host': host_names})
]
graphs = [
graph for graph in self.ZAPI.graph.get(hostids=host_ids)
if graph_name.lower() in graph['name'].lower()
]
else:
graphs = [
graph for graph in self.ZAPI.graph.get()
if graph_name.lower() in graph['name'].lower()
]
if not graphs:
logger.warn("get_by_graphname: No graphs returned")
return [""]
else:
return [
self.get_by_graph_id(graph_id=graph['graphid'],
from_date=from_date,
to_date=to_date,
width=width,
height=height) for graph in graphs
]
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
linprog.py: Linear program to empirically find values of a, b, c.
a is how much a constraint going from the whole plain to a circle is worth.
b is how much a constraint going from a circle to two points is worth.
c is how much a constrint going from two points to a single point is worth.
"""
import numpy as np
from scipy.optimize import linprog
def setup_constraints(c, eps):
    """Build the LP data (A, b, f) for estimating a and b.

    Each inequality encodes one observed reconstruction outcome: a
    'success' configuration must satisfy the constraint, and the matching
    'fail' configuration (one measurement removed) must violate it by at
    least eps.  The objective f minimizes a + b + K.
    """
    # (row of A_ub, right-hand side) pairs; comments give the measurement
    # multiset each observation came from.
    observations = [
        ([-3.0, -3, 1], 3 * c),        # success [3, 3, 3]
        ([3.0, 3, -1], -2 * c - eps),  # fail [3, 3, 2]
        ([-4.0, -3, 1], 2 * c),        # success [3, 3, 2, 1]
        ([4.0, 2, -1], -2 * c - eps),  # fail [3, 3, 1, 1]
        ([-4.0, -3, 1], c),            # success [3, 2, 2, 1]
        ([-4.0, -3, 1], 0.0),          # success [2, 2, 2, 1]
        ([3.0, 3, -1], -eps),          # fail [2, 2, 2]
        ([4.0, 2, -1], -eps),          # fail [2, 2, 1, 1]
        ([-5.0, -2, 1], 0),            # success [2, 2, 1, 1, 1]
        ([5.0, 1, -1], -eps),          # fail [2, 1, 1, 1, 1]
        ([6.0, 0, -1], -eps),          # fail [1, 1, 1, 1, 1, 1]
        ([-7.0, 0, 1], 0),             # success [1, 1, 1, 1, 1, 1, 1]
        ([-1.0, 1, 0], 0),             # a >= b
        ([0.0, -1, 0], -c),            # b >= c
    ]
    A = [row for row, _ in observations]
    b = [bound for _, bound in observations]
    f = [1, 1, 1]  # minimize K
    return A, b, f
if __name__ == "__main__":
    n_complexity = 3
    dim = 2
    # TODO: the results depend on the value of the slack variable epsilon.
    # It shouldn't....
    eps = 0.1
    # TODO: chosen like this so that the resulting values for a, b == 1.0.
    c = 0.45
    A, b_vec, f = setup_constraints(c, eps)
    res = linprog(f, A_ub=A, b_ub=b_vec)
    if not res.success:
        # Fail loudly: res.x may be unusable, so don't fall through to the
        # unpacking below.
        print('ERROR: no solution found!')
        print(res)
        raise SystemExit(1)
    print(res)
    a, b, K = res.x
    print('a={:2.2f}, b={:2.2f}, c={:2.2f}, K={:2.2f}'.format(a, b, c, K))
    # Sanity check: A x - b <= 0 for every inequality constraint.
    A = np.array(A)
    x = np.array(res.x)
    b_vec = np.array(b_vec)
    print('constraints satisfied: should all be negative \n', A.dot(x) - b_vec)
|
''' My Application '''
import re
import yaml
from quick_scheme import KeyBasedList
from quick_scheme import ListOfReferences, ListOfNodes
from quick_scheme import SchemeNode, Field
from quick_scheme import qs_yaml
DATA = '''
version: 1
updates:
# this is our update log to demonstrate lists
- '2019-08-14: initial version'
- '2019-08-15: added user3'
- '2019-08-15: added project2'
users:
user1:
first_name: User
last_name: One
user2:
first_name: Another
last_name: User
email: another.user@mydomain.com
desc: Another User
user3:
first_name: Another
last_name: User
email: another.user@mydomain.com
desc: Another User
groups:
users:
desc: Regular Users
admins: Admins
projects:
project1:
desc: My First Project
order: 1
users:
- user1
groups:
- admins
project2:
desc: My Other Project
order: 3
groups:
- users
'''
EMAIL_REGEX = re.compile(r"[^@]+@[^@]+\.[^@]+")
def email_validator(field_value):
    ''' Validate an email field: an empty value passes when the field is
    optional, otherwise the value must match EMAIL_REGEX. '''
    value = field_value.get()
    if value or field_value.is_required:
        return EMAIL_REGEX.match(value)
    return True
class Group(SchemeNode):
    """A user group, keyed by its name in the top-level ``groups`` mapping."""
    FIELDS = [
        Field('groupname', identity=True),  # taken from the mapping key
        Field('desc', brief=True),  # brief: a bare string value is the description
    ]
class User(SchemeNode):
    """A user record keyed by username, with name, contact info and group refs."""
    FIELDS = [
        Field('username', identity=True),  # taken from the mapping key
        # NOTE(review): these fields pass ``type=`` while Project/Data use
        # ``ftype=`` - confirm quick_scheme accepts both spellings.
        Field('first_name', type=str, default="", required=True),
        Field('last_name', type=str, default="", required=True),
        # Optional; checked by email_validator when a value is present.
        Field('email', type=str, default="",
              required=False, validator=email_validator),
        Field('desc', type=str, default="No Description",
              required=False, brief=True),
        # References into the top-level ".groups" mapping.
        Field('groups', ftype=ListOfReferences(
            Group, ".groups", False), required=False),
    ]
class Project(SchemeNode):
    """A project keyed by name, referencing users and groups by their keys."""
    FIELDS = [
        Field('name', identity=True),  # taken from the mapping key
        Field('desc', default="No Description"),
        Field('order', ftype=int, required=True),
        Field('users', ftype=ListOfReferences(User, ".users"), required=False),
        Field('groups', ftype=ListOfReferences(
            Group, ".groups"), required=False),
    ]
class Data(SchemeNode):
    """Top-level document: version, update log and keyed collections."""
    FIELDS = [
        Field('version', ftype=str, default='1'),
        Field('updates', ftype=ListOfNodes(str)),  # free-form change log entries
        Field('groups', ftype=KeyBasedList(Group)),
        Field('users', ftype=KeyBasedList(User)),
        Field('projects', ftype=KeyBasedList(Project))
    ]
    # Keep YAML key order when dumping and tolerate keys not declared above.
    PRESERVE_ORDER = True
    ALLOW_UNDEFINED = True
def main():
    ''' Demo entry point: parse the sample document and pretty-print it. '''
    document = Data(data=qs_yaml.safe_load(DATA))
    rendered = qs_yaml.pretty_dump(document.quick_scheme.get_data())
    print("=====\nData:\n=====\n%s=====\n" % rendered)
if __name__ == "__main__":
    import logging
    # Surface INFO-level messages while running the demo.
    logging.basicConfig(level=logging.INFO)
    main()
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from logproj.P8_performanceAssessment.utilities_movements import getCoverageStats
from logproj.P9_workloadPrediction.demand_assessment import getAdvanceInPlanning
from logproj.ml_graphs import plotGraph
#%%
def createTabellaMovimenti( D_mov,
                            locfrom = 'LOADING_NODE',
                            locto= 'DISCHARGING_NODE',
                            capacityField='QUANTITY',
                            timeColumns=None
                            ):
    """Split every movement of D_mov into two rows: an IN row at the loading
    node and an OUT row at the discharging node.

    Arguments:
        D_mov {pd.DataFrame} -- movement table
        locfrom {str} -- column holding the loading node
        locto {str} -- column holding the discharging node
        capacityField {str} -- column holding the moved quantity
        timeColumns {dict} -- maps logical time-field names (e.g.
            'loadingpta') to the actual column names in D_mov

    Returns:
        pd.DataFrame -- one row per node touched, with 'Location', 'InOut'
        ('IN'/'OUT') and a signed 'Movementquantity' (positive for IN,
        negative for OUT).
    """
    # Avoid the shared mutable-default pitfall: fresh dict per call.
    if timeColumns is None:
        timeColumns = {}
    # Split each movement into two rows, one IN and one OUT
    print("**** DEFINISCO D MOV IN/OUT ****")
    # Check which date fields are present and pick the grouping columns
    columnsCompleteFrom = ['loadingpta', 'loadingptd', 'loadingata', 'loadingatd']
    columnsCompleteTo= ['dischargingpta','dischargingptd','dischargingata','dischargingatd']
    columnsPresentFrom = [ timeColumns[col] for col in list(timeColumns) if col in columnsCompleteFrom ]
    columnsPresentTo= [ timeColumns[col] for col in list(timeColumns) if col in columnsCompleteTo ]
    # The "from" view drops discharging columns, and vice versa.
    selectColumnFrom=list(D_mov.columns)
    for col in [locto, *columnsPresentTo]:
        if col in selectColumnFrom: selectColumnFrom.remove(col)
    selectColumnTo=list(D_mov.columns)
    for col in [locfrom, *columnsPresentFrom]:
        if col in selectColumnTo: selectColumnTo.remove(col)
    # Map whichever time columns are present onto canonical PTA/PTD/ATA/ATD
    allcolumnstorename = {'loadingpta':'PTA',
                          'loadingptd':'PTD',
                          'loadingata':'ATA',
                          'loadingatd':'ATD',
                          'dischargingpta':'PTA',
                          'dischargingptd':'PTD',
                          'dischargingata':'ATA',
                          'dischargingatd':'ATD',}
    renameDictionarycomplete = {locto:'Location',
                                locfrom:'Location'
                                }
    for col in allcolumnstorename.keys():
        if col in timeColumns.keys():
            renameDictionarycomplete[timeColumns[col]]=allcolumnstorename[col]
    # Duplicate the movements and rename the columns
    D1=D_mov[selectColumnFrom]
    D1=D1.rename(columns=renameDictionarycomplete)
    D1['InOut']='IN'
    D2=D_mov[selectColumnTo]
    D2=D2.rename(columns=renameDictionarycomplete)
    D2['InOut']='OUT'
    # Build the combined movements table
    D=pd.concat([D1,D2])
    # Assign signed quantities: +quantity for IN, -quantity for OUT
    MovimentiIN=(D.InOut=='IN')*1
    MovimentiOUT=(D.InOut=='OUT')*(-1)
    D['Movementquantity']=MovimentiIN+MovimentiOUT
    D['Movementquantity']=D.Movementquantity*D[capacityField]
    return D
# %%
def defineRouteTable(D,agregationVariables=['VEHICLE_CODE','VOYAGE_CODE'],actual='PROVISIONAL'):
    """Aggregate the split IN/OUT movement table into one row per route stop.

    Groups D by the aggregation variables plus Location and the time pair
    (PTA/PTD when actual=='PROVISIONAL', ATA/ATD when actual=='ACTUAL');
    'Movementquantity' is summed, every other column collapses into a
    de-duplicated list.

    Returns (D_route, timestartfield, timeendfield).

    NOTE(review): any other value of ``actual`` leaves listCol/D_route
    unassigned and raises NameError - confirm callers only pass these two
    literals.  The mutable default list is only read, so it is harmless.
    """
    # Import a dataframe D containing movements and define a route dataframe
    print("**** DEFINISCO ROUTE ****")
    # Build the aggregation dictionary for the groupby:
    # the quantity is summed, every other column becomes a list
    aggregation_dictionary = {'Movementquantity':np.sum}
    if actual=='PROVISIONAL':
        listCol = [*agregationVariables,'Location','PTA','PTD','Movementquantity','_id']
    elif actual=='ACTUAL':
        listCol = [*agregationVariables,'Location','ATA','ATD','Movementquantity','_id']
    aggregation_columns = [col for col in D.columns if col not in listCol ]
    for col in aggregation_columns:
        # list(set(...)) de-duplicates but does not preserve order.
        aggregation_dictionary[col] = lambda group_series: list(set(group_series.tolist()))
    # Drop columns containing dictionaries (unhashable, set() would fail)
    for col in list(aggregation_dictionary):
        if any([ isinstance(i,dict) for i in D[col] ]):
            print(col)
            aggregation_dictionary.pop(col)
    # Rebuild the route actually travelled
    if actual=='PROVISIONAL':
        D_route=D.groupby([*agregationVariables,'Location','PTA','PTD']).agg(aggregation_dictionary).reset_index()
        timestartfield='PTA'
        timeendfield='PTD'
    elif actual=='ACTUAL':
        D_route=D.groupby([*agregationVariables,'Location','ATA','ATD']).agg(aggregation_dictionary).reset_index()
        timestartfield='ATA'
        timeendfield='ATD'
    return D_route, timestartfield, timeendfield
# In[4]: #Statistiche sui voyage
def D_voyageStatistics( D_mov,
                        timefield='TIMESTAMP_IN',
                        locfrom = 'LOADING_NODE',
                        locto= 'DISCHARGING_NODE',
                        timeColumns={},
                        capacityField='QUANTITY',
                        censoredData=False,
                        voyagefield ='VOYAGE_CODE',
                        actual='PROVISIONAL'):
    """Reconstruct voyage routes, on-board inventory and travelled arcs.

    Returns three dataframes:
        D_route -- one row per voyage stop with movement-level attributes
            plus a reconstructed 'inventory' column
        D_arcs_route -- one row per travelled arc (consecutive stop pairs)
        D_coverages -- coverage statistics for the required time columns

    NOTE(review): when the required time columns are missing, ``accuracy``
    is never assigned but pd.DataFrame(accuracy) below still reads it,
    raising NameError and discarding the error message stored in
    D_coverages - confirm intent.
    NOTE(review): ``timeColumns={}`` is a shared mutable default; it is
    only read here, so it is currently harmless.
    """
    # Initialize the outputs to empty frames
    D_route = D_arcs_route = D_coverages = pd.DataFrame()
    # Compute coverage stats and validate the required input columns
    if actual=='PROVISIONAL':
        colonneNecessarie = ['loadingpta','loadingptd','dischargingpta','dischargingptd']
        if all([column in timeColumns.keys() for column in colonneNecessarie ]):
            allcolumns = [locfrom,locto, timeColumns['loadingpta'],timeColumns['loadingptd'],timeColumns['dischargingpta'],timeColumns['dischargingptd']]
            accuracy, _ = getCoverageStats(D_mov,analysisFieldList=allcolumns,capacityField='QUANTITY')
        else:
            colonneMancanti=[column for column in colonneNecessarie if column not in timeColumns.keys()]
            D_coverages=pd.DataFrame([f"NO columns {colonneMancanti} in timeColumns"])
    elif actual == 'ACTUAL':
        colonneNecessarie = ['loadingata','loadingatd','dischargingata','dischargingatd']
        if all([column in timeColumns.keys() for column in colonneNecessarie ]):
            allcolumns = [locfrom,locto, timeColumns['loadingata'],timeColumns['loadingatd'],timeColumns['dischargingata'],timeColumns['dischargingatd']]
            accuracy, _ = getCoverageStats(D_mov,analysisFieldList=allcolumns,capacityField='QUANTITY')
        else:
            colonneMancanti=[column for column in colonneNecessarie if column not in timeColumns.keys()]
            D_coverages=pd.DataFrame([f"NO columns {colonneMancanti} in timeColumns"])
    # Record the coverage/accuracy stats
    D_coverages = pd.DataFrame(accuracy)
    D_arcs_route=pd.DataFrame()
    # Split each movement into an IN row and an OUT row
    D=createTabellaMovimenti(D_mov=D_mov,
                             locfrom = locfrom,
                             locto= locto,
                             capacityField=capacityField,
                             timeColumns=timeColumns)
    # Rebuild the routes
    D_route, timestartfield, timeendfield = defineRouteTable(D,agregationVariables =[voyagefield],actual=actual)
    # Identify the distinct voyages
    Voyages=np.unique(D_route[voyagefield])
    # Check whether there are censored data to flag:
    # identify the first planning day
    firstPlanningDay=min(D_mov[timefield].dt.date)
    # Estimate the mean booking lead time (days) to bound censored data
    _, df_advance = getAdvanceInPlanning(D_mov,loadingptafield=timeColumns['loadingpta'])
    mean_advanceInPlanning=df_advance.loc['ADVANCE_PLANNING_MEAN']['VALUE']
    std_advanceInPlanning=df_advance.loc['ADVANCE_PLANNING_STD']['VALUE']
    lowerBoundDataCensored=firstPlanningDay+pd.Timedelta(days=(mean_advanceInPlanning+std_advanceInPlanning))
    # Identify the last planning day
    lastPlanningDay=max(D_mov[timefield].dt.date)
    # Drop movements outside the reference horizon
    if(not(censoredData)): # when censored data should be excluded
        D_route=D_route[(D_route[timestartfield]>pd.to_datetime(lowerBoundDataCensored)) & (D_route[timeendfield]<pd.to_datetime(lastPlanningDay))]
        D_route = D_route.reset_index(drop=True)
    # Proceed only when uncensored data remain
    if len(D_route)==0:
        D_route=pd.DataFrame(["No uncensored data"])
        return D_route, D_arcs_route, D_coverages
    D_route['inventory']=np.nan
    print("**** RICOSTRUZIONE DEGLI INVENTARI ****")
    # Walk each voyage and reconstruct the residual capacity
    for i in range(0,len(Voyages)):
        voyage=Voyages[i]
        route = D_route[D_route[voyagefield]==voyage]
        print(f"==RICOSTRUISCO INVENTARIO VIAGGIO {voyage}, con {len(route)} MOVIMENTI")
        # If the voyage has at least one stop
        if len(route)>0:
            # Sort by time
            route=route.sort_values([timeendfield])
            # Cumulate the planned movements along the voyage.
            # NOTE(review): chained assignment (df[col].loc[idx] = ...) is
            # fragile in modern pandas - consider df.loc[idx, col].
            counter=0
            allIndex=[] # indices used later to update only this slice of the table
            for index, row in route.iterrows(): # route shares its index with D_route
                if counter==0:
                    D_route['inventory'].loc[index]=row['Movementquantity']
                    allIndex.append(index)
                else:
                    D_route['inventory'].loc[index]=row['Movementquantity'] + D_route['inventory'].loc[allIndex[counter-1]]
                    allIndex.append(index)
                counter=counter+1
            # Estimate the capacity by shifting the whole profile above zero
            allCapacities = D_route[D_route[voyagefield]==voyage]['inventory']
            slack=np.double(-min(allCapacities))
            D_route['inventory'].loc[allIndex]=D_route[D_route[voyagefield]==voyage]['inventory']+slack
            capMax=max(D_route['inventory'].loc[allIndex])
            # Re-read the rows updated with inventory values and sort again
            route=D_route[D_route[voyagefield]==voyage]
            route=route.sort_values([timeendfield])
            # Walk the route to build the from-to arcs dataframe
            for k in range(0,len(route)-1):
                # Current movement and the next one
                currentMovement=route.iloc[k]
                nextMovement=route.iloc[k+1]
                rowDictionary={'arcFrom':currentMovement.Location,
                               'arcTo': nextMovement.Location,
                               'departureFromALAP': currentMovement[timeendfield],
                               'arrivalToASAP':nextMovement[timestartfield],
                               'inventory':currentMovement.inventory,
                               'capacity':capMax-currentMovement.inventory}
                # Append every other attribute of the "from" stop
                add_columns_from = [col for col in currentMovement.index if col not in ['Location','timeendfield','inventory'] ]
                for col in add_columns_from:
                    rowDictionary[f"{col}_from"] = currentMovement[col]
                # Append every other attribute of the "to" stop
                add_columns_to = [col for col in nextMovement.index if col not in ['Location','timestartfield','inventory'] ]
                for col in add_columns_to:
                    rowDictionary[f"{col}_to"] = nextMovement[col]
                # Accumulate into the results dataframe.
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # migrate to pd.concat when upgrading.
                D_arcs_route = D_arcs_route.append(pd.DataFrame([rowDictionary]))
    return D_route, D_arcs_route, D_coverages
# %%
def returnFigureVoyage(D_route, D_arcs_route, lastPlanningDay=[], lowerBoundDataCensored=[], filteringfield='VOYAGE_CODE',sortTimefield='PTD'):
    """Return a dict of figures per voyage code: an inventory step chart and
    a flow graph of the travelled arcs.

    NOTE(review): lastPlanningDay / lowerBoundDataCensored default to [] but
    are passed to plt.axvline unconditionally - confirm callers always
    provide real timestamps.
    """
    figure_results={}
    for voyage in set(D_route[filteringfield]):
        # Inventory charts
        D_plannedRouteVessel = D_route[D_route[filteringfield]==voyage]
        if len(D_plannedRouteVessel)>0:
            D_plannedRouteVessel = D_plannedRouteVessel.sort_values(by=sortTimefield)
            # Capacity / inventory step chart
            figure=plt.figure(figsize=(20,10))
            plt.step(D_plannedRouteVessel[sortTimefield],D_plannedRouteVessel['inventory'], color='orange')
            plt.title(str(voyage)+' inventory')
            plt.xticks(rotation=30)
            # Draw the estimated capacity line and the horizon bounds
            capMax=max(D_plannedRouteVessel['inventory'])
            plt.plot(D_plannedRouteVessel[sortTimefield],[capMax]*len(D_plannedRouteVessel),'r--')
            plt.axvline(x=lastPlanningDay, color='red',linestyle='--')
            plt.axvline(x=lowerBoundDataCensored, color='red',linestyle='--')
            figure_results[f"{filteringfield}_{voyage}_inventory"]=figure
        # Flow charts on a graph
        D_plannedRouteVessel_fromTo = D_arcs_route[D_arcs_route[f"{filteringfield}_from"]==voyage]
        if len(D_plannedRouteVessel_fromTo)>0:
            # Plot the routes on a graph: count trips per arc
            FlowAnalysis=D_plannedRouteVessel_fromTo.groupby(['arcFrom', 'arcTo']).size().reset_index()
            FlowAnalysis=FlowAnalysis.rename(columns={0:'Trips'})
            fig1=plotGraph(df=FlowAnalysis,
                           edgeFrom='arcFrom',
                           edgeTo='arcTo',
                           distance='Trips',
                           weight='Trips',
                           title=str(voyage),
                           arcLabel=True)
            figure_results[f"{filteringfield}_{voyage}_graph"]=fig1
    return figure_results
# %% network graph clock
def graphClock(D_mov,
               loadingNode='LOADING_NODE',
               dischargingNode='DISCHARGING_NODE',
               sortingField='PTA_FROM',
               vehicle='VEHICLE_CODE',
               capacityField='QUANTITY',
               timeColumns={},
               actual='PROVISIONAL'):
    """Build per-vehicle "train schedule" charts (terminal vs time).

    Returns (output_figure, output_df): a dict of matplotlib figures keyed
    by vehicle, and a dict of coverage dataframes.

    NOTE(review): ``timeColumns={}`` is a shared mutable default (read-only
    here, so currently harmless).
    """
    output_figure={}
    output_df={}
    # Identify the required columns and compute coverage stats
    if actual=='PROVISIONAL':
        colonneNecessarie = ['loadingptd','dischargingpta']
        if all([column in timeColumns.keys() for column in colonneNecessarie ]):
            allcolumns = [loadingNode,dischargingNode,vehicle, timeColumns['loadingptd'],timeColumns['dischargingpta']]
            accuracy, _ = getCoverageStats(D_mov,analysisFieldList=allcolumns,capacityField='QUANTITY')
        else:
            colonneMancanti=[column for column in colonneNecessarie if column not in timeColumns.keys()]
            output_df['coverages']=pd.DataFrame([f"NO columns {colonneMancanti} in timeColumns"])
            return output_figure, output_df
    elif actual == 'ACTUAL':
        colonneNecessarie = ['loadingatd','dischargingata']
        if all([column in timeColumns.keys() for column in colonneNecessarie ]):
            allcolumns = [loadingNode,dischargingNode,vehicle, timeColumns['loadingatd'],timeColumns['dischargingata']]
            accuracy, _ = getCoverageStats(D_mov,analysisFieldList=allcolumns,capacityField='QUANTITY')
    # NOTE(review): unlike the PROVISIONAL branch, ACTUAL has no early return
    # for missing columns, leaving ``accuracy`` unbound below. Confirm.
    output_df[f"coverages_{actual}"]=pd.DataFrame(accuracy)
    # Map every terminal to an ordinate on the chart
    # (ideally they should be ordered along a line)
    terminal_dict={}
    D_mov=D_mov.sort_values(by=[sortingField])
    terminals = list(set([*D_mov[loadingNode], *D_mov[dischargingNode]]))
    for i in range(0,len(terminals)):
        terminal_dict[terminals[i]]=i
    # Split the movements into IN/OUT rows
    D = createTabellaMovimenti( D_mov,
                                locfrom = loadingNode,
                                locto= dischargingNode,
                                capacityField=capacityField,
                                timeColumns=timeColumns
                                )
    D_route, timestartfield, timeendfield = defineRouteTable(D,
                                                             agregationVariables =[vehicle],
                                                             actual=actual)
    for vessel in set(D_route[vehicle]):
        # NOTE(review): the mask below is built from D_mov but applied to
        # D_route, and the result is overwritten on the very next line, so no
        # per-vessel filtering actually happens here - this looks like it was
        # meant to be D_route[D_route[vehicle]==vessel].sort_values(...).
        # Confirm before changing.
        D_mov_filtered = D_route[D_mov[vehicle]==vessel]
        D_mov_filtered = D_route.sort_values(by=timestartfield)
        D_mov_filtered=D_mov_filtered.dropna(subset=[timestartfield, 'Location'])
        # Chart over the full time axis
        fig1=plt.figure()
        for i in range(1,len(D_mov_filtered)):
            # Travel segment between two consecutive stops
            x_array = [D_mov_filtered[timeendfield].iloc[i-1], D_mov_filtered[timestartfield].iloc[i]]
            y_array = [terminal_dict[D_mov_filtered['Location'].iloc[i-1]], terminal_dict[D_mov_filtered['Location'].iloc[i]]]
            plt.plot(x_array,y_array,color='orange',marker='o')
            # Dwell segment at the same terminal
            x_array = [D_mov_filtered[timestartfield].iloc[i], D_mov_filtered[timeendfield].iloc[i]]
            y_array = [terminal_dict[D_mov_filtered['Location'].iloc[i]], terminal_dict[D_mov_filtered['Location'].iloc[i]]]
            plt.plot(x_array,y_array,color='orange',marker='o')
        plt.ylabel('Terminal')
        plt.xlabel('Time')
        output_figure[f"Train_chart_{vessel}_{actual}"] = fig1
        if actual=='PROVISIONAL':
            time_from = timeColumns['loadingptd']
            time_to = timeColumns['dischargingpta']
        elif actual=='ACTUAL':
            time_from = timeColumns['loadingatd']
            time_to = timeColumns['dischargingata']
        # Chart collapsed onto a single day
        D_train=D_mov[D_mov[vehicle]==vessel]
        D_train['hour_from']=D_train[time_from].dt.time
        D_train['hour_to']=D_train[time_to].dt.time
        D_graph=D_train.groupby([loadingNode,dischargingNode,'hour_from','hour_to']).sum()[capacityField].reset_index()
        D_graph=D_graph.sort_values(by=capacityField, ascending=False)
        fig1=plt.figure()
        for i in range(0,len(D_graph)):
            x_array = [D_graph['hour_from'].iloc[i], D_graph['hour_to'].iloc[i]]
            y_array = [terminal_dict[D_graph[loadingNode].iloc[i]], terminal_dict[D_graph[dischargingNode].iloc[i]]]
            # Anchor the times to an arbitrary date so matplotlib can plot them
            my_day = datetime.date(1990, 1, 1)
            x_array = [ datetime.datetime.combine(my_day, t) for t in x_array ]
            plt.title(f"Train schedule chart VEHICLE: {vessel}")
            plt.plot(x_array,y_array,color='orange',marker='o',linewidth = np.log(D_graph[capacityField].iloc[i]))
        output_figure[f"Train_chart_daily_{vessel}_{actual}"] = fig1
        plt.close('all')
    return output_figure, output_df
|
import io
import gzip
import kvenjoy.graph
from kvenjoy.io import *
class Variable:
    """Representa a variable.
    A variable has a X/Y coordinates and a name.
    """
    def __init__(self, x, y, name):
        self.x = x
        self.y = y
        self.name = name
    @classmethod
    def load(cls, stm):
        """Construct a variable from given input stream.
        Arguments:
        stm    -- The input stream.
        return -- The constructed Variable object.
        """
        # Wire format: two floats (x, y) followed by a UTF string (name).
        coords = read_float(stm, 2)
        (label,) = read_utf_string(stm)
        return cls(coords[0], coords[1], label)
    def save(self, stm):
        """Write a variable to given output stream.
        Arguments:
        stm -- The output stream.
        """
        write_float(stm, self.x, self.y)
        write_utf_string(stm, self.name)
class Gap:
    """Represent a GAP file.
    A GAP file is a collection of stroke groups and variables.
    """
    def __init__(self, version, uuid, name, author, description, variables, stroke_groups):
        self.version = version
        self.uuid = uuid
        self.name = name
        self.author = author
        self.description = description
        self.variables = variables
        self.stroke_groups = stroke_groups
    def bounding_box(self):
        """Calculate bounding box that could just hold this gap
        Arguments:
        return -- (x0, y0, x1, y1) Left top and right bottom coordinates
                  of the bounding box, or (None, None, None, None) when
                  there are no variables and no stroke points.
        """
        # Collect one (x0, y0, x1, y1) box per variable and per stroke point,
        # then fold them with min/max.
        boxes = [(v.x, v.y, v.x, v.y) for v in self.variables]
        for group in self.stroke_groups:
            for stroke in group:
                boxes.extend(p.bounding_box() for p in stroke.points)
        if not boxes:
            return (None, None, None, None)
        return (min(bx[0] for bx in boxes),
                min(bx[1] for bx in boxes),
                max(bx[2] for bx in boxes),
                max(bx[3] for bx in boxes))
    @classmethod
    def load(cls, stm):
        """Construct a GAP object from given input stream.
        Arguments:
        stm    -- The input stream.
        return -- GAP object constructed from input stream.
        """
        # The payload is gzip-compressed; inflate it fully before parsing.
        payload = io.BytesIO(gzip.decompress(stm.read()))
        (version,) = read_int(payload)
        (uuid, name, author, description) = read_utf_string(payload, 4)
        (var_count,) = read_int(payload)
        variables = [Variable.load(payload) for _ in range(var_count)]
        (group_count,) = read_int(payload)
        stroke_groups = [kvenjoy.graph.Stroke.load_list(payload)
                         for _ in range(group_count)]
        return cls(version, uuid, name, author, description, variables, stroke_groups)
    def save(self, stm):
        """Write a GAP object to given output stream.
        Arguments:
        stm -- The output stream.
        """
        # Serialize into a scratch buffer first, then gzip the whole payload.
        payload = io.BytesIO()
        write_int(payload, self.version)
        write_utf_string(payload, self.uuid, self.name, self.author, self.description)
        write_int(payload, len(self.variables))
        for var in self.variables:
            var.save(payload)
        write_int(payload, len(self.stroke_groups))
        for group in self.stroke_groups:
            kvenjoy.graph.Stroke.save_list(payload, group)
        stm.write(gzip.compress(payload.getvalue()))
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from finished_files.survey_data_dictionary import DATA_DICTIONARY
# Load data
# We want to take the names list from our data dictionary
names = [x.name for x in DATA_DICTIONARY]
# Generate the list of names to import
usecols = [x.name for x in DATA_DICTIONARY if x.usecol]
# dtypes should be a dict of 'col_name' : dtype
dtypes = {x.name : x.dtype for x in DATA_DICTIONARY if x.dtype}
# same for converters
converters = {x.name : x.converter for x in DATA_DICTIONARY if x.converter}
# header=0 discards the CSV's own header row in favor of the dictionary's names.
df = pd.read_csv('data/survey.csv',
                 header=0,
                 names=names,
                 dtype=dtypes,
                 converters=converters,
                 usecols=usecols)
#%% Clean up data: remove disqualified users
# In the survey, any user who selected they don't use Python was then
# disqualified from the rest of the survey. So let's drop them here.
df = df[df['python_main'] != 'No, I don’t use Python for my current projects']
# Considering we now only have two categories left:
# - Yes
# - No, I use Python for secondary projects only
# Let's turn it into a bool
df['python_main'] = df['python_main'] == 'Yes'
#%% Plot the web dev / data scientist ratio
# In the survey, respondents were asked to estimate the ratio between
# the amount of web developers vs the amount of data scientists. Afterwards
# they were asked what they thought the most popular answer would be.
# Let's see if there's a difference!
# This is a categorical data point, and it's already ordered in the data
# dictionary. So we shouldn't sort it after counting the values.
ratio_self = df['webdev_science_ratio_self'].value_counts(sort=False)
ratio_others = df['webdev_science_ratio_others'].value_counts(sort=False)
# Let's draw a bar chart comparing the distributions
fig = plt.figure()
ax = fig.add_subplot(111)
RATIO_COUNT = ratio_self.count()
x = np.arange(RATIO_COUNT)
WIDTH = 0.4
# Paired bars: "self" estimates at x-WIDTH, "most popular" guesses at x.
self_bars = ax.bar(x-WIDTH, ratio_self, width=WIDTH, color='b', align='center')
others_bars = ax.bar(x, ratio_others, width=WIDTH, color='g', align='center')
ax.set_xlabel('Ratios')
ax.set_ylabel('Observations')
labels = [str(lbl) for lbl in ratio_self.index]
# Center each tick on the midpoint between the paired bars.
ax.set_xticks(x - 0.5 * WIDTH)
ax.set_xticklabels(labels)
ax.legend((self_bars[0], others_bars[0]),
          ('Self', 'Most popular'))
plt.show()
#%% Calculate the predicted totals
# Let's recode the ratios to numbers, and calculate the means
CONVERSION = {
    '10:1': 10,
    '5:1' : 5,
    '2:1' : 2,
    '1:1' : 1,
    '1:2' : 0.5,
    '1:5' : 0.2,
    '1:10': 0.1
}
# Series.replace accepts the mapping directly; passing dict views as two
# parallel lists relied on them staying aligned.
self_numeric = df['webdev_science_ratio_self'].replace(CONVERSION)
others_numeric = df['webdev_science_ratio_others'].replace(CONVERSION)
print(f'Self:\t\t{self_numeric.mean().round(2)} web devs / scientist')
print(f'Others:\t\t{others_numeric.mean().round(2)} web devs / scientist')
#%% Is the difference statistically significant?
result = scipy.stats.chisquare(ratio_self, ratio_others)
# The null hypothesis is that they're the same. Let's see if we can reject it
print(result)
#coding: utf-8
import json
from m3_ext.ui.app_ui import (
DesktopModel, DesktopLoader, BaseDesktopElement, MenuSeparator
)
from m3.actions import ControllerCache, ActionPack
class DesktopProcessor(object):
    """Builds the desktop UI model for a request and serializes it to JSON."""
    @classmethod
    def filter_factory(cls, request, place):
        u"""
        Return a predicate used to filter desktop elements:
        f(DesktopShortcut) -> bool.

        ``place`` is not used by this implementation.
        """
        def filter_by_permissions(elem):
            """
            Return True when the user may access the element's pack.
            Only works with DesktopShortcut instances - they carry a
            ``pack`` attribute; all other element kinds are always shown.
            :param elem: desktop element
            :type elem: DesktopShortcut
            :return: has_perm - whether the element is accessible
            :rtype: bool
            """
            pack = getattr(elem, 'pack', None)
            # NOTE(review): packs that ARE ActionPack instances bypass the
            # permission check entirely; only other pack-like objects get
            # has_perm() consulted - confirm this is intentional.
            if pack is None or isinstance(pack, ActionPack):
                return True
            else:
                return pack.has_perm(request)
        return filter_by_permissions
    @classmethod
    def _dump_element(cls, obj):
        """
        Return the UI element in a JSON-serializable form: MenuSeparator
        becomes '-'; other desktop elements become dicts with text/icon
        plus either subitems or a URL and context.
        :param obj: desktop element
        :type obj: MenuSeparator / BaseDesktopElement
        """
        if isinstance(obj, MenuSeparator):
            result = '-'
        elif isinstance(obj, BaseDesktopElement):
            result = {
                'text': obj.name,
                'icon': obj.icon,
                'extra': {}
            }
            if obj.has_subitems:
                result['items'] = list(obj.subitems)
            else:
                result['url'] = obj.url
            context = getattr(obj, 'context', {})
            if context:
                result['context'] = context
            else:
                result['context'] = {}
        else:
            raise TypeError("%r is not JSON-able!" % obj)
        return result
    @classmethod
    def _get_desktop(cls, request):
        """
        Assemble the Desktop elements (desktop icons, start menu, top
        toolbar and toolbox) for the request's user.
        :param request: request
        :type request: Request
        """
        desktop_model = DesktopModel(request, cls.filter_factory)
        ControllerCache.populate()
        DesktopLoader._success = False
        if hasattr(request, 'user'):
            DesktopLoader.populate(request.user, desktop=desktop_model)
        else:
            DesktopLoader.populate_desktop(desktop=desktop_model)
        return {
            'desktopItems': list(desktop_model.desktop.subitems),
            'menuItems': list(desktop_model.start_menu.subitems),
            'topToolbarItems': list(desktop_model.toptoolbar.subitems),
            'toolboxItems': list(desktop_model.toolbox.subitems),
        }
    @classmethod
    def process(cls, request):
        """
        Add the Desktop elements to the template context.
        :param request: request
        :type request: Request
        """
        desktop = cls._get_desktop(request)
        return {
            'desktop': json.dumps(
                desktop, indent=2, default=cls._dump_element,
                ensure_ascii=False),
            # TODO: move this over to the JS side!
            'desktop_icons': [
                (idx, i.name, i.icon)
                for (idx, i) in enumerate(desktop['desktopItems'], 1)
            ]
        }
|
from .character import (
CharacterBlackSpriteMerger,
CharacterLimeSpriteMerger,
CharacterMagentaSpriteMerger,
CharacterOliveSpriteMerger,
CharacterOrangeSpriteMerger,
CharacterPinkSpriteMerger,
CharacterRedSpriteMerger,
CharacterVioletSpriteMerger,
CharacterWhiteSpriteMerger,
CharacterYellowSpriteMerger,
CharacterBlueSpriteMerger,
CharacterCeruleanSpriteMerger,
CharacterCinnabarSpriteMerger,
CharacterCyanSpriteMerger,
CharacterGoldSpriteMerger,
CharacterGraySpriteMerger,
CharacterGreenSpriteMerger,
CharacterIrisSpriteMerger,
CharacterKhakiSpriteMerger,
CharacterLemonSpriteMerger,
CharacterEggChildSpriteMerger,
CharacterHiredHandSpriteMerger,
)
from .mounts import (
TurkeySpriteMerger,
RockdogSpriteMerger,
AxolotlSpriteMerger,
QilinSpriteMerger,
)
from .pets import MontySpriteMerger, PercySpriteMerger, PoochiSpriteMerger
__all__ = [
"CharacterBlackSpriteMerger",
"CharacterLimeSpriteMerger",
"CharacterMagentaSpriteMerger",
"CharacterOliveSpriteMerger",
"CharacterOrangeSpriteMerger",
"CharacterPinkSpriteMerger",
"CharacterRedSpriteMerger",
"CharacterVioletSpriteMerger",
"CharacterWhiteSpriteMerger",
"CharacterYellowSpriteMerger",
"CharacterBlueSpriteMerger",
"CharacterCeruleanSpriteMerger",
"CharacterCinnabarSpriteMerger",
"CharacterCyanSpriteMerger",
"CharacterGoldSpriteMerger",
"CharacterGraySpriteMerger",
"CharacterGreenSpriteMerger",
"CharacterIrisSpriteMerger",
"CharacterKhakiSpriteMerger",
"CharacterLemonSpriteMerger",
"CharacterEggChildSpriteMerger",
"CharacterHiredHandSpriteMerger",
"TurkeySpriteMerger",
"RockdogSpriteMerger",
"AxolotlSpriteMerger",
"QilinSpriteMerger",
"MontySpriteMerger",
"PercySpriteMerger",
"PoochiSpriteMerger",
]
|
"""
Given an m x n grid of characters board and a string word, return true if word exists in the grid.
The word can be constructed from letters of sequentially adjacent cells,
where adjacent cells are horizontally or vertically neighboring.
The same letter cell may not be used more than once.
Example 1:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCCED"
Output: true
Example 2:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "SEE"
Output: true
Example 3:
Input: board = [["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]], word = "ABCB"
Output: false
Constraints:
m == board.length
n == board[i].length
1 <= m, n <= 6
1 <= word.length <= 15
board and word consist of only lowercase and uppercase English letters.
"""
# V0
# IDEA : DFS + backtracking
class Solution(object):
    def exist(self, board, word):
        """Return True if `word` can be traced through adjacent cells of `board`."""
        rows, cols = len(board), len(board[0])
        # seen[r][c] is True while (r, c) is on the current DFS path.
        seen = [[False] * cols for _ in range(rows)]
        return any(
            self.existRecu(board, word, 0, r, c, seen)
            for r in range(rows)
            for c in range(cols)
        )
    def existRecu(self, board, word, cur, i, j, visited):
        """DFS step: try to match word[cur:] starting at cell (i, j)."""
        # Matched every character -> the word exists.
        if cur == len(word):
            return True
        # Out of bounds, already on the path, or wrong letter: dead end.
        if (i < 0 or i >= len(board) or j < 0 or j >= len(board[0])
                or visited[i][j] or board[i][j] != word[cur]):
            return False
        visited[i][j] = True
        # Explore the four neighbours; any() short-circuits like the
        # original chained `or`.
        found = any(
            self.existRecu(board, word, cur + 1, i + di, j + dj, visited)
            for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1))
        )
        # Unmark on the way out so other paths may reuse this cell.
        visited[i][j] = False
        return found
# V0'
# IDEA : DFS
class Solution(object):
    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        # Try every cell as a starting point.
        for row in range(len(board)):
            for col in range(len(board[0])):
                if self.dfs(board, word, col, row, 0):
                    return True
        return False
    def dfs(self, board, word, x, y, i):
        """Match word[i:] at column x, row y.  Cells on the current path are
        case-flipped in place (A -> a) and restored on backtrack."""
        if i == len(word):
            return True
        if not (0 <= x < len(board[0]) and 0 <= y < len(board)):
            return False
        if board[y][x] != word[i]:
            return False
        board[y][x] = board[y][x].swapcase()  # mark the cell as in use
        hit = False
        # Same exploration order as the original or-chain, with the same
        # short-circuit behaviour.
        for nx, ny in ((x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)):
            if self.dfs(board, word, nx, ny, i + 1):
                hit = True
                break
        board[y][x] = board[y][x].swapcase()  # unmark on the way out
        return hit
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/79386066
# IDEA : BACKTRACKING
# DEMO :
# In [39]: 'ACV'.swapcase()
# Out[39]: 'acv'
# In [41]: 'wfwwrgergewCEVER'.swapcase()
# Out[41]: 'WFWWRGERGEWcever'
class Solution(object):
    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        height, width = len(board), len(board[0])
        # any() short-circuits, so this scans starting cells exactly like
        # the original nested loops with an early return.
        return any(
            self.exit(board, word, x, y, 0)
            for y in range(height)
            for x in range(width)
        )
    def exit(self, board, word, x, y, i):
        """Backtracking step: try to match word[i:] at column x, row y."""
        if i == len(word):
            return True
        inside = 0 <= x < len(board[0]) and 0 <= y < len(board)
        if not inside or board[y][x] != word[i]:
            return False
        board[y][x] = board[y][x].swapcase()  # flip case to mark the path
        found = (self.exit(board, word, x + 1, y, i + 1)
                 or self.exit(board, word, x, y + 1, i + 1)
                 or self.exit(board, word, x - 1, y, i + 1)
                 or self.exit(board, word, x, y - 1, i + 1))
        board[y][x] = board[y][x].swapcase()  # restore on backtrack
        return found
# V1'
# https://www.cnblogs.com/zuoyuan/p/3769767.html
# IDEA : DFS
class Solution:
    # @param board, a list of lists of 1 length string
    # @param word, a string
    # @return a boolean
    def exist(self, board, word):
        def dfs(x, y, rest):
            # All remaining characters matched.
            if len(rest) == 0:
                return True
            # Visit left / right / down / up in the same order as the
            # original four stanzas.  The current cell is masked with '#'
            # while a neighbour is explored so the path cannot reuse it;
            # the mask is restored only when the branch fails (a successful
            # search returns immediately, leaving the mask -- identical to
            # the original behaviour).
            for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                if 0 <= nx < len(board) and 0 <= ny < len(board[0]) \
                        and board[nx][ny] == rest[0]:
                    saved = board[x][y]
                    board[x][y] = '#'
                    if dfs(nx, ny, rest[1:]):
                        return True
                    board[x][y] = saved
            return False
        # Start a search from every cell holding the first character.
        for i in range(len(board)):
            for j in range(len(board[0])):
                if board[i][j] == word[0]:
                    if dfs(i, j, word[1:]):
                        return True
        return False
# V1''
# https://www.jiuzhang.com/solution/word-search/#tag-highlight-lang-python
class Solution:
    # @param board, a list of lists of 1 length string
    # @param word, a string
    # @return a boolean
    def exist(self, board, word):
        """Return True if `word` can be traced through adjacent board cells.

        Bug fixed: the original guard was ``if word == []`` -- `word` is a
        string, so that comparison could never be true (dead code).  The
        intended "empty word trivially exists" check is now ``not word``.
        """
        if not word:
            return True
        m = len(board)
        if m == 0:
            return False
        n = len(board[0])
        if n == 0:
            return False
        # visited[i][j] is True while (i, j) is on the current search path.
        visited = [[False for j in range(n)] for i in range(m)]
        # DFS from every cell.
        for i in range(m):
            for j in range(n):
                if self.exist2(board, word, visited, i, j):
                    return True
        return False
    def exist2(self, board, word, visited, row, col):
        """DFS helper: try to match `word` starting at (row, col)."""
        if word == '':
            return True
        m, n = len(board), len(board[0])
        if row < 0 or row >= m or col < 0 or col >= n:
            return False
        if board[row][col] == word[0] and not visited[row][col]:
            visited[row][col] = True
            # Recurse into the four neighbours with the rest of the word.
            if self.exist2(board, word[1:], visited, row - 1, col) or self.exist2(board, word[1:], visited, row, col - 1) or self.exist2(board, word[1:], visited, row + 1, col) or self.exist2(board, word[1:], visited, row, col + 1):
                return True
            else:
                # Backtrack: free the cell for other paths.
                visited[row][col] = False
        return False
# V2
# Time: O(m * n * l)
# Space: O(l)
class Solution(object):
    # @param board, a list of lists of 1 length string
    # @param word, a string
    # @return a boolean
    def exist(self, board, word):
        m, n = len(board), len(board[0])
        # True entries mark cells on the current DFS path.
        visited = [[False] * n for _ in range(m)]
        for r in range(m):
            for c in range(n):
                if self.existRecu(board, word, 0, r, c, visited):
                    return True
        return False
    def existRecu(self, board, word, cur, i, j, visited):
        # Whole word matched.
        if cur == len(word):
            return True
        out_of_bounds = i < 0 or i >= len(board) or j < 0 or j >= len(board[0])
        if out_of_bounds or visited[i][j] or board[i][j] != word[cur]:
            return False
        visited[i][j] = True
        result = (self.existRecu(board, word, cur + 1, i + 1, j, visited)
                  or self.existRecu(board, word, cur + 1, i - 1, j, visited)
                  or self.existRecu(board, word, cur + 1, i, j + 1, visited)
                  or self.existRecu(board, word, cur + 1, i, j - 1, visited))
        # Release the cell when unwinding.
        visited[i][j] = False
        return result
|
import setuptools

# The README doubles as the PyPI long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Read run-time requirements, one per line.  Bug fixed: the original used
# line[:-1], which chops a real character off the last requirement when the
# file has no trailing newline; str.strip() also copes with Windows line
# endings, and blank lines are skipped.
requirements = []
with open("Requirements.txt", "r", encoding="utf-8") as fh:
    for line in fh:
        requirement = line.strip()
        if requirement:
            requirements.append(requirement)

setuptools.setup(
    name="desktop-organiser-tht",
    version="0.0.1",
    author="Tauseef Hilal Tantary",
    author_email="tantary.tauseef@gmail.com",
    description="Desktop Organiser",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Tauseef-Hilal/DesktopOrganiser",
    license="MIT",
    project_urls={
        "Bug Tracker": "https://github.com/Tauseef-Hilal/DesktopOrganiser/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=["organiser"],
    include_package_data=True,
    install_requires=requirements,
    entry_points={"console_scripts": ["desktop=organiser.__main__:main"]},
)
|
""" Bayes Theorem Rule """
def bayes(x,nx,y,y_x,y_nx):
return (x_y*x)/((y_nx*nx)+(y_x*x))
""" Finding P(A|B) with Bayes Theorem """
#Basic Probabilities
A = 0
notA = 1-A
B=0
notB = 1-B
#Conditional probabilities
B_A = 0
notB_A = 1 -B_A
B_notA = 0
notB_notA =1 - B_notA
#A_B = (B_A*A)/((B_notA*notA)+(B_A*A))
def bayes(x, nx, y, y_x, y_nx):
    """Return P(X|Y) = P(Y|X)P(X) / (P(Y|not X)P(not X) + P(Y|X)P(X)).

    Bug fixed: the original used an undefined name `x_y` (NameError when
    called); the intended numerator term is the likelihood `y_x`.
    Parameter `y` is unused but kept so existing callers are unaffected.
    """
    return (y_x * x) / ((y_nx * nx) + (y_x * x))
#ex
|
###############################################################################
#
# uniqueMarkers.py - find a set of markers that are descriptive for a taxonomy
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import argparse
import sqlite3
import re
from collections import defaultdict
def parseTaxonomy(taxonomy):
    """Split a greengenes-style taxonomy string into its rank names.

    Example: 'k__Bacteria;p__Proteobacteria' -> ['Bacteria', 'Proteobacteria']

    The pattern is now a raw string: '\\s' inside a plain literal is an
    invalid escape sequence (DeprecationWarning, and a SyntaxWarning/error
    on newer Pythons).  The compiled pattern is unchanged.
    """
    tax = re.compile(r'[;,:]?\s?[kpcofgs]__|[;,:]')
    # The first split element is the empty prefix before 'k__'; drop it.
    return tax.split(taxonomy)[1:]
def getTaxonId(cursor, *args):
    """Return (Id, "Count") rows for the lineage exactly matching `args`.

    `args` are rank values ordered Domain..Species; the next rank down is
    required to be NULL so the match is pinned to this exact depth.

    NOTE(review): the WHERE clause is built by string interpolation, so a
    rank value containing a quote breaks the query (and is injectable if
    the taxonomy ever comes from an untrusted source).  Column names
    cannot be bound parameters, but the values could be.
    NOTE: passing all 7 ranks raises IndexError at ranks[len(args)].
    """
    ranks = ['Domain', 'Phylum', 'Class', '"Order"', 'Family', 'Genus', 'Species']
    query = []
    for rank, value in zip(ranks, args):
        query.append(' %s = \'%s\' ' % (rank, value))
    # Require the rank below the deepest given one to be unset.
    query.append(' %s IS NULL' % ranks[len(args)])
    query_string = 'AND'.join(query)
    result = cursor.execute('SELECT Id, "Count" FROM taxons WHERE %s' % query_string)
    return result.fetchall()
def getOppositeRankSpecificTaxonId(cursor, *args):
    ''' Get all other taxon lineages at the same level as the requested taxon

    Matches every rank above the deepest one exactly, requires a DIFFERENT
    value at the deepest given rank, and no value below it.
    NOTE(review): WHERE clause is string-built (quoting/injection hazard,
    as in getTaxonId).  This module is Python 2 (print statement below).
    '''
    ranks = ['Domain', 'Phylum', 'Class', '"Order"', 'Family', 'Genus', 'Species']
    query = []
    for rank, value in zip(ranks, args[:-1]):
        query.append(' %s = \'%s\' ' % (rank, value))
    query.append(' %s != \'%s\' ' % (ranks[len(args) - 1], args[-1]))
    query.append(' %s IS NULL' % ranks[len(args)])
    query_string = 'AND'.join(query)
    # Debug output left in by the author (Python 2 print statement).
    print query_string
    result = cursor.execute('SELECT Id, "Count" FROM taxons WHERE %s' % query_string)
    return result.fetchall()
def getOppositeRankInspecificTaxonId(cursor, *args):
    ''' Get all other taxon lineages at the same level as the requested taxon
    '''
    ranks = ['Domain', 'Phylum', 'Class', '"Order"', 'Family', 'Genus', 'Species']
    query = []
    for rank, value in zip(ranks, args):
        query.append(' %s != \'%s\' ' % (rank, value))
    # query.append(' %s IS NULL' % ranks[len(args)])
    # NOTE(review): only the LAST inequality built above is used -- every
    # earlier condition is discarded.  Presumably intentional (select all
    # rows differing at the deepest given rank), but confirm; otherwise
    # the loop above is dead work.
    query_string = query[-1]
    result = cursor.execute('SELECT Id, "Count" FROM taxons WHERE %s' % query_string)
    return result.fetchall()
def getMarkersFromTaxon(cursor, taxid):
    """Return (Marker, Count) rows mapped to the taxon `taxid`."""
    rows = cursor.execute(
        'SELECT Marker, "Count" FROM marker_mapping WHERE Taxon = ?',
        (taxid,))
    return rows.fetchall()
def getMarkersNotInTaxon(cursor, taxid):
    """Return (Marker, Count) rows for every taxon other than `taxid`."""
    rows = cursor.execute(
        'SELECT Marker, "Count" FROM marker_mapping WHERE Taxon != ?',
        (taxid,))
    return rows.fetchall()
def countAllGenomes(cursor):
    """Return (Id, Count) for every taxon in the database."""
    return cursor.execute('SELECT Id, "Count" FROM taxons').fetchall()
def countGenomesInTaxon(cursor, taxId):
    """Return the genome count recorded for the taxon `taxId`."""
    row = cursor.execute('SELECT "Count" FROM taxons WHERE Id = ?',
                         (taxId,)).fetchone()
    return row[0]
def getDescriptiveMarkers(cursor, markers):
    """Fetch (Acc, Name) for one marker; `markers` is the SQL parameter tuple."""
    return cursor.execute('SELECT Acc, Name FROM markers WHERE Id = ?',
                          markers).fetchone()
def doWork(args):
    """Find markers that are descriptive of the taxon in args.taxonomy.

    A marker is "descriptive" when it is present in at least args.include
    of the target taxon's genomes and in at most args.exclude of all other
    genomes.  Matching markers are printed as "Acc Name" (Python 2 print
    statement below).
    """
    taxon_ranks = parseTaxonomy(args.taxonomy)
    con = sqlite3.connect(args.database)
    cur = con.cursor()
    taxon_ids = getTaxonId(cur, *taxon_ranks)
    if len(taxon_ids) > 1:
        raise RuntimeError("Taxon string returns more than one lineage "\
            "please be more specific")
    else:
        tax_id, tax_count = taxon_ids[0]
    all_markers = getMarkersFromTaxon(cur, tax_id)
    # marker id -> fraction of target genomes carrying the marker.
    marker_in_taxon_mapping = {}
    for (Id, count) in all_markers:
        # NOTE(review): args.include is a *string* when supplied on the
        # command line (the argparse option has no type=float) -- confirm
        # callers always pass floats, otherwise this comparison is wrong.
        if float(count) / float(tax_count) >= args.include:
            marker_in_taxon_mapping[Id] = float(count) / float(tax_count)
    opposite_taxons = getOppositeRankInspecificTaxonId(cur, *taxon_ranks)
    # Aggregate marker counts across all non-target taxons.
    markers_from_others = defaultdict(int)
    others_total_count = 0
    for (Id, count) in opposite_taxons:
        others_total_count += count
        this_taxon_count = getMarkersFromTaxon(cur, Id)
        # NOTE: this inner loop rebinds Id/count, shadowing the outer loop
        # variables; safe here because the outer values are not used again
        # after this point, but fragile.
        for (Id, count) in this_taxon_count:
            markers_from_others[Id] += count
    descriptive_markers = []
    for marker_id, _ in marker_in_taxon_mapping.items():
        if marker_id in markers_from_others:
            # Rare enough in everything else?  (args.exclude: same
            # str-vs-float caveat as args.include above.)
            fraction_in_others = float(markers_from_others[marker_id]) / float(others_total_count)
            if fraction_in_others <= args.exclude:
                descriptive_markers.append((marker_id,))
        else:
            # not found in anything else
            descriptive_markers.append((marker_id,))
    des_markers = []
    for i in descriptive_markers:
        des_markers.append(getDescriptiveMarkers(cur, i))
    for des_acc, des_name in des_markers:
        print des_acc, des_name
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--taxon-string',
        default='k__Bacteria;p__Proteobacteria',
        dest='taxonomy', help="specify the taxonomy")
    # Bug fixed: type=float added to both threshold options.  argparse
    # returns strings by default, so CLI-supplied values were compared as
    # str against the float ratios computed in doWork.
    parser.add_argument('-i', '--inclusive-percent', dest='include',
        type=float,
        default=0.95, help="The required percentage of member of the "\
        "specified taxonomy must have a particular marker")
    parser.add_argument('-e', '--exclusive-percent', dest='exclude',
        type=float,
        default=0.05, help="The maximum prevelence of the marker in "\
        "all other non-target taxons")
    parser.add_argument('-d', '--database', dest='database',
        default='markers.db', help='specify path to database')
    # parse the arguments
    args = parser.parse_args()
    # do what we came here to do
    doWork(args)
|
import os
# Discord bot token, read from the environment (never hard-code secrets).
token = os.environ.get('GLOSSY_DISCORD_TOKEN')
# Prefix that commands must start with.
prefix = '!'
# MongoDB connection address, including any credentials, from the
# environment for the same reason as the token.
db_address = os.environ.get('GLOSSY_DB_ADDRESS')
# Name of the MongoDB database to use.
db_name = 'ESOBot'
|
#!/usr/bin/env python
# sp800_22_serial_test.py
#
# Copyright (C) 2017 David Johnston
# This program is distributed under the terms of the GNU General Public License.
#
# This file is part of sp800_22_tests.
#
# sp800_22_tests is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sp800_22_tests is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sp800_22_tests. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import math
#from scipy.special import gamma, gammainc, gammaincc
from gamma_functions import *
def int2patt(n, m):
    """Return the m lowest bits of n as a list, least-significant bit first."""
    return [(n >> bit) & 1 for bit in range(m)]
def countpattern(patt, bits, n):
    """Count occurrences of `patt` at offsets 0..n-1 of `bits`.

    Overlapping occurrences all count.  `bits` must hold at least
    n + len(patt) - 1 entries (callers pad the sequence cyclically).
    """
    width = len(patt)
    total = 0
    for start in range(n):
        if all(patt[k] == bits[start + k] for k in range(width)):
            total += 1
    return total
def psi_sq_mv1(m, n, padded_bits):
    """Psi-squared statistic for block length m (SP800-22 serial test).

    Counts occurrences of every m-bit pattern in the padded sequence and
    returns (2**m / n) * sum(count^2) - n.

    Bug fixed: the original pre-filled `counts` with 2**m zeros and then
    *appended* the real counts instead of assigning them, leaving a dead
    block of zeros in the list.  The zeros contributed nothing to the sum
    of squares, so the returned value is unchanged; counts are now
    collected directly.
    """
    counts = [countpattern(int2patt(i, m), padded_bits, n)
              for i in range(2 ** m)]
    # float() keeps the arithmetic in floating point under Python 2 too
    # (this module only __future__-imports print_function, not division).
    psi_sq_m = float(sum(c * c for c in counts)) * (2 ** m) / n
    return psi_sq_m - n
def serial_test(bits,patternlen=None):
    """NIST SP800-22 serial test (section 2.11).

    :param bits: sequence of 0/1 integers
    :param patternlen: pattern length m; if None, m is derived from n
    :return: (success, None, [P1, P2]); success requires both p-values
        to be at least 0.01
    """
    n = len(bits)
    if patternlen != None:
        m = patternlen
    else:
        m = int(math.floor(math.log(n,2)))-2
        if m < 4:
            print("Error. Not enough data for m to be 4")
            # NOTE(review): middle element is 0 here but None on the
            # success path -- confirm callers accept both shapes.
            return False,0,None
        # NOTE(review): m is unconditionally reset to 4, discarding the
        # value derived above -- confirm this clamp is intended.
        m = 4
    # Step 1: extend the sequence cyclically with its first m-1 bits.
    padded_bits=bits+bits[0:m-1]
    # Step 2: psi-squared statistics for block lengths m, m-1 and m-2.
    psi_sq_m = psi_sq_mv1(m, n, padded_bits)
    psi_sq_mm1 = psi_sq_mv1(m-1, n, padded_bits)
    psi_sq_mm2 = psi_sq_mv1(m-2, n, padded_bits)
    delta1 = psi_sq_m - psi_sq_mm1
    delta2 = psi_sq_m - (2*psi_sq_mm1) + psi_sq_mm2
    # P-values via the upper incomplete gamma function (local
    # gamma_functions module; mirrors scipy's gammaincc).
    P1 = gammaincc(2**(m-2),delta1/2.0)
    P2 = gammaincc(2**(m-3),delta2/2.0)
    print(" psi_sq_m = ",psi_sq_m)
    print(" psi_sq_mm1 = ",psi_sq_mm1)
    print(" psi_sq_mm2 = ",psi_sq_mm2)
    print(" delta1 = ",delta1)
    print(" delta2 = ",delta2)
    print(" P1 = ",P1)
    print(" P2 = ",P2)
    success = (P1 >= 0.01) and (P2 >= 0.01)
    return (success, None, [P1,P2])
if __name__ == "__main__":
bits = [0,0,1,1,0,1,1,1,0,1]
success, _, plist = serial_test(bits, patternlen=3)
print("success =",success)
print("plist = ",plist)
|
"""Commands to interact with Yara rules stored in Yeti."""
import os
import click
from yeti_python_api.api import YetiAPI
from cli.config import config
@click.command()
@click.option('--recurse', help='Recurse in directory', is_flag=True, default=False) # pylint: disable=line-too-long
@click.option('--verbose', help='Display match details', is_flag=True, default=False) # pylint: disable=line-too-long
@click.option('--name_filter', help='Filter indicators by name', type=click.STRING, default='') # pylint: disable=line-too-long
@click.argument('path', type=click.STRING)
def yara_scan(path, name_filter, verbose, recurse):
    """Scan a local file or directory using Yara rules from the Yeti server."""
    if not os.path.exists(path):
        print('Error: {0:s} was not found'.format(path))
        exit(-1)
    api = YetiAPI(config.api_base, config.api_key)
    # Fetch all Yara-type indicators whose name matches the filter.
    yara_rules = api.filter_indicators(name_filter, 'x-yara')
    paths = [path]
    # Width of the filename column (stays 0 when scanning a single file).
    max_path_len = 0
    if recurse:
        print('Recursing on directory {0:s}'.format(path))
        paths = []
        for root, _, files in os.walk(path):
            for filename in files:
                full_path = os.path.join(root, filename)
                paths.append(full_path)
                if len(full_path) > max_path_len:
                    max_path_len = len(full_path)
    results = []
    max_ruleid_len = 0
    for rule in yara_rules:
        for filename in paths:
            # NOTE(review): presumably matches the rule against the file
            # at this path (yara-python style) -- confirm against the
            # YetiAPI indicator type.
            matches = rule.compiled_pattern.match(filename)
            if matches:
                # Column width is sized from "name (id)" below.
                if len(rule.name) > max_ruleid_len:
                    max_ruleid_len = len(rule.name + rule.id)
                results.append((filename, rule, matches))
    if not results:
        print('No matches found')
        exit()
    print('Found {0:d} matches!'.format(len(results)))
    # Header row, separator row, then one line per match string.
    print('{0:s} {1:s} {2:s}'.format(
        'Filename'.ljust(max_path_len),
        'ID'.ljust(max_ruleid_len + 3),
        'Details' if verbose else ''))
    print('{0:s} {1:s} {2:s}'.format(
        '='.ljust(max_path_len, '='),
        '='.ljust(max_ruleid_len + 3, '='),
        '='*10 if verbose else ''))
    for filename, rule, result_list in results:
        for result in result_list:
            print('{0:s} {1:s} {2!s}'.format(
                filename.ljust(max_path_len),
                '{} ({})'.format(rule.name, rule.id).ljust(max_ruleid_len),
                result.strings if verbose else ''
            ))
@click.command()
@click.option('--name_filter', help='Filter indicators by name', type=click.STRING, default='') # pylint: disable=line-too-long
@click.argument('path', type=click.STRING)
def dump_yara_rules(path, name_filter):
    """Dump existing Yara rules to files in a local directory.

    Each rule is written to <path>/<rule name>.yara (lowercased), after an
    interactive confirmation prompt.
    """
    if not os.path.exists(path):
        print('Error: {0:s} was not found'.format(path))
        exit(-1)
    api = YetiAPI(config.api_base, config.api_key)
    yara_rules = api.filter_indicators(name_filter, 'x-yara')
    choice = input('About to dump {0:d} Yara rules to "{1:s}"\n'
                   'Continue? [Y/n] '.format(len(yara_rules), path))
    if not choice.lower() in ['y', 'yes', '']:
        exit()
    for rule in yara_rules:
        # Bug fixed: the original validated `path` but never used it, so
        # every rule landed in the current working directory.  Files are
        # now written inside the requested directory.
        filename = os.path.join(path, (rule.name + '.yara').lower())
        with open(filename, 'w') as output:
            output.write(rule.pattern)
        print('[+] Wrote rule "{0:s}" to {1:s}'.format(rule.name, filename))
|
# Write a program to sort a stack in ascending order. You should not make any assumptions about how the stack is implemented. The following are the only functions that should be used to write this program: push | pop | peek | isEmpty.
class Stack(list):
    """A list exposing the classic stack interface (push/pop/peek/isEmpty)."""
    def push(self, val):
        """Place `val` on top of the stack."""
        self.append(val)
    # pop == list.pop
    def isEmpty(self):
        """Return True when the stack holds no elements."""
        return len(self) == 0
    def peek(self):
        """Return (without removing) the top element."""
        return self[-1]
def sortStack(stack):
    """Sort `stack` in place so the smallest element ends up on top.

    Popping the sorted stack then yields values in ascending order (which
    is what the driver below prints).  Uses only the permitted operations
    (push/pop/peek/isEmpty) plus one auxiliary stack.  O(n^2) time,
    O(n) extra space.

    The original body was a bare `pass` -- the exercise was unimplemented.
    """
    # Same concrete stack type as the input, so we rely only on the
    # push/pop/peek/isEmpty interface.
    aux = type(stack)()
    while not stack.isEmpty():
        current = stack.pop()
        # Keep `aux` ordered with its largest value on top: move anything
        # bigger than `current` back before inserting it.
        while not aux.isEmpty() and aux.peek() > current:
            stack.push(aux.pop())
        aux.push(current)
    # Transfer back, reversing the order so the smallest sits on top.
    while not aux.isEmpty():
        stack.push(aux.pop())
# Demo driver (Python 2: xrange and the print statement).
stack = Stack()
import random
# Fill with 100 random values, sort, then print them in ascending order.
for i in xrange(100):
    stack.push(random.randint(1, 100))
sortStack(stack)
while not stack.isEmpty():
    print stack.pop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.