import pathlib
from importlib.machinery import SourceFileLoader
import setuptools
version = SourceFileLoader('version', 'darq/version.py').load_module()
def readfile(filename: str) -> str:
return pathlib.Path(filename).read_text('utf-8').strip()
long_description = '\n\n'.join((
readfile('README.rst'),
readfile('CHANGES.rst'),
))
setuptools.setup(
name='darq',
version=str(version.VERSION),
author='Igor Mozharovsky',
author_email='igor.mozharovsky@gmail.com',
description='Async task manager with Celery-like features. Fork of arq.',
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/seedofjoy/darq',
packages=['darq'],
package_data={'darq': ['py.typed']},
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Typing :: Typed',
],
entry_points={
'console_scripts': ['darq = darq.cli:cli'],
},
install_requires=[
'async-timeout>=3.0.0',
'aioredis>=1.1.0',
'click>=6.7',
'pydantic>=0.20',
'dataclasses>=0.6; python_version=="3.6"',
'typing_extensions>=3.7.4; python_version<"3.8"',
],
extras_require={
'watch': ['watchgod>=0.4'],
},
python_requires='>=3.6',
license='MIT',
)
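# Local editable install with the optional "watch" extra (standard pip usage;
# the extra name comes from extras_require above):
#   pip install -e ".[watch]"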
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : __init__.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 01/27/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
from .batchnorm import *
|
"""
GeoReport v2 Server
-------------------
Open311 GeoReport v2 Server implementation written in Flask.
:copyright: (c) Miami-Dade County 2011
:author: Julian Bonilla (@julianbonilla)
:license: Apache License v2.0, see LICENSE for more details.
"""
from data import service_types, service_definitions, service_discovery, srs
from flask import Flask, render_template, request, abort, json, jsonify, make_response, redirect, url_for
from database import db, serviceRequest, Admin, Users
from forms import RequestForm, SignupForm, SignInForm, SearchForm, updateForm
import random
# Configuration
DEBUG = True
ORGANIZATION = 'Schenectady'
JURISDICTION = 'cityofschenectady.com'
app = Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('GEOREPORT_SETTINGS', silent=True)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://127.0.0.1/schenectadyOpen311'
app.config['SQLALCHEMY_PGUSER'] = 'tehhdaryy'
db.init_app(app)
app.secret_key = "development-key"
@app.route('/')
def index():
return render_template('initial.html', org=app.config['ORGANIZATION'],
jurisdiction=app.config['JURISDICTION'])
@app.route('/discovery.<format>')
def discovery(format):
"""Service discovery mechanism required for Open311 APIs."""
if format == 'json':
return jsonify(service_discovery)
elif format == 'xml':
response = make_response(render_template('discovery.xml', discovery=service_discovery))
response.headers['Content-Type'] = 'text/xml; charset=utf-8'
return response
else:
abort(404)
@app.route('/services.<format>')
def service_list(format):
"""Provide a list of acceptable 311 service request types and their
associated service codes. These request types can be unique to the
city/jurisdiction.
"""
if format == 'json':
response = make_response(json.dumps(service_types))
response.headers['Content-Type'] = 'application/json; charset=utf-8'
return response
elif format == 'xml':
response = make_response(render_template('services.xml', services=service_types))
response.headers['Content-Type'] = 'text/xml; charset=utf-8'
return response
else:
abort(404)
@app.route('/services/<service_code>.<format>')
def service_definition(service_code, format):
"""Define attributes associated with a service code.
These attributes can be unique to the city/jurisdiction.
"""
if service_code not in service_definitions:
abort(404)
if format == 'json':
return jsonify(service_definitions[service_code])
elif format == 'xml':
response = make_response(render_template('definition.xml',
definition=service_definitions[service_code]))
response.headers['Content-Type'] = 'text/xml; charset=utf-8'
return response
else:
abort(404)
@app.route('/requests', methods=['GET', 'POST'])
def requests():
##Create and Submit Requests
form = RequestForm()
if request.method == 'POST':
## If the form is not valid, then load the request template.
        if not form.validate():
return render_template('request.html', form=form)
else:
            ## Get data from the form; some fill-ins are hardcoded.
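            ## Positional order assumed from the form fields and model usage:
            ## serviceRequest(address, zipcode, service_code, service_name,
            ##                service_request_id, description, status,
            ##                status_notes, request_date, update_date,
            ##                expected_date, agency_responsible)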
newRequest = serviceRequest(form.address.data, form.zipcode.data, form.service_code.data,
form.service_name.data, random.randint(100, 150), form.description.data, "Open", "N/A",
form.request_date.data, "03-07-17", "03-09-17", "Pothole Fillers Inc.")
db.session.add(newRequest) ##Add it to the database
db.session.commit()
return "Request Submitted!" ##Change to suitable page.
elif request.method == 'GET':
return render_template('request.html', form=form)
@app.route("/usersignup", methods=['GET', 'POST'])
def usersignup():
form = SignupForm()
if request.method == 'POST':
        if not form.validate():
return render_template('usersignup.html', form=form)
else:
newuser = Users(form.first_name.data, form.last_name.data, form.email.data, form.password.data)
db.session.add(newuser)
db.session.commit()
return "User SignUp Success!" ##Change to more suitable page
elif request.method == 'GET':
return render_template('usersignup.html', form=form)
@app.route("/adminsignup", methods=['GET', 'POST'])
def adminsignup():
form = SignupForm()
if request.method == 'POST':
        if not form.validate():
return render_template('adminsignup.html', form=form)
else:
newadmin = Admin(form.first_name.data, form.last_name.data, form.email.data, form.password.data)
db.session.add(newadmin)
db.session.commit()
return "Admin SignUp Success!" ##Change to more suitable page
elif request.method == 'GET':
return render_template('adminsignup.html', form=form)
@app.route("/usersignin", methods=["GET", "POST"])
def usersignin():
form = SignInForm()
if request.method == "POST":
        if not form.validate():
return render_template("usersignin.html", form=form)
else:
email = form.email.data
password = form.password.data
user = Users.query.filter_by(email=email).first()
if user is not None and user.check_password(password):
return "User Login Success!"
else:
return redirect(url_for('usersignin'))
elif request.method == "GET":
return render_template('usersignin.html', form=form)
@app.route("/adminsignin", methods=["GET", "POST"])
def adminsignin():
form = SignInForm()
if request.method == "POST":
        if not form.validate():
return render_template("adminsignin.html", form=form)
else:
email = form.email.data
password = form.password.data
admin = Admin.query.filter_by(email=email).first()
if admin is not None and admin.check_password(password):
return "Admin Login Success!"
else:
return redirect(url_for('adminsignin'))
elif request.method == "GET":
return render_template('adminsignin.html', form=form)
@app.route('/requests/<service_request_id>.<format>')
def service_request(service_request_id, format):
"""Query the current status of an individual request."""
    # NOTE: only the canned example request srs[0] is returned below.
if format == 'json':
return jsonify(srs[0])
elif format == 'xml':
response = make_response(render_template('service-requests.xml', service_requests=[srs[0]]))
response.headers['Content-Type'] = 'text/xml; charset=utf-8'
return response
else:
abort(404)
@app.route('/tokens/<token>.<format>')
def token(token, format):
"""Get a service request id from a temporary token. This is unnecessary
if the response from creating a service request does not contain a token.
"""
abort(404)
@app.route('/search', methods=["GET", "POST"])
def search():
form = SearchForm()
if request.method == "POST":
        if not form.validate():
return render_template("requestSearch.html", form=form)
else:
request_ID = form.request_ID.data
service = serviceRequest.query.filter_by(service_request_id=request_ID).first()
if service is not None:
                service_info = "\n".join([
                    "Address: " + service.address + " " + service.zipcode,
                    "Service Code: " + service.service_code,
                    "Service Name: " + service.service_name,
                    "Service Description: " + service.description,
                    "Status: " + service.status,
                    "Notes: " + service.status_notes,
                    "Date Requested: " + service.request_date,
                    "Date Updated: " + service.update_date,
                    "Expected Date of Completion: " + service.expected_date,
                    "Agency to Respond: " + service.agency_responsible,
                ])
                return service_info
else:
return redirect(url_for('search'))
elif request.method == "GET":
return render_template('requestSearch.html', form=form)
@app.route("/update", methods=["GET", "POST"])
def updateRequest():
form = updateForm()
if request.method == "POST":
        if not form.validate():
return render_template("updateRequest.html", form=form)
else:
            request_ID = form.request_ID.data
note = form.update_note.data
date = form.update_date.data
status = form.status.data
service = serviceRequest.query.filter_by(service_request_id=request_ID).first()
if service is not None and service.searchRequest(request_ID):
                service.status_notes = note
                service.update_date = date
                service.update_status = status
db.session.commit()
return "Update Complete!"
else:
return redirect(url_for('updateRequest'))
elif request.method == "GET":
return render_template('updateRequest.html', form=form)
if __name__ == '__main__':
app.run()
|
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import os
import unittest2
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as setting
reload(setting)
from base import BaseTest
from compass.db.api import database
from compass.db.api import switch
from compass.db.api import user as user_api
from compass.db import exception
from compass.utils import flags
from compass.utils import logsetting
class TestGetSwitch(BaseTest):
"""Test get switch."""
def setUp(self):
super(TestGetSwitch, self).setUp()
def tearDown(self):
super(TestGetSwitch, self).tearDown()
def test_get_switch(self):
get_switch = switch.get_switch(
1,
user=self.user_object,
)
self.assertIsNotNone(get_switch)
self.assertEqual(get_switch['ip'], '0.0.0.0')
class TestAddSwitch(BaseTest):
"""Test add switch."""
def setUp(self):
super(TestAddSwitch, self).setUp()
def tearDown(self):
super(TestAddSwitch, self).tearDown()
def test_add_switch(self):
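        # '2887583784' is 172.29.8.40 as a 32-bit integer
        # (172*2**24 + 29*2**16 + 8*2**8 + 40 == 2887583784).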
add_switch = switch.add_switch(
ip='2887583784',
user=self.user_object,
)
expected = '172.29.8.40'
self.assertEqual(expected, add_switch['ip'])
def test_add_switch_position_args(self):
add_switch = switch.add_switch(
True,
'2887583784',
user=self.user_object,
)
expected = '172.29.8.40'
self.assertEqual(expected, add_switch['ip'])
def test_add_switch_session(self):
with database.session() as session:
add_switch = switch.add_switch(
ip='2887583784',
user=self.user_object,
session=session
)
expected = '172.29.8.40'
self.assertEqual(expected, add_switch['ip'])
class TestAddSwitches(BaseTest):
"""Test add switches."""
def setUp(self):
super(TestAddSwitches, self).setUp()
def tearDown(self):
super(TestAddSwitches, self).tearDown()
def test_add_switches(self):
data = [
{
'ip': '172.29.8.30',
'vendor': 'Huawei',
'credentials': {
"version": "2c",
"community": "public"
}
}, {
'ip': '172.29.8.40'
}, {
'ip': '172.29.8.40'
}
]
switches = switch.add_switches(
data=data,
user=self.user_object
)
ip = []
for item in switches['switches']:
ip.append(item['ip'])
fail_ip = []
for item in switches['fail_switches']:
fail_ip.append(item['ip'])
expected = ['172.29.8.30', '172.29.8.40']
expected_fail = ['172.29.8.40']
for expect in expected:
self.assertIn(expect, ip)
for expect_fail in expected_fail:
self.assertIn(expect_fail, fail_ip)
class TestListSwitches(BaseTest):
"""Test list switch."""
def setUp(self):
super(TestListSwitches, self).setUp()
def tearDown(self):
super(TestListSwitches, self).tearDown()
def test_list_switches_ip_int_invalid(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
list_switches = switch.list_switches(
ip_int='test',
user=self.user_object,
)
self.assertEqual(list_switches, [])
def test_list_switches_with_ip_int(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
list_switches = switch.list_switches(
ip_int='2887583784',
user=self.user_object,
)
expected = '172.29.8.40'
self.assertIsNotNone(list_switches)
self.assertEqual(expected, list_switches[0]['ip'])
def test_list_switches(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
list_switches = switch.list_switches(
user=self.user_object
)
expected = '172.29.8.40'
self.assertIsNotNone(list_switches)
self.assertEqual(expected, list_switches[0]['ip'])
class TestDelSwitch(BaseTest):
"""Test delete switch."""
def setUp(self):
super(TestDelSwitch, self).setUp()
def tearDown(self):
super(TestDelSwitch, self).tearDown()
def test_del_switch(self):
switch.del_switch(
1,
user=self.user_object,
)
del_switch = switch.list_switches(
user=self.user_object
)
self.assertEqual([], del_switch)
class TestUpdateSwitch(BaseTest):
"""Test update switch."""
def setUp(self):
super(TestUpdateSwitch, self).setUp()
def tearDown(self):
super(TestUpdateSwitch, self).tearDown()
def test_update_switch(self):
switch.update_switch(
1,
user=self.user_object,
vendor='test_update'
)
update_switch = switch.get_switch(
1,
user=self.user_object,
)
expected = 'test_update'
self.assertEqual(expected, update_switch['vendor'])
class TestPatchSwitch(BaseTest):
"""Test patch switch."""
def setUp(self):
super(TestPatchSwitch, self).setUp()
def tearDown(self):
super(TestPatchSwitch, self).tearDown()
def test_patch_switch(self):
switch.patch_switch(
1,
user=self.user_object,
credentials={
'version': '2c',
'community': 'public'
}
)
patch_switch = switch.get_switch(
1,
user=self.user_object,
)
expected = {
'credentials': {
'version': '2c',
'community': 'public'
}
}
self.assertTrue(
all(item in patch_switch.items() for item in expected.items())
)
class TestListSwitchFilters(BaseTest):
"""Test list switch filters."""
def setUp(self):
super(TestListSwitchFilters, self).setUp()
def tearDown(self):
super(TestListSwitchFilters, self).tearDown()
def test_list_switch_filters(self):
list_switch_filters = switch.list_switch_filters(
user=self.user_object
)
expected = {
'ip': '0.0.0.0',
'id': 1,
'filters': 'allow ports all',
}
self.assertIsNotNone(list_switch_filters)
self.assertTrue(
all(item in list_switch_filters[0].items()
for item in expected.items()))
class TestGetSwitchFilters(BaseTest):
"""Test get switch filter."""
def setUp(self):
super(TestGetSwitchFilters, self).setUp()
def tearDown(self):
super(TestGetSwitchFilters, self).tearDown()
    def test_get_switch_filters(self):
get_switch_filter = switch.get_switch_filters(
1,
user=self.user_object,
)
expected = {
'ip': '0.0.0.0',
'id': 1,
'filters': 'allow ports all',
}
self.assertIsNotNone(get_switch_filter)
self.assertTrue(
all(item in get_switch_filter.items()
for item in expected.items()))
class TestUpdateSwitchFilters(BaseTest):
"""Test update a switch filter."""
def setUp(self):
super(TestUpdateSwitchFilters, self).setUp()
def tearDown(self):
super(TestUpdateSwitchFilters, self).tearDown()
def test_update_switch_filters(self):
switch.update_switch_filters(
1,
user=self.user_object,
machine_filters=[
{
'filter_type': 'allow'
}
]
)
update_switch_filters = switch.get_switch_filters(
1,
user=self.user_object,
)
expected = {
'filters': 'allow'
}
self.assertTrue(
all(item in update_switch_filters.items()
for item in expected.items())
)
class TestPatchSwitchFilter(BaseTest):
"""Test patch a switch filter."""
def setUp(self):
super(TestPatchSwitchFilter, self).setUp()
def tearDown(self):
super(TestPatchSwitchFilter, self).tearDown()
def test_patch_switch_filter(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.patch_switch_filter(
2,
user=self.user_object,
machine_filters=[
{
'filter_type': 'allow'
}
]
)
patch_switch_filter = switch.get_switch_filters(
2,
user=self.user_object,
)
expected = {
'filters': 'allow'
}
self.assertTrue(
all(item in patch_switch_filter.items()
for item in expected.items())
)
class TestAddSwitchMachine(BaseTest):
"""Test add switch machine."""
def setUp(self):
super(TestAddSwitchMachine, self).setUp()
def tearDown(self):
super(TestAddSwitchMachine, self).tearDown()
def test_add_switch_machine(self):
add_switch_machine = switch.add_switch_machine(
1,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
expected = '28:6e:d4:46:c4:25'
self.assertEqual(expected, add_switch_machine['mac'])
def test_add_switch_machine_position_args(self):
add_switch_machine = switch.add_switch_machine(
1,
True,
'28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
expected = '28:6e:d4:46:c4:25'
self.assertEqual(expected, add_switch_machine['mac'])
def test_add_switch_machine_session(self):
with database.session() as session:
add_switch_machine = switch.add_switch_machine(
1,
mac='28:6e:d4:46:c4:25',
user=self.user_object,
session=session,
port='1'
)
expected = '28:6e:d4:46:c4:25'
self.assertEqual(expected, add_switch_machine['mac'])
class TestAddSwitchMachines(BaseTest):
"""Test add switch machines."""
def setUp(self):
super(TestAddSwitchMachines, self).setUp()
def tearDown(self):
super(TestAddSwitchMachines, self).tearDown()
def test_add_switch_machines(self):
data = [{
'switch_ip': '0.0.0.0',
'mac': '1a:2b:3c:4d:5e:6f',
'port': '100'
}, {
'switch_ip': '0.0.0.0',
'mac': 'a1:b2:c3:d4:e5:f6',
'port': '101'
}, {
'switch_ip': '0.0.0.0',
'mac': 'a1:b2:c3:d4:e5:f6',
'port': '103'
}, {
'switch_ip': '0.0.0.0',
'mac': 'a1:b2:c3:d4:e5:f6',
'port': '101'
}]
add_switch_machines = switch.add_switch_machines(
data=data, user=self.user_object
)
mac = []
failed_mac = []
for switch_machine in add_switch_machines['switches_machines']:
mac.append(switch_machine['mac'])
for failed_switch in add_switch_machines['fail_switches_machines']:
failed_mac.append(failed_switch['mac'])
expect = ['1a:2b:3c:4d:5e:6f', 'a1:b2:c3:d4:e5:f6']
expect_fail = ['a1:b2:c3:d4:e5:f6']
for item in expect:
self.assertIn(item, mac)
for item in expect_fail:
self.assertIn(item, failed_mac)
class TestListSwitchMachines(BaseTest):
"""Test get switch machines."""
def setUp(self):
super(TestListSwitchMachines, self).setUp()
def tearDown(self):
super(TestListSwitchMachines, self).tearDown()
def test_list_switch_machines(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
list_switch_machines = switch.list_switch_machines(
2,
user=self.user_object,
)
expected = {
'switch_id': 2,
'id': 1,
'mac': '28:6e:d4:46:c4:25',
'switch_ip': '172.29.8.40',
'machine_id': 1,
'port': '1',
'switch_machine_id': 1
}
self.assertIsNotNone(list_switch_machines)
self.assertTrue(
all(item in list_switch_machines[0].items()
for item in expected.items()))
class TestListSwitchmachines(BaseTest):
"""Test list switch machines."""
def setUp(self):
super(TestListSwitchmachines, self).setUp()
def tearDown(self):
super(TestListSwitchmachines, self).tearDown()
def test_list_switch_machines_with_ip_int(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
list_switch_machines = switch.list_switchmachines(
switch_ip_int='2887583784',
user=self.user_object,
)
expected = {'switch_ip': '172.29.8.40'}
self.assertTrue(
all(item in list_switch_machines[0].items()
for item in expected.items()))
def test_list_switch_machines_ip_invalid(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
list_switch_machines = switch.list_switchmachines(
switch_ip_int='test',
user=self.user_object,
)
self.assertEqual(list_switch_machines, [])
def test_list_switch_machines_without_ip(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
list_switch_machines = switch.list_switchmachines(
user=self.user_object
)
expected = {'switch_ip': '172.29.8.40'}
self.assertTrue(
all(item in list_switch_machines[0].items()
for item in expected.items()))
class TestListSwitchMachinesHosts(BaseTest):
"""Test get switch machines hosts."""
def setUp(self):
super(TestListSwitchMachinesHosts, self).setUp()
def tearDown(self):
super(TestListSwitchMachinesHosts, self).tearDown()
def test_list_hosts(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
list_hosts = switch.list_switch_machines_hosts(
2,
user=self.user_object,
)
expected = {
'switch_id': 2,
'id': 1,
'mac': '28:6e:d4:46:c4:25',
'switch_ip': '172.29.8.40',
'machine_id': 1,
'port': '1',
'switch_machine_id': 1
}
self.assertTrue(
all(item in list_hosts[0].items()
for item in expected.items()))
class TestListSwitchmachinesHosts(BaseTest):
"""Test list switch machines hosts."""
def setUp(self):
super(TestListSwitchmachinesHosts, self).setUp()
def tearDown(self):
super(TestListSwitchmachinesHosts, self).tearDown()
def test_list_hosts_with_ip_int(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
list_hosts = switch.list_switchmachines_hosts(
switch_ip_int='2887583784',
user=self.user_object,
)
expected = {'switch_ip': '172.29.8.40'}
self.assertTrue(
all(item in list_hosts[0].items()
for item in expected.items()))
def test_list_hosts_ip_invalid(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
list_hosts = switch.list_switchmachines_hosts(
switch_ip_int='test',
user=self.user_object,
)
self.assertEqual(list_hosts, [])
def test_list_hosts_without_ip(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
list_hosts = switch.list_switchmachines_hosts(
user=self.user_object
)
expected = {'switch_ip': '172.29.8.40'}
self.assertTrue(
all(item in list_hosts[0].items()
for item in expected.items()))
self.assertIsNotNone(list_hosts)
class TestGetSwitchMachine(BaseTest):
"""Test get a switch machines."""
def setUp(self):
super(TestGetSwitchMachine, self).setUp()
def tearDown(self):
super(TestGetSwitchMachine, self).tearDown()
def test_get_switch_machine(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
get_switch_machine = switch.get_switch_machine(
2,
1,
user=self.user_object,
)
self.assertIsNotNone(get_switch_machine)
self.assertEqual(get_switch_machine['mac'], '28:6e:d4:46:c4:25')
class TestGetSwitchmachine(BaseTest):
"""Test get a switch machine."""
def setUp(self):
super(TestGetSwitchmachine, self).setUp()
def tearDown(self):
super(TestGetSwitchmachine, self).tearDown()
def test_get_switchmachine(self):
switch.add_switch_machine(
1,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
get_switchmachine = switch.get_switchmachine(
1,
user=self.user_object,
)
self.assertIsNotNone(get_switchmachine)
self.assertEqual(get_switchmachine['mac'], '28:6e:d4:46:c4:25')
class TestUpdateSwitchMachine(BaseTest):
"""Test update switch machine."""
def setUp(self):
super(TestUpdateSwitchMachine, self).setUp()
def tearDown(self):
super(TestUpdateSwitchMachine, self).tearDown()
def test_update_switch_machine(self):
switch.add_switch_machine(
1,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
switch.update_switch_machine(
1,
1,
tag='test_tag',
user=self.user_object,
)
update_switch_machine = switch.list_switch_machines(
1,
user=self.user_object,
)
expected = {
'switch_id': 1,
'id': 1,
'mac': '28:6e:d4:46:c4:25',
'tag': 'test_tag',
'switch_ip': '0.0.0.0',
'machine_id': 1,
'port': '1',
'switch_machine_id': 1
}
self.assertTrue(
all(item in update_switch_machine[0].items()
for item in expected.items())
)
class TestUpdateSwitchmachine(BaseTest):
"""Test update switch machine."""
def setUp(self):
super(TestUpdateSwitchmachine, self).setUp()
def tearDown(self):
super(TestUpdateSwitchmachine, self).tearDown()
def test_update_switchmachine(self):
switch.add_switch_machine(
1,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
switch.update_switchmachine(
1,
location='test_location',
user=self.user_object,
)
update_switchmachine = switch.list_switchmachines(
user=self.user_object,
)
expected = {
'switch_id': 1,
'id': 1,
'mac': '28:6e:d4:46:c4:25',
'location': 'test_location',
'switch_ip': '0.0.0.0',
'machine_id': 1,
'port': '1',
'switch_machine_id': 1
}
self.assertTrue(
all(item in update_switchmachine[0].items()
for item in expected.items())
)
class TestPatchSwitchMachine(BaseTest):
"""Test patch switch machine."""
def setUp(self):
super(TestPatchSwitchMachine, self).setUp()
def tearDown(self):
super(TestPatchSwitchMachine, self).tearDown()
def test_patch_switch_machine(self):
switch.add_switch_machine(
1,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
switch.patch_switch_machine(
1,
1,
user=self.user_object,
tag={
'patched_tag': 'test_patched_tag'
}
)
switch_patch_switch_machine = switch.list_switch_machines(
1,
user=self.user_object,
)
expected = {'tag': {
'patched_tag': 'test_patched_tag'}
}
self.assertTrue(
all(item in switch_patch_switch_machine[0].items()
for item in expected.items())
)
class TestPatchSwitchmachine(BaseTest):
"""Test patch switch machine."""
def setUp(self):
super(TestPatchSwitchmachine, self).setUp()
def tearDown(self):
super(TestPatchSwitchmachine, self).tearDown()
def test_patch_switchmachine(self):
switch.add_switch_machine(
1,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
switch.patch_switchmachine(
1,
user=self.user_object,
location={
'patched_location': 'test_location'
}
)
patch_switchmachine = switch.list_switchmachines(
user=self.user_object
)
expected = {'location': {
'patched_location': 'test_location'}
}
self.assertTrue(
all(item in patch_switchmachine[0].items()
for item in expected.items())
)
class TestDelSwitchMachine(BaseTest):
"""Test delete switch machines."""
def setUp(self):
super(TestDelSwitchMachine, self).setUp()
def tearDown(self):
super(TestDelSwitchMachine, self).tearDown()
def test_del_switch_machine(self):
switch.add_switch_machine(
1,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
switch.del_switch_machine(
1,
1,
user=self.user_object,
)
del_switch_machine = switch.list_switch_machines(
1,
user=self.user_object,
)
self.assertEqual([], del_switch_machine)
class TestDelSwitchmachine(BaseTest):
"""Test delete switch machines."""
def setUp(self):
super(TestDelSwitchmachine, self).setUp()
def tearDown(self):
super(TestDelSwitchmachine, self).tearDown()
def test_switchmachine(self):
switch.add_switch_machine(
1,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
switch.del_switchmachine(
1,
user=self.user_object,
)
del_switchmachine = switch.list_switchmachines(
user=self.user_object
)
self.assertEqual([], del_switchmachine)
class TestUpdateSwitchMachines(BaseTest):
"""Test update switch machines."""
def setUp(self):
super(TestUpdateSwitchMachines, self).setUp()
def tearDown(self):
super(TestUpdateSwitchMachines, self).tearDown()
def test_update_switch_machines_remove(self):
switch.add_switch(
ip='2887583784',
user=self.user_object,
)
switch.add_switch_machine(
2,
mac='28:6e:d4:46:c4:25',
port='1',
user=self.user_object,
)
switch.update_switch_machines(
2,
remove_machines=1,
user=self.user_object,
)
update_remove = switch.list_switch_machines(
2,
user=self.user_object,
)
self.assertEqual([], update_remove)
if __name__ == '__main__':
flags.init()
logsetting.init()
unittest2.main()
|
#!/usr/bin/env python
"""
QCKTest.py
==================
::
QCerenkovIntegralTest
ipython -i tests/QCKTest.py
Hmm largest c2 for different BetaInverse always at 7.6eV
just to left of rindex peak.
Possibly there is a one bin shifted issue, that is showing up
the most in the region where rindex is changing fastest.
Perhaps could check this with an artificial rindex pattern,
such as a step function.
Actually just setting BetaInverse to 1.792 just less than rmx 1.793
is informative as then there is only a very small range
of possible energies.
Hmm: generating millions of photons just there is a kinda
extreme test, as in reality will be less than 1.
Hmm maybe should exclude BetaInverse where the average number
of photons is less than 1
::
In [18]: np.c_[t.s2c[-120:-100,-1],t.bis[-120:-100]]
Out[18]:
array([[8.45 , 1.113, 1.746],
[8.448, 1.1 , 1.746],
[8.446, 1.087, 1.747],
See also::
ana/rindex.py
ana/ckn.py
"""
import os, logging, numpy as np
import matplotlib.pyplot as plt   # needed by the *_plot methods below
from opticks.ana.nbase import chi2
from opticks.ana.edges import divide_bins
from opticks.ana.rsttable import RSTTable
log = logging.getLogger(__name__)
class QCKTest(object):
FOLD = os.path.expandvars("/tmp/$USER/opticks/QCerenkovIntegralTest")
def __init__(self, approach="UpperCut", use_icdf=False):
assert approach in ["UpperCut", "SplitBin"]
self.approach = approach
self.use_icdf = use_icdf
base_path = os.path.join(self.FOLD, "test_makeICDF_%s" % approach)
if not os.path.exists(base_path):
log.fatal("base_path %s does not exist" % base_path)
assert 0
pass
names = os.listdir(base_path)
log.info("loading from base_path %s " % base_path)
for name in filter(lambda _:_.endswith(".npy"), names):
path = os.path.join(base_path, name)
stem = name[:-4]
a = np.load(path)
print( " t.%5s %s " % (stem, str(a.shape)))
setattr(self, stem, a )
pass
sample_base = os.path.join(base_path, "QCKTest")
self.sample_base = sample_base
pass
def bislist(self):
        names = sorted(os.listdir(os.path.expandvars(self.sample_base)))
        names = [n for n in names if not n.startswith("bis")]
        print(names)
        bis = list(map(float, names))
        return bis
def s2cn_plot(self, istep):
"""
        :param istep: stride over the first-dimension indices, which correspond to BetaInverse values
"""
s2cn = self.s2cn
ii = np.arange( 0,len(s2cn), istep )
title_ = "QCKTest.py : s2cn_plot : s2cn.shape %s istep %d " % (str(s2cn.shape), istep)
desc_ = "JUNO LS : Cerenkov S2 integral CDF for sample of BetaInverse values"
title = "\n".join([title_, desc_])
fig, ax = plt.subplots(figsize=[12.8, 7.2])
fig.suptitle(title)
for i in ii:
ax.plot( s2cn[i,:,0], s2cn[i,:,1] , label="%d" % i )
pass
#ax.legend()
fig.show()
def one_s2cn_plot(self, BetaInverse ):
s2cn = self.s2cn
ibi = self.getBetaInverseIndex(BetaInverse)
title_ = "QCKTest.py : one_s2cn_plot BetaInverse %6.4f ibi %d s2cn[ibi] %s " % (BetaInverse, ibi, str(s2cn[ibi].shape))
desc_ = " cdf (normalized s2 integral) for single BetaInverse "
title = "\n".join([title_, desc_]) ;
fig, ax = plt.subplots(figsize=[12.8, 7.2])
fig.suptitle(title)
ax.plot( s2cn[ibi,:,0], s2cn[ibi,:,1] , label="s2cn[%d]" % ibi )
ax.legend()
fig.show()
def getBetaInverseIndex(self, BetaInverse):
bis = self.bis
ibi = np.abs(bis - BetaInverse).argmin()
return ibi
def rindex_plot(self):
ri = self.rindex
c2 = self.c2
c2poppy = self.c2poppy
bi = self.bi
edges = self.edges
c2riscale = self.c2riscale
title = "\n".join(["QCKTest.py : rindex_plot"]) ;
fig, ax = plt.subplots(figsize=[12.8, 7.2])
fig.suptitle(title)
ax.scatter( ri[:,0], ri[:,1], label="ri" )
ax.plot( ri[:,0], ri[:,1], label="ri" )
ax.plot( edges[:-1], c2*c2riscale, label="c2", drawstyle="steps-post" )
ax.plot( [ri[0,0], ri[-1,0]], [bi, bi], label="bi %6.4f " % bi )
bi0 = 1.75
ax.plot( [ri[0,0], ri[-1,0]], [bi0, bi0], label="bi0 %6.4f " % bi0 )
ylim = ax.get_ylim()
for i in c2poppy:
ax.plot( [edges[i], edges[i]], ylim , label="edge %d " % i, linestyle="dotted" )
ax.plot( [edges[i+1], edges[i+1]], ylim , label="edge+1 %d " % (i+1), linestyle="dotted" )
pass
ax.legend()
fig.show()
def en_load(self, bi):
bi_base = os.path.expandvars("%s/%6.4f" % (self.sample_base, bi) )
use_icdf = self.use_icdf
        ext = ["s2cn", "icdf"][int(use_icdf)]
log.info("load from bi_base %s ext %s " % (bi_base, ext))
el = np.load(os.path.join(bi_base,"test_energy_lookup_many_%s.npy" % ext ))
es = np.load(os.path.join(bi_base,"test_energy_sample_many.npy"))
tl = np.load(os.path.join(bi_base,"test_energy_lookup_many_tt.npy"))
ts = np.load(os.path.join(bi_base,"test_energy_sample_many_tt.npy"))
self.bi_base = bi_base
self.el = el
self.es = es
self.tl = tl
self.ts = ts
def check_s2c_monotonic(self):
s2c = self.s2c
for i in range(len(s2c)):
w = np.where( np.diff(s2c[i,:,2]) < 0 )[0]
print(" %5d : %s " % (i, str(w)))
pass
def en_compare(self, bi, num_edges=101):
"""
Compare the energy samples created by QCKTest for a single BetaInverse
"""
ri = self.rindex
el = self.el
es = self.es
s2cn = self.s2cn
avph = self.avph
s2c = self.s2c
ibi = self.getBetaInverseIndex(bi)
approach = self.approach
if approach == "UpperCut": # see QCerenkov::getS2Integral_UpperCut
en_slot = 0
s2_slot = 1
cdf_slot = 2
emn = s2cn[ibi, 0,en_slot]
emx = s2cn[ibi,-1,en_slot]
avp = s2c[ibi, -1,cdf_slot]
elif approach == "SplitBin": # see QCerenkov::getS2Integral_SplitBin
en_slot = 0 # en_b
s2_slot = 5 # s2_b
cdf_slot = 7 # s2integral
emn = avph[ibi, 1]
emx = avph[ibi, 2]
avp = avph[ibi, 3]
else:
assert 0, "unknown approach %s " % approach
pass
self.en_slot = en_slot
self.s2_slot = s2_slot
self.cdf_slot = cdf_slot
self.emn = emn
self.emx = emx
self.avp = avp
edom = emx - emn
edif = edom/(num_edges-1)
edges0 = np.linspace( emn, emx, num_edges ) # across Cerenkov permissable range
edges = np.linspace( emn-edif, emx+edif, num_edges + 2 ) # push out with extra bins either side
#edges = np.linspace(1.55,15.5,100) # including rightmost
#edges = np.linspace(1.55,15.5,200) # including rightmost
#edges = divide_bins( ri[:,0], mul=4 )
hl = np.histogram( el, bins=edges )
hs = np.histogram( es, bins=edges )
c2, c2n, c2c = chi2( hl[0], hs[0] )
ndf = max(c2n - 1, 1)
c2sum = c2.sum()
c2p = c2sum/ndf
c2label = "chi2/ndf %4.2f [%d] %.2f " % (c2p, ndf, c2sum)
c2amx = c2.argmax()
rimax = ri[:,1].max()
c2max = c2.max()
c2riscale = rimax/c2max
c2poppy = np.where( c2 > c2max/3. )[0]
hmax = max(hl[0].max(), hs[0].max())
c2hscale = hmax/c2max
cf = " c2max:%4.2f c2amx:%d c2[c2amx] %4.2f edges[c2amx] %5.3f edges[c2amx+1] %5.3f " % (c2max, c2amx, c2[c2amx], edges[c2amx], edges[c2amx+1] )
print("cf", cf)
#print("c2", c2)
print("c2n", c2n)
print("c2c", c2c)
qq = "hl hs c2 c2label c2n c2c c2riscale c2hscale hmax edges c2max c2poppy cf bi ibi"
for q in qq.split():
globals()[q] = locals()[q]
setattr(self, q, locals()[q] )
pass
t = self
print("np.c_[t.c2, t.hs[0], t.hl[0]][t.c2 > 0]")
print(np.c_[t.c2, t.hs[0], t.hl[0]][t.c2 > 0] )
return [bi, c2sum, ndf, c2p, emn, emx, avp ]
LABELS = "bi c2sum ndf c2p emn emx avp".split()
def en_plot(self, c2overlay=0., c2poppy_=True):
"""
        Using divide_bins is good for chi2 checking as it prevents
bin migrations or "edge" effects. But it leads to rather
differently sized bins resulting in a strange histogram shape.
"""
ri = self.rindex
s2c = self.s2c
s2cn = self.s2cn
bi = self.bi
ibi = self.ibi
c2 = self.c2
c2poppy = self.c2poppy
c2hscale = self.c2hscale
hmax = self.hmax
hl = self.hl
hs = self.hs
edges = self.edges
en_slot = self.en_slot
s2_slot = self.s2_slot
cdf_slot = self.cdf_slot
emn = self.emn
emx = self.emx
avp = self.avp
icdf_shape = str(s2cn.shape)
title_ = ["QCKTest.py : en_plot : lookup cf sampled : icdf_shape %s : %s " % ( icdf_shape, self.bi_base ),
"%s : %s " % ( self.c2label, self.cf),
"approach:%s use_icdf:%s avp %6.2f " % (self.approach, self.use_icdf, avp )
]
title = "\n".join(title_)
print(title)
fig, ax = plt.subplots(figsize=[12.8, 7.2])
fig.suptitle(title)
ax.plot( edges[:-1], hl[0], drawstyle="steps-post", label="lookup" )
ax.plot( edges[:-1], hs[0], drawstyle="steps-post", label="sampled" )
if c2overlay != 0.:
ax.plot( edges[:-1], c2*c2hscale*c2overlay , label="c2", drawstyle="steps-post" )
pass
ylim = ax.get_ylim()
xlim = ax.get_xlim()
s2max = s2cn[ibi,:,s2_slot].max()
ax.plot( s2cn[ibi,:,en_slot], s2cn[ibi,:,s2_slot]*hmax/s2max , label="s2cn[%d,:,%d]*hmax/s2max (s2)" % (ibi,s2_slot) )
ax.plot( s2cn[ibi,:,en_slot], s2cn[ibi,:,cdf_slot]*hmax , label="s2cn[%d,:,%d]*hmax (cdf)" % (ibi, cdf_slot) )
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.plot( [emx, emx], ylim, linestyle="dotted" )
ax.plot( [emn, emn], ylim, linestyle="dotted" )
if c2poppy_:
for i in c2poppy:
ax.plot( [edges[i], edges[i]], ylim , label="c2poppy edge %d " % i , linestyle="dotted" )
ax.plot( [edges[i+1], edges[i+1]], ylim , label="c2poppy edge+1 %d " % (i+1), linestyle="dotted" )
pass
pass
ax.legend()
fig.show()
figpath = os.path.join(self.bi_base, "en_plot.png")
log.info("savefig %s " % figpath)
fig.savefig(figpath)
def compare(t, bis):
res = np.zeros( (len(bis), len(t.LABELS)) )
for i, bi in enumerate(bis):
t.en_load(bi)
res[i] = t.en_compare(bi)
t.en_plot(c2overlay=0.5, c2poppy_=False)
#t.rindex_plot()
pass
t.res = res
def __str__(self):
title = "%s use_icdf:%s" % ( self.sample_base, self.use_icdf )
underline = "=" * len(title)
rst = RSTTable.Rdr(self.res, self.LABELS )
return "\n".join(["", title, underline, "", rst])
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
#approach = "UpperCut"
approach = "SplitBin"
use_icdf = False
t = QCKTest(approach=approach, use_icdf=use_icdf)
#t.s2cn_plot(istep=20)
bis = t.bislist()
#bis = bis[-2:-1]
#bis = [1.45,]
#bis = [1.6,]
t.compare(bis)
print(t)
|
#!/usr/local/autopkg/python
#
# Copyright 2020 Drew Coobs
# WARNING: This is an independent project and is not supported by CrowdStrike.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import ssl
import sys
import urllib.error
from urllib import parse, request
from autopkglib import Processor, ProcessorError
__all__ = ["CrowdStrikeDownloadAPIProvider"]
class CrowdStrikeDownloadAPIProvider(Processor):
    description = (
        "Downloads the latest version of the CrowdStrike sensor. "
        "WARNING: This is an independent project and is not supported by CrowdStrike."
    )
input_variables = {
"client_id": {
"required": True,
"description": (
"CrowdStrike API Client ID"
),
},
"client_secret": {
"required": True,
"description": (
"CrowdStrike API Secret"
),
},
"output_file": {
"required": True,
"description": (
"Name of output file"
),
},
"platform": {
"required": False,
"default": 'mac',
"description": (
"Optional string to set the OS platform to download the latest sensor for."
"Options: 'mac', 'windows', or 'linux'"
"Defaults to 'mac'"
),
},
"cloud": {
"required": False,
"default": 'US',
"description": (
"Optional string to set the cloud to send commands to."
"Options: 'US', 'US-2', 'EU', or 'USFed'"
"Defaults to 'US'"
),
},
}
output_variables = {
"pathname": {
"description": ("Path to the downloaded sensor installer")
},
"version": {
"description": (
"Version info parsed from the API json metadata"
)
},
}
__doc__ = description
def cloudurl(self, cloud):
switcher ={
'US':'api.crowdstrike.com',
'US-2':'api.us-2.crowdstrike.com',
'USFed':'api.laggar.gcw.crowdstrike.com',
'EU':'api.eu-1.crowdstrike.com'
}
return switcher.get(cloud, 'api.crowdstrike.com')
def urlopen_func(self, req):
try:
# Disable default SSL validation for urllib
ssl._create_default_https_context = ssl._create_unverified_context
urlopen_response = request.urlopen(req)
return urlopen_response
        except urllib.error.HTTPError as err:
            self.output(err)
            try:
                error = json.loads(err.read())
                self.output(f"API message: {error['errors'][0]['message']}")
            except (ValueError, KeyError, IndexError):
                pass
            raise ProcessorError("HTTP error making API call!")
def main(self):
cloudapiurl = self.cloudurl(self.env["cloud"])
### API URLs ###
auth_token_api_url = 'https://{}/oauth2/token'.format(cloudapiurl)
sensor_hash_api_url = "https://{}/sensors/combined/installers/v1?offset=0&limit=1&filter=platform%3A%22{}%22".format(cloudapiurl,self.env["platform"])
sensor_download_api_url = "https://{}/sensors/entities/download-installer/v1?id={}"
### Obtain API auth token ###
token_headers = {
'accept': 'application/json',
'Content-Type': 'application/x-www-form-urlencoded',
}
token_data = {
'client_id': self.env["client_id"],
'client_secret': self.env["client_secret"]
}
token_data = parse.urlencode(token_data)
token_data = token_data.encode('ascii')
req = request.Request(auth_token_api_url, headers=token_headers, data=token_data)
response = self.urlopen_func(req)
token_parsed = json.loads(response.read())
auth_token = token_parsed['access_token']
### Obtain installer hash for latest sensor ###
sensor_hash_headers = {
'accept': 'application/json',
'authorization': "bearer {}".format(auth_token),
'Content-Type': 'application/json',
}
req = request.Request(sensor_hash_api_url, headers=sensor_hash_headers)
response = self.urlopen_func(req)
sensor_hash_parsed = json.loads(response.read())
sensor_hash_name = sensor_hash_parsed['resources'][0]['name']
sensor_hash_version = sensor_hash_parsed['resources'][0]['version']
sensor_hash_sha256 = sensor_hash_parsed['resources'][0]['sha256']
self.env["version"] = sensor_hash_version
### Download the latest sensor ###
self.output("Downloading %s" % sensor_hash_name)
self.output("Version: %s" % self.env["version"])
download_headers = {
'accept': 'application/json',
'authorization': "bearer {}".format(auth_token),
'Content-Type': 'application/json',
}
sensor_download_url = sensor_download_api_url.format(cloudapiurl,sensor_hash_sha256)
req = request.Request(sensor_download_url, headers=download_headers)
response = self.urlopen_func(req)
with open(self.env["output_file"], 'wb') as file:
file.write(response.read())
self.env["pathname"] = self.env["output_file"]
if __name__ == "__main__":
    PROCESSOR = CrowdStrikeDownloadAPIProvider()
PROCESSOR.execute_shell()
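# In an AutoPkg recipe this processor would be referenced by its class name,
# e.g. Processor: CrowdStrikeDownloadAPIProvider, with client_id,
# client_secret, and output_file supplied as recipe input variables
# (recipe wiring assumed; see the AutoPkg documentation).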
|
"""
Copyright 2020 Robert MacGregor
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository")
def doxygen():
maybe(
new_git_repository,
name = "doxygen",
remote = "https://github.com/doxygen/doxygen.git",
# Tag Release_1_8_20
commit = "f246dd2f1c58eea39ea3f50c108019e4d4137bd5",
build_file_content = """
load("@rules_foreign_cc//tools/build_defs:cmake.bzl", "cmake_external")
filegroup(
name = "doxygen_files",
srcs = glob(
include=[
"**/*"
]
)
)
cmake_external(
name = "doxygen",
lib_source = ":doxygen_files",
generate_crosstool_file = select({
"@bazel_tools//src/conditions:windows": True,
"//conditions:default": False
}),
cmake_options = select({
"@bazel_tools//src/conditions:windows": ["-GNinja"],
"//conditions:default": None
}),
make_commands = select({
"@bazel_tools//src/conditions:windows": [
"ninja",
"ninja install"
],
"//conditions:default": [
"make -j$(nproc)",
"make install"
]
}),
binaries = select({
"@bazel_tools//src/conditions:windows": [
"doxygen.exe"
],
# Linux
"//conditions:default": [
"doxygen"
]
}),
visibility = ["//visibility:public"]
)
"""
)
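# Typical usage (workspace layout assumed): load this macro from a WORKSPACE
# file and call it to register the @doxygen repository, e.g.
#   load("//third_party:doxygen.bzl", "doxygen")
#   doxygen()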
|
# coding: utf-8
'''
Determine whether an integer is a palindrome. Do this without extra space.
'''
### Check whether an integer is a palindrome; note that negative numbers are not palindromes.
import math
def isPalindrome(x):
"""
:type x: int
:rtype: bool
"""
if x < 0:
return False
n = 0
    ## n is the index of x's highest decimal digit (digit count minus 1)
for i in range(0, 100):
if x / math.pow(10, i) > 0 and x / math.pow(10, i) < 10:
n = i
break
i = 1
j = n + 1
while i <= j:
if math.floor(x % math.pow(10, i) / math.pow(10, i - 1)) != math.floor(
x % math.pow(10, j) / math.pow(10, j - 1)):
return False
i += 1
j -= 1
return True
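# A constant-space alternative (an addition, not part of the original):
# reverse the integer arithmetically and compare it with the input.
def isPalindromeRev(x):
    if x < 0:
        return False
    rev, orig = 0, x
    while x > 0:
        rev = rev * 10 + x % 10  # append the lowest digit of x to rev
        x //= 10                 # drop the lowest digit
    return rev == orig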
print(isPalindrome(1))
|
from keras import backend as K
from keras.models import load_model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes
from yad2k.models.keras_yolo import yolo_head
from app_utils import yolo_eval
from os import listdir
from os.path import isfile, join
import os
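# Make sure the output directory exists before predictions are saved
# (an added safeguard; the original assumed out/ was already present).
os.makedirs("out", exist_ok=True)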
# Create a session to start graph
sess = K.get_session()
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)
# Read yolo model
yolo_model = load_model("model_data/yolo.h5")
# Preprocessing on yolo_model.output
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
# Filter boxes on NMS and thresholding
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
def predict(sess, image_file):
"""
    Run the graph stored in "sess" to predict boxes for "image_file". Print and plot the predictions.
Arguments:
sess -- tensorflow/Keras session containing the YOLO graph
image_file -- name of an image stored in the "images" folder.
Returns:
out_scores -- tensor of shape (None, ), scores of the predicted boxes
out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
out_classes -- tensor of shape (None, ), class index of the predicted boxes
"""
# Preprocess your image
image, image_data = preprocess_image("images/" + image_file, model_image_size=(608, 608))
# Run the yolo model
out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data,
K.learning_phase(): 0})
# Generate colors for drawing bounding boxes.
colors = generate_colors(class_names)
# Draw bounding boxes on the image file
draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
# Save the predicted bounding box on the image
image.save(os.path.join("out", image_file), quality=90)
return out_scores, out_boxes, out_classes
# Run predict for all files in images/ directory and save it to out/ directory
all_outputs = [predict(sess,f) for f in listdir('images/') if isfile(join('images/', f))]
print('All outputs are saved in out/ directory')
|
# Modified by Lijie
# Email:glee1018@buaa.edu.cn
# Date Modified: 2019-03-20 21:54
################################################################################
# MIT License
#
# Copyright (c) 2018 Tom Runia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to conditions.
#
# Author: Tom Runia
# Date Created: 2018-12-04
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.special import factorial  # moved from scipy.misc, which dropped factorial in SciPy 1.3
import torch
import torchvision
import steerable.math_utils as math_utils
pointOp = math_utils.pointOp
################################################################################
class SCFpyr_NumPy():
'''
Modified by Li Jie
---------------------------------------------------------------------------------------------
This is a modified version of buildSFpyr, that constructs a
complex-valued steerable pyramid using Hilbert-transform pairs
of filters. Note that the imaginary parts will *not* be steerable.
Description of this transform appears in: Portilla & Simoncelli,
International Journal of Computer Vision, 40(1):49-71, Oct 2000.
Further information: http://www.cns.nyu.edu/~eero/STEERPYR/
Modified code from the perceptual repository:
https://github.com/andreydung/Steerable-filter
This code looks very similar to the original Matlab code:
https://github.com/LabForComputationalVision/matlabPyrTools/blob/master/buildSCFpyr.m
Also looks very similar to the original Python code presented here:
https://github.com/LabForComputationalVision/pyPyrTools/blob/master/pyPyrTools/SCFpyr.py
function:
build: build csp, return the result after ifft
reconstruct: input from build
build_c: build csp, return the dft result
reconstruct_c: input from build_c
pyr_info: print coeff information
BatchCsp: Do batch CSP
Batch_recon: Reconstruct images batch from coeffs_batch
'''
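    # Minimal usage sketch (assumes a 2-D float image `im`):
    #   pyr = SCFpyr_NumPy(height=5, nbands=4, scale_factor=2)
    #   coeff = pyr.build(im)          # [hi-pass, [bands ...], ..., lo-pass]
    #   rec = pyr.reconstruct(coeff)   # ~= im up to numerical error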
def __init__(self, height=5, nbands=4, scale_factor=2):
self.nbands = nbands # number of orientation bands
self.height = height # including low-pass and high-pass
self.scale_factor = scale_factor
# Cache constants
self.lutsize = 1024
self.Xcosn = np.pi * np.array(range(-(2*self.lutsize+1), (self.lutsize+2)))/self.lutsize
# self.alpha = (self.Xcosn + np.pi) % (2*np.pi) - np.pi
self.alpha = np.mod(self.Xcosn + np.pi,2*np.pi) - np.pi
################################################################################
# Construction of Steerable Pyramid
def build(self, im):
        ''' Decomposes an image into its complex steerable pyramid.
Args:
            im (np.ndarray): single image [H,W]
Returns:
pyramid: list containing np.ndarray objects storing the pyramid
'''
assert len(im.shape) == 2, 'Input im must be grayscale'
height, width = im.shape
# Check whether image size is sufficient for number of levels
if self.height > int(np.floor(np.log2(min(width, height))/np.log2(self.scale_factor)) - 2):
raise RuntimeError('Cannot build {} levels, image too small.'.format(self.height))
# Prepare a grid
log_rad, angle = math_utils.prepare_grid(height, width)
# Radial transition function (a raised cosine in log-frequency):
Xrcos, Yrcos = math_utils.rcosFn(1, -0.5)
Yrcos = np.sqrt(Yrcos)
YIrcos = np.sqrt(1 - Yrcos**2)
lo0mask = pointOp(log_rad, YIrcos, Xrcos)
hi0mask = pointOp(log_rad, Yrcos, Xrcos)
# Shift the zero-frequency component to the center of the spectrum.
imdft = np.fft.fftshift(np.fft.fft2(im))
# Low-pass
lo0dft = imdft * lo0mask
# Recursive build the steerable pyramid
coeff = self._build_levels(lo0dft, log_rad, angle, Xrcos, Yrcos, self.height-1, np.array(im.shape))
# High-pass
hi0dft = imdft * hi0mask
hi0 = np.fft.ifft2(np.fft.ifftshift(hi0dft))
coeff.insert(0, hi0.real)
return coeff
def _build_levels(self, lodft, log_rad, angle, Xrcos, Yrcos, height, img_dims):
        # Modified by Li Jie:
        # add multi-scale support, for example scale_factor=2**(1/2)
if height <= 1:
# Low-pass
lo0 = np.fft.ifft2(np.fft.ifftshift(lodft))
coeff = [lo0.real]
else:
Xrcos = Xrcos - np.log2(self.scale_factor)
####################################################################
####################### Orientation bandpass #######################
####################################################################
himask = pointOp(log_rad, Yrcos, Xrcos)
order = self.nbands - 1
const = np.power(2, 2*order) * np.square(factorial(order)) / (self.nbands * factorial(2*order))
Ycosn = 2*np.sqrt(const) * np.power(np.cos(self.Xcosn), order) * (np.abs(self.alpha) < np.pi/2)
# Loop through all orientation bands
orientations = []
for b in range(self.nbands):
anglemask = pointOp(angle, Ycosn, self.Xcosn + np.pi*b/self.nbands)
                banddft = np.power(complex(0, -1), self.nbands - 1) * lodft * anglemask * himask
band = np.fft.ifft2(np.fft.ifftshift(banddft))
orientations.append(band)
####################################################################
######################## Subsample lowpass #########################
####################################################################
            dims = np.array(lodft.shape)
            ctr = np.ceil((dims + 0.5) / 2)
            lodims = np.round(img_dims / (self.scale_factor ** (self.height - height)))
            loctr = np.ceil((lodims + 0.5) / 2)
            lostart = (ctr - loctr).astype(int)
            loend = (lostart + lodims).astype(int)
# Selection
log_rad = log_rad[lostart[0]:loend[0], lostart[1]:loend[1]]
angle = angle[lostart[0]:loend[0], lostart[1]:loend[1]]
lodft = lodft[lostart[0]:loend[0], lostart[1]:loend[1]]
# Subsampling in frequency domain
YIrcos = np.abs(np.sqrt(1 - Yrcos**2))
lomask = pointOp(log_rad, YIrcos, Xrcos)
lodft = lomask * lodft
####################################################################
####################### Recursion next level #######################
####################################################################
coeff = self._build_levels(lodft, log_rad, angle, Xrcos, Yrcos, height-1,img_dims)
coeff.insert(0, orientations)
return coeff
############################################################################
########################### RECONSTRUCTION #################################
############################################################################
def reconstruct(self, coeff):
if self.nbands != len(coeff[1]):
raise Exception("Unmatched number of orientations")
img_dims=np.array(coeff[0].shape)
height, width = coeff[0].shape
log_rad, angle = math_utils.prepare_grid(height, width)
Xrcos, Yrcos = math_utils.rcosFn(1, -0.5)
Yrcos = np.sqrt(Yrcos)
YIrcos = np.sqrt(np.abs(1 - Yrcos**2))
lo0mask = pointOp(log_rad, YIrcos, Xrcos)
hi0mask = pointOp(log_rad, Yrcos, Xrcos)
tempdft = self._reconstruct_levels(coeff[1:], log_rad, Xrcos, Yrcos, angle, img_dims)
hidft = np.fft.fftshift(np.fft.fft2(coeff[0]))
outdft = tempdft * lo0mask + hidft * hi0mask
# outdft = tempdft * lo0mask
reconstruction = np.fft.ifft2(np.fft.ifftshift(outdft))
reconstruction = reconstruction.real
return reconstruction
def _reconstruct_levels(self, coeff, log_rad, Xrcos, Yrcos, angle, img_dims):
if len(coeff) == 1:
dft = np.fft.fft2(coeff[0])
dft = np.fft.fftshift(dft)
return dft
Xrcos = Xrcos - np.log2(self.scale_factor)
# print('len coeff:',len(coeff))
####################################################################
####################### Orientation Residue ########################
####################################################################
himask = pointOp(log_rad, Yrcos, Xrcos)
lutsize = 1024
Xcosn = np.pi * np.array(range(-(2*lutsize+1), (lutsize+2)))/lutsize
order = self.nbands - 1
const = np.power(2, 2*order) * np.square(factorial(order)) / (self.nbands * factorial(2*order))
Ycosn = np.sqrt(const) * np.power(np.cos(Xcosn), order)
orientdft = np.zeros(coeff[0][0].shape)
for b in range(self.nbands):
anglemask = pointOp(angle, Ycosn, Xcosn + np.pi * b/self.nbands)
banddft = np.fft.fftshift(np.fft.fft2(coeff[0][b]))
            orientdft = orientdft + np.power(complex(0, 1), order) * banddft * anglemask * himask
####################################################################
########## Lowpass component are upsampled and convoluted ##########
####################################################################
        dims = np.array(coeff[0][0].shape)
        ctr = np.ceil((dims + 0.5) / 2)
        lodims = np.round(img_dims / (self.scale_factor ** (self.height - len(coeff))))
        loctr = np.ceil((lodims + 0.5) / 2)
        lostart = (ctr - loctr).astype(int)
        loend = (lostart + lodims).astype(int)
log_rad = log_rad[lostart[0]:loend[0], lostart[1]:loend[1]]
angle = angle[lostart[0]:loend[0], lostart[1]:loend[1]]
YIrcos = np.sqrt(np.abs(1 - Yrcos**2))
lomask = pointOp(log_rad, YIrcos, Xrcos)
################################################################################
# Recursive call for image reconstruction
nresdft = self._reconstruct_levels(coeff[1:], log_rad, Xrcos, Yrcos, angle, img_dims)
resdft = np.zeros(dims, 'complex')
resdft[lostart[0]:loend[0], lostart[1]:loend[1]] = nresdft * lomask
return resdft + orientdft
def build_c(self, im):
''' Decomposes an image into its complex steerable pyramid.
Args:
    im (np.ndarray): single grayscale image [H,W]
Returns:
    pyramid: list containing complex np.ndarray objects storing the pyramid
'''
assert len(im.shape) == 2, 'Input im must be grayscale'
height, width = im.shape
# Check whether image size is sufficient for number of levels
if self.height > int(np.floor(np.log2(min(width, height))/np.log2(self.scale_factor)) - 2):
raise RuntimeError('Cannot build {} levels, image too small.'.format(self.height))
# Prepare a grid
log_rad, angle = math_utils.prepare_grid(height, width)
# Radial transition function (a raised cosine in log-frequency):
Xrcos, Yrcos = math_utils.rcosFn(1, -0.5)
Yrcos = np.sqrt(Yrcos)
YIrcos = np.sqrt(1 - Yrcos**2)
lo0mask = pointOp(log_rad, YIrcos, Xrcos)
hi0mask = pointOp(log_rad, Yrcos, Xrcos)
# Shift the zero-frequency component to the center of the spectrum.
imdft = np.fft.fftshift(np.fft.fft2(im))
# Low-pass
lo0dft = imdft * lo0mask
# Recursive build the steerable pyramid
coeff = self._build_levels_c(lo0dft, log_rad, angle, Xrcos, Yrcos, self.height-1, np.array(im.shape))
# High-pass
hi0dft = imdft * hi0mask
# hi0 = np.fft.ifft2(np.fft.ifftshift(hi0dft))
# coeff.insert(0, hi0.real)
coeff.insert(0, hi0dft)
return coeff
def _build_levels_c(self, lodft, log_rad, angle, Xrcos, Yrcos, height, img_dims):
'''
Modified by Li Jie:
adds multi-scale support, e.g. scale_factor = 2**(1/2)
'''
if height <= 1:
coeff = [lodft]
else:
Xrcos = Xrcos - np.log2(self.scale_factor)
####################################################################
####################### Orientation bandpass #######################
####################################################################
himask = pointOp(log_rad, Yrcos, Xrcos)
order = self.nbands - 1
const = np.power(2, 2*order) * np.square(factorial(order)) / (self.nbands * factorial(2*order))
Ycosn = 2*np.sqrt(const) * np.power(np.cos(self.Xcosn), order) * (np.abs(self.alpha) < np.pi/2)
# Loop through all orientation bands
orientations = []
for b in range(self.nbands):
anglemask = pointOp(angle, Ycosn, self.Xcosn + np.pi*b/self.nbands)
banddft = np.power(complex(0, -1), self.nbands - 1) * lodft * anglemask * himask  # builtin complex(); np.complex was removed in NumPy 1.20+
orientations.append(banddft)
####################################################################
######################## Subsample lowpass #########################
####################################################################
dims=np.array(lodft.shape)
ctr=np.ceil((dims+0.5)/2)
lodims=np.round(img_dims/(self.scale_factor**(self.height-height)))
loctr=np.ceil((lodims+0.5)/2)
lostart = (ctr - loctr).astype(int)
loend = (lostart + lodims).astype(int)
# Selection
log_rad = log_rad[lostart[0]:loend[0], lostart[1]:loend[1]]
angle = angle[lostart[0]:loend[0], lostart[1]:loend[1]]
lodft = lodft[lostart[0]:loend[0], lostart[1]:loend[1]]
# Subsampling in frequency domain
YIrcos = np.abs(np.sqrt(1 - Yrcos**2))
lomask = pointOp(log_rad, YIrcos, Xrcos)
lodft = lomask * lodft
####################################################################
####################### Recursion next level #######################
####################################################################
coeff = self._build_levels_c(lodft, log_rad, angle, Xrcos, Yrcos, height-1,img_dims)
coeff.insert(0, orientations)
return coeff
############################################################################
########################### RECONSTRUCTION #################################
############################################################################
def reconstruct_c(self, coeff):
if self.nbands != len(coeff[1]):
raise Exception("Unmatched number of orientations")
img_dims=np.array(coeff[0].shape)
height, width = coeff[0].shape
log_rad, angle = math_utils.prepare_grid(height, width)
Xrcos, Yrcos = math_utils.rcosFn(1, -0.5)
Yrcos = np.sqrt(Yrcos)
YIrcos = np.sqrt(np.abs(1 - Yrcos**2))
lo0mask = pointOp(log_rad, YIrcos, Xrcos)
hi0mask = pointOp(log_rad, Yrcos, Xrcos)
tempdft = self._reconstruct_levels_c(coeff[1:], log_rad, Xrcos, Yrcos, angle, img_dims)
hidft = coeff[0]
outdft = tempdft * lo0mask + hidft * hi0mask
# outdft = tempdft * lo0mask
reconstruction = np.fft.ifft2(np.fft.ifftshift(outdft))
reconstruction = reconstruction.real
return reconstruction
def _reconstruct_levels_c(self, coeff, log_rad, Xrcos, Yrcos, angle, img_dims):
if len(coeff) == 1:
return coeff[0]
Xrcos = Xrcos - np.log2(self.scale_factor)
####################################################################
####################### Orientation Residue ########################
####################################################################
himask = pointOp(log_rad, Yrcos, Xrcos)
lutsize = 1024
Xcosn = np.pi * np.array(range(-(2*lutsize+1), (lutsize+2)))/lutsize
order = self.nbands - 1
const = np.power(2, 2*order) * np.square(factorial(order)) / (self.nbands * factorial(2*order))
Ycosn = np.sqrt(const) * np.power(np.cos(Xcosn), order)
orientdft = np.zeros(coeff[0][0].shape)
for b in range(self.nbands):
anglemask = pointOp(angle, Ycosn, Xcosn + np.pi * b/self.nbands)
banddft = coeff[0][b]
orientdft = orientdft + np.power(complex(0, 1), order) * banddft * anglemask * himask
####################################################################
########## Lowpass component is upsampled and convolved ###########
####################################################################
dims=np.array(coeff[0][0].shape)
ctr=np.ceil((dims+0.5)/2)
lodims=np.round(img_dims/(self.scale_factor**(self.height-len(coeff))))
loctr=np.ceil((lodims+0.5)/2)
lostart = (ctr - loctr).astype(int)
loend = (lostart + lodims).astype(int)
log_rad = log_rad[lostart[0]:loend[0], lostart[1]:loend[1]]
angle = angle[lostart[0]:loend[0], lostart[1]:loend[1]]
YIrcos = np.sqrt(np.abs(1 - Yrcos**2))
lomask = pointOp(log_rad, YIrcos, Xrcos)
################################################################################
# Recursive call for image reconstruction
nresdft = self._reconstruct_levels_c(coeff[1:], log_rad, Xrcos, Yrcos, angle, img_dims)
resdft = np.zeros(dims, 'complex')
resdft[lostart[0]:loend[0], lostart[1]:loend[1]] = nresdft * lomask
return resdft + orientdft
@staticmethod
def pyr_info(coeff):
'''
Added by Lijie
Print pyramid info
Args:
coeff (list): the complex steerable pyramid decomposition of an image
'''
assert isinstance(coeff, list)
height=len(coeff)
bands=len(coeff[1])
print('-----------------------------------')
print('Pyr height: {} Pyr bands: {}'.format(height,bands))
print('Pyr{0:2d}: {1} {2}'.format(height,coeff[0].dtype,coeff[0].shape))
for i in range(height-2):
print('Pyr{0:2d}: {1} {2}'.format(height-i-1,coeff[i+1][0].dtype,coeff[i+1][0].shape))
print('Pyr{0:2d}: {1} {2}'.format(1,coeff[height-1].dtype,coeff[height-1].shape))
print('-----------------------------------')
def BatchCsp(self, imgs_batch, channel, type):
'''
Added by Lijie
Run the steerable pyramid decomposition over an image batch.
Args:
    imgs_batch: image batch [N,C,H,W], usually from torch.utils.data.DataLoader
    channel: run the decomposition on this channel
    type: 0 for build
          1 for build_c
Return:
    list of pyramid coefficients (np.ndarray)
'''
assert isinstance(
imgs_batch, torch.Tensor), 'imgs_batch must be type torch.Tensor!'
assert imgs_batch.shape[1] > channel, 'Invalid input channel!'  # channel is an index, so it must be strictly smaller
assert type==0 or type==1, 'Invalid input type!'
coeffs_batch = []
if type==0:
for i in range(imgs_batch.shape[0]):
# img_array = np.array(torchvision.transforms.ToPILImage()(imgs_batch[i, channel, :, :]))
img_array = imgs_batch[i, channel, :, :].numpy()
coeffs_batch.append(self.build(img_array))
return coeffs_batch
else:
for i in range(imgs_batch.shape[0]):
# img_array = np.array(torchvision.transforms.ToPILImage()(imgs_batch[i, channel, :, :]))
img_array = imgs_batch[i, channel, :, :].numpy()
coeffs_batch.append(self.build_c(img_array))
return coeffs_batch
def Batch_recon(self,coeffs_batch,type):
'''
Added by Lijie
Reconstruct an image batch from coeffs_batch.
Args:
    coeffs_batch: result from the BatchCsp function
    type: 0 for build
          1 for build_c
Return:
    list of reconstructed images (np.ndarray)
'''
assert type==0 or type==1, 'Invalid input type!'
if type==0:
return [self.reconstruct(coeff) for coeff in coeffs_batch ]
if type==1:
return [self.reconstruct_c(coeff) for coeff in coeffs_batch ]
# def phasenet_recon(self,output):
# '''
# Only for PhaseNet output, to reconstruct img
# Args:
# output: PhaseNet output
# Return:
# image(np.array) batch list
# '''
# assert self.nbands == int(output[1].shape[1]/2),'error input!'
# coeffs_batch = []
# batch_size = output[0].shape[0]
# for i in range(batch_size):
# temp=[]
# for j in range(len(output)):
# if j==0:#insert lodft
# temp.insert(0,np.fft.fftshift(np.fft.fft2(output[j][i].detach().numpy())))
# else:
# banddft_list = []
# for n in range(self.nbands):
# bands = np.empty(shape=(output[j].shape[2],output[j].shape[3]),dtype=np.complex)
# amp = output[j][i][n].detach().numpy()
# phase = output[j][i][n+self.nbands].detach().numpy()*np.pi
# bands.real = amp*np.cos(phase)
# bands.imag = amp*np.sin(phase)
# banddft_list.append(bands)
# temp.insert(0,banddft_list)
# temp.insert(0,np.zeros(shape=(output[-1].shape[2],output[-1].shape[3]),dtype=np.complex))#insert hidft(zero)
# coeffs_batch.append(temp)
# return self.Batch_recon(coeffs_batch,1)
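# Usage sketch (hedged): assuming `pyr` is an instance of this pyramid class,
# constructed elsewhere with some height, nbands and scale_factor:
#   im = np.random.rand(256, 256).astype('float32')
#   coeff = pyr.build_c(im)          # [hi-pass dft, oriented bands..., lo-pass residual]
#   pyr.pyr_info(coeff)              # print per-level dtype and shape
#   rec = pyr.reconstruct_c(coeff)   # real-valued reconstruction, approximately im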
|
from typing import List
from math import ceil
from numpy import linspace
from scipy.interpolate import splrep, splev
import torch
class Spline(torch.nn.Module):
def __init__(self, upsample_rate : int) -> None:
super(Spline, self).__init__()
self.upsample_rate = upsample_rate
def forward(self, x : torch.Tensor) -> torch.Tensor:
# TODO: Tensor -> numpy -> Tensor transformation,
# implement no-scipy style if possible later.
assert len(x.shape) == 1 or len(x.shape) == 2
w = x.shape[-1]
t = lambda z: splev(
linspace(0, w-1, w*self.upsample_rate),
splrep(linspace(0, w-1, w), z),
der=0
)
if len(x.shape) == 1:
return torch.Tensor(t(x.cpu().numpy())).to(x.device)
return torch.stack([torch.Tensor(t(z)) for z in x.cpu().numpy()])\
.to(x.device)
class Subpixel(torch.nn.Module):
def __init__(self, r_subpixel : int) -> None:
super(Subpixel, self).__init__()
self.r_subpixel = r_subpixel
def forward(self, x : torch.Tensor) -> torch.Tensor:
n, c_in, l_in = x.shape
assert c_in % self.r_subpixel == 0
return x.reshape(n, c_in // self.r_subpixel, self.r_subpixel, l_in) \
.transpose(2, 3) \
.reshape(n, c_in // self.r_subpixel, l_in * self.r_subpixel)
class Superpixel(torch.nn.Module):
def __init__(self, r_superpixel : int) -> None:
super(Superpixel, self).__init__()
self.r_superpixel = r_superpixel
def forward(self, x : torch.Tensor) -> torch.Tensor:
r_pad = (self.r_superpixel - x.shape[-1] % self.r_superpixel) \
% self.r_superpixel
x = torch.nn.functional.pad(x, (0, r_pad), mode='reflect')
n, c_in, l_in = x.shape
assert l_in % self.r_superpixel == 0
return x.reshape(n, c_in, l_in // self.r_superpixel, self.r_superpixel) \
.transpose(2, 3) \
.reshape(n, c_in * self.r_superpixel, l_in // self.r_superpixel)
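# Shape sanity sketch: Spline upsamples along the last axis via cubic splines;
# Subpixel is a 1D pixel shuffle (channels -> length) and Superpixel is its
# inverse (length -> channels), so chaining them with equal rates preserves shape.
def _demo_resampling_blocks() -> None:
    assert Spline(upsample_rate=2)(torch.linspace(0, 1, 8)).shape == (16,)
    x = torch.randn(4, 8, 16)      # (N, C, L)
    y = Subpixel(2)(x)             # -> (4, 4, 32): channels folded into length
    z = Superpixel(2)(y)           # -> (4, 8, 16): length folded back into channels
    assert y.shape == (4, 4, 32) and z.shape == x.shape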
class MultiscaleConv(torch.nn.Module):
def __init__(
self,
in_channels : int,
out_channels_per_conv : int,
kernel_size : List[int]
) -> None:
super(MultiscaleConv, self).__init__()
self.conv_layers = torch.nn.ModuleList([
torch.nn.Conv1d(
in_channels,
out_channels_per_conv,
k,
padding=(k-1)//2
)
for k in kernel_size
])
def forward(self, x : torch.Tensor) -> torch.Tensor:
return torch.cat([c(x) for c in self.conv_layers], dim=1)
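# Sketch: MultiscaleConv applies several length-preserving 1D convolutions with
# different (odd) kernel sizes in parallel and concatenates along channels.
def _demo_multiscale_conv() -> None:
    conv = MultiscaleConv(in_channels=1, out_channels_per_conv=4, kernel_size=[3, 5, 9])
    y = conv(torch.randn(2, 1, 32))
    assert y.shape == (2, 12, 32)  # 3 kernel sizes x 4 channels each, length preserved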
class DownsamplingBlock(torch.nn.Module):
def __init__(
self,
in_channels : int,
out_channels : int,
kernel_size : List[int],
r_superpixel : int
) -> None:
super(DownsamplingBlock, self).__init__()
assert out_channels % (len(kernel_size) * r_superpixel) == 0
out_channels_per_conv \
= out_channels // (len(kernel_size) * r_superpixel)
self.multiscale_conv = MultiscaleConv(
in_channels, out_channels_per_conv, kernel_size)
self.prelu = torch.nn.PReLU(init=0.2)
self.superpixel = Superpixel(r_superpixel)
def forward(self, x : torch.Tensor) -> torch.Tensor:
return self.superpixel(self.prelu(self.multiscale_conv(x)))
class UpsamplingBlock(torch.nn.Module):
'''The upsampling blocks in the original paper include stacked outputs from the
corresponding downsampling blocks. This implementation excludes that step for
(possible) flexibility.
'''
def __init__(
self,
in_channels : int,
out_channels : int,
kernel_size : List[int],
r_subpixel : int,
p_dropout : float
) -> None:
super(UpsamplingBlock, self).__init__()
assert (out_channels * r_subpixel) % len(kernel_size) == 0
out_channels_per_conv \
= (out_channels * r_subpixel) // len(kernel_size)
self.multiscale_conv = MultiscaleConv(
in_channels, out_channels_per_conv, kernel_size)
self.dropout = torch.nn.Dropout(p=p_dropout)
self.prelu = torch.nn.PReLU(init=0.2)
self.subpixel = Subpixel(r_subpixel)
def forward(self, x : torch.Tensor) -> torch.Tensor:
return self.subpixel(self.prelu(self.dropout(self.multiscale_conv(x))))
class Autoencoder(torch.nn.Module):
class Encoder(torch.nn.Module):
def __init__(
self,
out_channel : List[int],
kernel_size : List[int],
conv_in_channel : int,
conv_kernel_size : int,
superpixel_rate : int,
dropout_p : float,
) -> None:
super(Autoencoder.Encoder, self).__init__()
self.downsampling_blocks = torch.nn.ModuleList([
DownsamplingBlock(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
r_superpixel=superpixel_rate
)
for in_c, out_c in zip([1]+out_channel, out_channel)
])
def forward(self, x : torch.Tensor) -> torch.Tensor:
downsample_out = x.unsqueeze(1)
for d in self.downsampling_blocks:
downsample_out = d(downsample_out)
return downsample_out
class Decoder(torch.nn.Module):
def __init__(
self,
out_channel : List[int],
kernel_size : List[int],
conv_in_channel : int,
conv_kernel_size : int,
superpixel_rate : int,
dropout_p : float,
) -> None:
super(Autoencoder.Decoder, self).__init__()
self.upsampling_blocks = torch.nn.ModuleList([
UpsamplingBlock(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
r_subpixel=superpixel_rate,
p_dropout=dropout_p
)
for block_i, (in_c, out_c) in enumerate(zip(
out_channel[::-1],
out_channel[-2::-1]+[conv_in_channel]
))
])
self.conv = torch.nn.Conv1d(
conv_in_channel,
1,
conv_kernel_size,
padding=(conv_kernel_size-1) // 2
)
def forward(self, x : torch.Tensor) -> torch.Tensor:
upsample_in = x
for u in self.upsampling_blocks:
upsample_in = u(upsample_in)
return self.conv(upsample_in).squeeze(1)
def __init__(
self,
out_channel : List[int],
kernel_size : List[int],
conv_in_channel : int,
conv_kernel_size : int,
superpixel_rate : int,
dropout_p : float,
) -> None:
super(Autoencoder, self).__init__()
self.encoder = Autoencoder.Encoder(
out_channel=out_channel,
kernel_size=kernel_size,
conv_in_channel=conv_in_channel,
conv_kernel_size=conv_kernel_size,
superpixel_rate=superpixel_rate,
dropout_p=dropout_p,
)
self.decoder = Autoencoder.Decoder(
out_channel=out_channel,
kernel_size=kernel_size,
conv_in_channel=conv_in_channel,
conv_kernel_size=conv_kernel_size,
superpixel_rate=superpixel_rate,
dropout_p=dropout_p,
)
def forward(self, x : torch.Tensor) -> torch.Tensor:
orig_size = x.shape[-1]
return self.decoder(self.encoder(x))[:, :orig_size]
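# Usage sketch (all hyperparameters below are illustrative, not from the paper):
# each encoder block halves the length via Superpixel and each decoder block
# restores it via Subpixel, so input and output lengths match.
def _demo_autoencoder() -> None:
    ae = Autoencoder(
        out_channel=[32, 64],
        kernel_size=[3, 9],
        conv_in_channel=16,
        conv_kernel_size=9,
        superpixel_rate=2,
        dropout_p=0.1,
    )
    x = torch.randn(4, 256)        # (N, L); L divisible by superpixel_rate**num_blocks
    assert ae(x).shape == (4, 256)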
class Generator(torch.nn.Module):
def __init__(
self,
supersampling_rate : int,
out_channel : List[int],
kernel_size : List[int],
conv_in_channel : int,
conv_kernel_size : int,
superpixel_rate : int,
dropout_p : float,
) -> None:
super(Generator, self).__init__()
self.supersampling_rate = supersampling_rate
self.out_channel = out_channel
self.spline = Spline(supersampling_rate)
self.downsampling_blocks = torch.nn.ModuleList([
DownsamplingBlock(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
r_superpixel=superpixel_rate
)
for in_c, out_c in zip([1]+out_channel, out_channel)
])
# Each upsampling block's output is concatenated with the corresponding
# downsampling block's output and fed to the next block, so the input
# channels are doubled for every block except the first.
self.upsampling_blocks = torch.nn.ModuleList([
UpsamplingBlock(
in_channels=half_in_c * 2 if block_i > 0 else half_in_c,
out_channels=out_c,
kernel_size=kernel_size,
r_subpixel=superpixel_rate,
p_dropout=dropout_p
)
for block_i, (half_in_c, out_c) in enumerate(zip(
out_channel[::-1],
out_channel[-2::-1]+[conv_in_channel]
))
])
self.conv = torch.nn.Conv1d(
conv_in_channel,
1,
conv_kernel_size,
padding=(conv_kernel_size-1) // 2
)
def forward(self, x : torch.Tensor) -> torch.Tensor:
x = self.spline(x)
orig_size = x.shape[-1]
size_factor = self.supersampling_rate ** len(self.out_channel)
r_pad = (size_factor - (orig_size % size_factor)) % size_factor
x = torch.nn.functional.pad(
x.unsqueeze(0), (0, r_pad), mode='reflect').squeeze(0)
downsample_out = []
for d in self.downsampling_blocks:
downsample_out.append(d(
downsample_out[-1] if downsample_out else x.unsqueeze(1)
))
upsample_in = None
for d_out, u in zip(downsample_out[::-1], self.upsampling_blocks):
upsample_in = u(
d_out if upsample_in is None else
torch.cat([upsample_in, d_out], dim=1)
)
return (x + self.conv(upsample_in).squeeze(1))[:, :orig_size]
class Discriminator(torch.nn.Module):
class DownsamplingBlock(torch.nn.Module):
def __init__(
self,
in_channels : int,
out_channels : int,
kernel_size : List[int],
r_superpixel : int,
p_dropout : float
) -> None:
super(Discriminator.DownsamplingBlock, self).__init__()
assert out_channels % (len(kernel_size) * r_superpixel) == 0
out_channels_per_conv \
= out_channels // (len(kernel_size) * r_superpixel)
self.multiscale_conv = MultiscaleConv(
in_channels, out_channels_per_conv, kernel_size)
self.batchnorm = torch.nn.BatchNorm1d(out_channels // r_superpixel)
self.dropout = torch.nn.Dropout(p=p_dropout)
self.leakyrelu = torch.nn.LeakyReLU()
self.superpixel = Superpixel(r_superpixel)
def forward(self, x : torch.Tensor) -> torch.Tensor:
return self.superpixel(self.leakyrelu(
self.dropout(self.batchnorm(self.multiscale_conv(x)))
))
def __init__(
self,
input_length : int,
out_channel : List[int],
kernel_size : List[int],
linear_out_features : int,
superpixel_rate : int,
dropout_p : float,
) -> None:
super(Discriminator, self).__init__()
assert out_channel[0] % len(kernel_size) == 0
out_channels_per_conv = out_channel[0] // len(kernel_size)
self.multiscale_conv = MultiscaleConv(
1, out_channels_per_conv, kernel_size)
self.leakyrelu_1 = torch.nn.LeakyReLU(0.2)
self.downsampling_blocks = torch.nn.ModuleList([
Discriminator.DownsamplingBlock(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
r_superpixel=superpixel_rate,
p_dropout=dropout_p
)
for in_c, out_c in zip(out_channel, out_channel[1:])
])
# first multiscale conv. does not perform superpixel
downsampling_length = ceil(
input_length / superpixel_rate**(len(out_channel)-1))
# The original paper suggests an extra linear layer instead of global
# pooling. This implementation uses global average pooling (mean over
# the time axis) due to GPU VRAM limitations.
"""
self.linear_1 = torch.nn.Linear(
downsampling_length * out_channel[-1], linear_out_features)
self.dropout = torch.nn.Dropout(p=dropout_p)
self.leakyrelu_2 = torch.nn.LeakyReLU(0.2)
self.linear_2 = torch.nn.Linear(linear_out_features, 1)
self.sigmoid = torch.nn.Sigmoid()
"""
self.linear = torch.nn.Linear(out_channel[-1], 1)
self.sigmoid = torch.nn.Sigmoid()
def forward(self, x : torch.Tensor) -> torch.Tensor:
downsample_out = x.unsqueeze(1)
downsample_out = self.leakyrelu_1(self.multiscale_conv(
downsample_out))
for d in self.downsampling_blocks:
downsample_out = d(downsample_out)
"""
linear_out = self.leakyrelu_2(self.dropout(
self.linear_1(downsample_out.flatten(start_dim=1))))
return self.sigmoid(self.linear_2(linear_out)).squeeze(1)
"""
linear_out = self.linear(downsample_out.mean(dim=-1))
return self.sigmoid(linear_out)
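# End-to-end shape sketch (hedged; all hyperparameters below are illustrative):
# the Generator spline-upsamples by supersampling_rate and refines the result
# residually; the Discriminator maps a signal to a real/fake probability.
def _demo_gan_shapes() -> None:
    g = Generator(
        supersampling_rate=2,
        out_channel=[32, 64],
        kernel_size=[3, 9],
        conv_in_channel=16,
        conv_kernel_size=9,
        superpixel_rate=2,
        dropout_p=0.1,
    )
    x = torch.randn(4, 256)        # (N, L) low-resolution batch
    y = g(x)                       # -> (4, 512): length scaled by supersampling_rate
    assert y.shape == (4, 512)
    d = Discriminator(
        input_length=512,
        out_channel=[16, 32],
        kernel_size=[3, 9],
        linear_out_features=64,
        superpixel_rate=2,
        dropout_p=0.1,
    )
    assert d(y).shape == (4, 1)    # sigmoid probability per sample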
|
import torch
from torch.autograd import Function
from .utils import eye_like
from warnings import warn
def householder_matrix(unit_vector):
# If unit_vector has shape (batch, m) or (m,), reshape it to
# (batch, m, 1) or (m, 1), respectively.
if unit_vector.shape[-1] != 1:
# Handle edge case if unit_vector.shape is (1,)
if len(unit_vector.shape) == 1:
return torch.ones_like(unit_vector)
unit_vector = unit_vector.view(*tuple(unit_vector.shape), 1)
transform = 2 * unit_vector @ torch.transpose(unit_vector, -1, -2)
return eye_like(transform) - transform
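# Property check (sketch): for a unit vector v, H = I - 2 v v^T is orthogonal
# and reflects v to -v.
def _check_householder_matrix() -> bool:
    v = torch.randn(5, 1)
    v = v / v.norm()
    h = householder_matrix(v)
    ok_orthogonal = torch.allclose(h @ h.t(), torch.eye(5), atol=1e-5)
    ok_reflect = torch.allclose(h @ v, -v, atol=1e-5)
    return ok_orthogonal and ok_reflect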
def normalize_matrix_rows(matrix, eps=1e-6):
    # Note: this normalizes along dim=-2, i.e. each column (reflection
    # vector) is scaled to approximately unit norm.
    norms = torch.sqrt(torch.sum(matrix**2, dim=-2, keepdim=True) + eps)
    return matrix / norms
def householder_transform(matrix, n_reflections=-1, eps=1e-6):
"""Implements a product of Householder transforms.
"""
# If n_reflections==-1, use as many reflections as possible.
if n_reflections == -1:
n_reflections = matrix.shape[-1]
if n_reflections > matrix.shape[-1]:
warn("n_reflections is set higher than the number of rows.")
n_reflections = matrix.shape[-1]
matrix = normalize_matrix_rows(matrix, eps)
if n_reflections == 0:
output = torch.eye(
matrix.shape[-2],
dtype=matrix.dtype,
device=matrix.device
)
if len(matrix.shape) == 3:
output = output.view(1, matrix.shape[1], matrix.shape[1])
output = output.expand(matrix.shape[0], -1, -1)
for i in range(n_reflections):
unit_vector = matrix[..., i:i+1]
householder = householder_matrix(unit_vector)
if i == 0:
output = householder
else:
output = output @ householder
return output |
#!/usr/bin/env python3
import os
import sys
import unittest
from pathlib import Path
from typing import Optional
from unittest import TestCase, mock
from unittest.mock import patch
import zulip_bots.run
from zulip_bots.lib import extract_query_without_mention
class TestDefaultArguments(TestCase):
our_dir = os.path.dirname(__file__)
path_to_bot = os.path.abspath(os.path.join(our_dir, "../bots/giphy/giphy.py"))
@patch("sys.argv", ["zulip-run-bot", "giphy", "--config-file", "/foo/bar/baz.conf"])
@patch("zulip_bots.run.run_message_handler_for_bot")
def test_argument_parsing_with_bot_name(
self, mock_run_message_handler_for_bot: mock.Mock
) -> None:
with patch("zulip_bots.run.exit_gracefully_if_zulip_config_is_missing"):
zulip_bots.run.main()
mock_run_message_handler_for_bot.assert_called_with(
bot_name="giphy",
config_file="/foo/bar/baz.conf",
bot_config_file=None,
lib_module=mock.ANY,
quiet=False,
)
@patch("sys.argv", ["zulip-run-bot", path_to_bot, "--config-file", "/foo/bar/baz.conf"])
@patch("zulip_bots.run.run_message_handler_for_bot")
def test_argument_parsing_with_bot_path(
self, mock_run_message_handler_for_bot: mock.Mock
) -> None:
with patch("zulip_bots.run.exit_gracefully_if_zulip_config_is_missing"):
zulip_bots.run.main()
mock_run_message_handler_for_bot.assert_called_with(
bot_name="giphy",
config_file="/foo/bar/baz.conf",
bot_config_file=None,
lib_module=mock.ANY,
quiet=False,
)
def test_adding_bot_parent_dir_to_sys_path_when_bot_name_specified(self) -> None:
bot_name = "helloworld" # existing bot's name
expected_bot_dir_path = Path(
os.path.dirname(zulip_bots.run.__file__), "bots", bot_name
).as_posix()
self._test_adding_bot_parent_dir_to_sys_path(
bot_qualifier=bot_name, bot_dir_path=expected_bot_dir_path
)
@patch("os.path.isfile", return_value=True)
def test_adding_bot_parent_dir_to_sys_path_when_bot_path_specified(
self, mock_os_path_isfile: mock.Mock
) -> None:
bot_path = "/path/to/bot"
expected_bot_dir_path = Path("/path/to").as_posix()
self._test_adding_bot_parent_dir_to_sys_path(
bot_qualifier=bot_path, bot_dir_path=expected_bot_dir_path
)
def _test_adding_bot_parent_dir_to_sys_path(
self, bot_qualifier: str, bot_dir_path: str
) -> None:
with patch(
"sys.argv", ["zulip-run-bot", bot_qualifier, "--config-file", "/path/to/config"]
):
with patch("zulip_bots.finder.import_module_from_source", return_value=mock.Mock()):
with patch("zulip_bots.run.run_message_handler_for_bot"):
with patch("zulip_bots.run.exit_gracefully_if_zulip_config_is_missing"):
zulip_bots.run.main()
sys_path = [Path(path).as_posix() for path in sys.path]
self.assertIn(bot_dir_path, sys_path)
@patch("os.path.isfile", return_value=False)
def test_run_bot_by_module_name(self, mock_os_path_isfile: mock.Mock) -> None:
bot_module_name = "bot.module.name"
mock_bot_module = mock.Mock()
mock_bot_module.__name__ = bot_module_name
with patch(
"sys.argv", ["zulip-run-bot", "bot.module.name", "--config-file", "/path/to/config"]
):
with patch(
"importlib.import_module", return_value=mock_bot_module
) as mock_import_module:
with patch("zulip_bots.run.run_message_handler_for_bot"):
with patch("zulip_bots.run.exit_gracefully_if_zulip_config_is_missing"):
zulip_bots.run.main()
mock_import_module.assert_called_once_with(bot_module_name)
class TestBotLib(TestCase):
def test_extract_query_without_mention(self) -> None:
def test_message(name: str, message: str, expected_return: Optional[str]) -> None:
mock_client = mock.MagicMock()
mock_client.full_name = name
mock_message = {"content": message}
self.assertEqual(
expected_return, extract_query_without_mention(mock_message, mock_client)
)
test_message("xkcd", "@**xkcd**foo", "foo")
test_message("xkcd", "@**xkcd** foo", "foo")
test_message("xkcd", "@**xkcd** foo bar baz", "foo bar baz")
test_message("xkcd", "@**xkcd** foo bar baz", "foo bar baz")
test_message("xkcd", "@**xkcd** 123_) (/&%) +}}}l", "123_) (/&%) +}}}l")
test_message("brokenmention", "@**brokenmention* foo", None)
test_message("nomention", "foo", None)
test_message("Max Mustermann", "@**Max Mustermann** foo", "foo")
test_message(r"Max (Mustermann)#(*$&12]\]", r"@**Max (Mustermann)#(*$&12]\]** foo", "foo")
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
# Copyright (c) 2022 Oxford Robotics Institute (ORI), University of Oxford
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import time
import weakref
from edr.edr_sensor import EDRSensor
# ==============================================================================
# -- EDRLidar3D ----------------------------------------------------------------
# ==============================================================================
class EDRLidar3D(EDRSensor):
def __init__(
self, parent_actor, preevent_time, postevent_time, sensor_id, transform
):
super().__init__(
parent_actor, preevent_time, postevent_time, 10.0, "lidar3d", sensor_id
)
self.ext = ".ply"
world = self._parent.get_world()
bp = world.get_blueprint_library().find("sensor.lidar.ray_cast")
bp.set_attribute("horizontal_fov", "360.0")
bp.set_attribute("upper_fov", "22.5")
bp.set_attribute("lower_fov", "-22.5")
bp.set_attribute("channels", "64")
bp.set_attribute("range", "100.0")
bp.set_attribute("rotation_frequency", "20.0")
bp.set_attribute("sensor_tick", "0.1")
bp.set_attribute("points_per_second", "655360") # 1024 pts x 64 ch x 10 Hz
# bp.set_attribute('points_per_second', '327680') # 512 pts x 64 ch x 10 Hz
self.sensor = world.spawn_actor(bp, transform, attach_to=self._parent)
weak_self = weakref.ref(self)
self.sensor.listen(lambda event: EDRLidar3D._on_data_event(weak_self, event))
@staticmethod
def _on_data_event(weak_self, event):
self = weak_self()
if not self:
return
timestamp = time.time()
self.edr_buffer.on_data(timestamp, event)
|
# Generated by Django 2.0.10 on 2019-04-08 19:28
from django.db import migrations
def fill_reporter_slug(apps, schema_editor):
Reporter = apps.get_model("capdb", "Reporter")
for reporter in Reporter.objects.all():
    # Re-saving each Reporter triggers the model's save(), which presumably populates the slug field.
    reporter.save()
class Migration(migrations.Migration):
dependencies = [
('capdb', '0063_auto_20190408_1928'),
]
operations = [
migrations.RunPython(fill_reporter_slug, migrations.RunPython.noop),
]
|
from django.utils.translation import ugettext_lazy as _
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from .menu import NewsItemMenu
class NewsAppHook(CMSApp):
app_name = 'cmsplugin_newsplus'
name = _('News App')
def get_urls(self, page=None, language=None, **kwargs):
return ['cmsplugin_newsplus.urls']
menus = [NewsItemMenu]
apphook_pool.register(NewsAppHook)
|
"""
Meraki SSIDs API Resource
"""
from .meraki_api_resource import MerakiAPIResource
from .l3_firewall_rules import L3FirewallRules
class SSIDs(MerakiAPIResource):
""" Meraki API Organization Admins resource. """
resource = "ssids"
parameters = [
"name"
, "enabled"
, "authMode"
, "encriptionMode"
, "psk"
, "splashPage"
, "radiusServer"
, "radiusCoAEnabled"
, "radiusAccountingEnabled"
, "radiusAccountingServers"
, "ipAssignmentMode"
, "useVlanTagging"
, "concentratorNetworkId"
, "vlanId"
, "defaultVlan"
, "apTagsAndVlanIds"
, "walledGardenEnabled"
, "walledGardenRanges"
, "minBitrate"
, "bandSelection"
, "perClientBandwidthLimitUp"
, "perClientBandwidthLimitDown"
]
def __init__(self, key, prefix, resource_id=None):
MerakiAPIResource.__init__(self, key, prefix, resource_id)
def l3_firewall_rules(self, l3_firewall_rule_id=None):
""" Returns the L3 Firewall Rules API Resource. """
return L3FirewallRules(self.key, self.endpoint(), l3_firewall_rule_id)
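# Usage sketch (hypothetical key and prefix, following this wrapper's
# constructor signature):
#   ssids = SSIDs("<api-key>", "<api-prefix>/networks/<network-id>")
#   rules_resource = ssids.l3_firewall_rules()  # nested L3FirewallRules resource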
|
import os
import random
import albumentations as A
import cv2
import imgaug as ia
import numpy as np
from PIL import Image, ImageFilter
from skimage.filters import (rank, threshold_niblack, threshold_sauvola)
from skimage.morphology import disk
import pyblur
from background_generator import BackgroundGenerator
from computer_text_generator import ComputerTextGenerator
from distorsion_generator import DistorsionGenerator
from elastic_distortion_generator import ElasticDistortionGenerator
from handwritten_text_generator import HandwrittenTextGenerator
def decision(probability):
return random.uniform(0, 1) < probability
def sauvola_bin(img, thres=0.3):
img = np.array(img)
binary = img > threshold_sauvola(img, window_size=15, k=thres)
return binary.astype('uint8') * 255
def add_random_space_to_string(s):
s = list(s)
for i in range(len(s) - 1):
if s[i] == ' ':
while random.randrange(3):
s[i] = s[i] + ' '
return ''.join(s)
def nick_binarize(img_list):
'''Binarize linecut images using two differently sized local threshold kernels
Args:
img_list: list of grayscale linecut images
Returns:
results: binarized images in the same order as the input'''
results = []
for img in img_list:
# img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
height = img.shape[0]
width = img.shape[1]
# Resize the images to 100 pixel height
scaling_factor = 100/img.shape[0]
new_w = int(scaling_factor*img.shape[1])
new_h = int(scaling_factor*img.shape[0])
# img = cv2.resize(img, (new_w, new_h))
img = np.array(Image.fromarray(img).resize(
    (new_w, new_h), Image.LANCZOS))  # LANCZOS is the filter behind the old ANTIALIAS alias
# First pass thresholding
th1 = threshold_niblack(img, 13, 0.00)
# Second pass thresholding
radius = 101
structured_elem = disk(radius)
th2 = rank.otsu(img, structured_elem)
# Masking
img = (img > th1) | (img > th2)
img = img.astype('uint8')*255
img = np.array(Image.fromarray(img).resize(
    (width, height), Image.LANCZOS))
results.append(img)
return results
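# Usage sketch (hypothetical file paths):
#   line_imgs = [np.array(Image.open(p).convert('L')) for p in ("a.png", "b.png")]
#   binarized = nick_binarize(line_imgs)  # same order as the input list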
class FakeTextDataGenerator(object):
@classmethod
def generate_from_tuple(cls, t):
"""
Same as generate, but takes all parameters as one tuple
"""
cls.generate(*t)
@classmethod
def generate(cls, index, text, font, out_dir, height, extension, skewing_angle, random_skew, blur, background_type, distorsion_type, distorsion_orientation, is_handwritten, name_format, text_color=-1, prefix="", random_crop=False, debug=False):
try:
max_height = 80.0
albu = A.Compose([
A.RandomBrightness(limit=.1, p=0.3),
A.RandomContrast(limit=.1, p=0.3),
A.RandomGamma(gamma_limit=(90, 110), p=0.3),
A.CLAHE(p=0.3),
A.HueSaturationValue(hue_shift_limit=20,
sat_shift_limit=30,
val_shift_limit=20, p=0.3),
# A.ChannelShuffle(p=0.3),
A.JpegCompression(quality_lower=95, p=0.3),
], p=1)
#####################################
# Generate name for resulting image #
#####################################
if name_format == 0:
image_name = '{}_{}.{}'.format(text, str(index), extension)
elif name_format == 1:
image_name = '{}_{}.{}'.format(str(index), text, extension)
elif name_format == 2:
image_name = '{}.{}'.format(str(index), extension)
elif name_format == 3:
image_name = '{}_{}.{}'.format(prefix, str(index),
extension)
else:
print(
'{} is not a valid name format. Using default.'.format(
name_format))
image_name = '{}_{}.{}'.format(text, str(index), extension)
# print(image_name, font)
img = None
##########################
# Create picture of text #
##########################
add_random_space = ' ' in text and decision(0.02)
text = " " + text + " "
if (len(text) < 40):
for x in range(random.randint(1, 3)):
text = " " + text
for x in range(random.randint(1, 3)):
text = text + " "
if add_random_space:
text = add_random_space_to_string(text)
text_mode = np.random.choice(
5, 1, p=[0.86, 0.02, 0.02, 0.0, 0.1])[0]
extend_bottom = np.random.choice(3, 1, p=[0.5, 0.3, 0.2])[0] + 2
if is_handwritten:
img = HandwrittenTextGenerator.generate(text)
else:
img = ComputerTextGenerator.generate(
text, font, text_color, height, text_mode=text_mode, extend_bottom=extend_bottom)
img = np.array(img)
img = img[random.randint(0, 2):, :]
img = Image.fromarray(img)
if debug:
img.convert('L').save(
os.path.join(out_dir, image_name.replace(".jpg", "_7.jpg")))
if decision(0.6):
random_angle = random.uniform(-skewing_angle/4,
skewing_angle/4)
rotated_img = img.rotate(
skewing_angle if not random_skew else random_angle, expand=1) # .resize(img.size)
else:
random_angle = random.uniform(-skewing_angle,
skewing_angle)
rotated_img = img.rotate(
skewing_angle if not random_skew else random_angle,
expand=1)
# if decision(0.3):
# rotated_img = Image.fromarray(scipy.ndimage.rotate(img, random_angle))
# else:
# rotated_img = Image.fromarray(imutils.rotate_bound(np.array(img), random_angle))
# rotated_img = rotated_img.convert("RGBA")
# white_mask = Image.new('RGBA', rotated_img.size, (255,) * 4)
# rotated_img = Image.composite(rotated_img, white_mask, rotated_img)
if debug:
rotated_img.convert('L').save(
os.path.join(out_dir, image_name.replace(".jpg", "_6.jpg")))
# rotated_img = rotated_img.convert('L')
###################################
# Random miscellaneous distortion #
###################################
if decision(0.7):
rotated_img = cv2.cvtColor(
np.array(rotated_img), cv2.COLOR_GRAY2BGR)
augmented = albu(image=rotated_img, mask=None, bboxes=[],)
rotated_img = Image.fromarray(cv2.cvtColor(
augmented['image'], cv2.COLOR_BGR2GRAY))
if decision(0.9):
if decision(0.2):
if decision(0.5):
# full image erode
x = random.randint(0, 2)
kernel = np.ones((x, x), np.uint8)
im_arr = cv2.erode(
np.array(rotated_img), kernel, iterations=1)
else:
# partial image erode
im_arr = np.array(rotated_img)
start_x = random.randint(0, int(im_arr.shape[1] * 0.7))
if start_x + 10 < im_arr.shape[1]:
end_x = random.randint(
start_x + 10, im_arr.shape[1])
x = random.randint(1, 4)
kernel = np.ones((x, x), np.uint8)
im_arr[:, start_x:end_x] = cv2.erode(
im_arr[:, start_x:end_x], kernel, iterations=1)
else:
im_arr = np.array(rotated_img)
rotated_img = Image.fromarray(im_arr)
if debug:
rotated_img.convert('L').save(
os.path.join(out_dir, image_name.replace(".jpg", "_5.jpg")))
random_erode_pixel = decision(0.005)
prob = 1.0
if random_erode_pixel:
# random erode with random pixel sampling
x = random.randint(0, 2)
kernel = np.ones((x, x), np.uint8)
im_arr = np.array(rotated_img)
erode = cv2.erode(im_arr, kernel, iterations=1)
# prob = np.random.choice([0.1, 0.2, 0.3], p=[0.05, 0.3, 0.65])
prob = random.uniform(0.96, 1.0)
mask = np.random.choice(
2, im_arr.shape, p=[1 - prob, prob]).astype('uint8')
im_arr[mask > 0] = erode[mask > 0]
rotated_img = Image.fromarray(im_arr)
if debug:
rotated_img.convert('L').save(
os.path.join(out_dir,
image_name.replace(".jpg", "_3.jpg")))
random_pixel_discard = decision(0.2)
if random_pixel_discard:
# random pixel discard
# print("lol")
im_arr = np.array(rotated_img)
# prob = np.random.choice([0.1, 0.15, 0.25], p=[0.6, 0.3, 0.1])
prob = random.uniform(0.95, 1.0)
mask = np.random.choice(
2, im_arr.shape, p=[1 - prob, prob]).astype('uint8')
im_arr[mask == 0] = 255
# im_arr = np.clip(im_arr, 0, 255).astype('uint8')
rotated_img = Image.fromarray(im_arr)
# seq = ia.augmenters.Sequential([ia.augmenters.Dropout(random.uniform(0,0.05))])
# rotated_img = Image.fromarray(seq.augment_image(np.array(rotated_img)))
if debug:
rotated_img.convert('L').save(
os.path.join(out_dir, image_name.replace(".jpg", "_4.jpg")))
######################################
# Apply geometry distortion to image #
######################################
# NOTE: the distorsion_type argument is overridden here by random sampling.
distorsion_type = np.random.choice(
    4, 1, p=[0.75, 0.15, 0.05, 0.05])[0]
if distorsion_type == 0:
distorted_img = rotated_img # Mind = blown
elif distorsion_type == 1:
distorted_img = DistorsionGenerator.sin(
rotated_img,
vertical=(distorsion_orientation ==
0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation ==
1 or distorsion_orientation == 2),
max_offset=2
)
elif distorsion_type == 2:
distorted_img = DistorsionGenerator.cos(
rotated_img,
vertical=(distorsion_orientation ==
0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation ==
1 or distorsion_orientation == 2),
max_offset=2
)
elif not random_pixel_discard and distorsion_type == 3:
distorted_img = DistorsionGenerator.random(
rotated_img,
vertical=(distorsion_orientation ==
0 or distorsion_orientation == 2),
horizontal=(distorsion_orientation ==
1 or distorsion_orientation == 2)
)
else:
distorted_img = DistorsionGenerator.cos(
rotated_img,
vertical=(
distorsion_orientation == 0 or distorsion_orientation == 2),
horizontal=(
distorsion_orientation == 1 or distorsion_orientation == 2),
max_offset=2
)
new_text_width, new_text_height = distorted_img.size
if debug:
distorted_img.convert('L').save(
os.path.join(out_dir, image_name.replace(".jpg", "_2.jpg")))
affine_type = np.random.choice(4, 1, p=[0.1, 0.05, 0, 0.85])[0]
if not random_pixel_discard or (random_pixel_discard is True and prob > 0.98):
if affine_type == 0 and distorted_img.size[1] > 40 and distorsion_type == 0:
distorted_img = ElasticDistortionGenerator.afffine_transform(
distorted_img)
if debug:
distorted_img.convert('L').save(os.path.join(out_dir,
image_name.replace(
".jpg",
"_1_1.jpg")))
elif affine_type == 1:
distorted_img = ElasticDistortionGenerator.elastic_transform(
distorted_img)
if debug:
distorted_img.convert('L').save(os.path.join(out_dir,
image_name.replace(
".jpg",
"_1_2.jpg")))
# elif affine_type == 2:
# distorted_img = ElasticDistortionGenerator.perspective_transform(distorted_img)
# distorted_img.convert('L').save(os.path.join(out_dir,
# image_name.replace(
# ".jpg",
# "_1_3.jpg")))
if np.min(np.array(distorted_img)) > 250:
    print(index, "warning: distorted image is nearly blank",
          affine_type, random_pixel_discard)
x = random.randint(-3, 3)
y = random.randint(1, 3)
if debug:
distorted_img.convert('L').save(os.path.join(
out_dir, image_name.replace(".jpg", "_1.jpg")))
#############################
# Generate background image #
#############################
# NOTE: the background_type argument is likewise overridden by random sampling.
background_type = np.random.choice(
    4, 1, p=[0.1, 0.3, 0.02, 0.58])[0]
if background_type == 0:
background = BackgroundGenerator.gaussian_noise(
new_text_height + x, new_text_width + y)
elif background_type == 1:
background = BackgroundGenerator.plain_white(
new_text_height + x, new_text_width + y)
elif background_type == 2 and random_erode_pixel is False and random_pixel_discard is False:
background = BackgroundGenerator.quasicrystal(
new_text_height + x, new_text_width + y)
elif random_erode_pixel is False and random_pixel_discard is False and distorsion_type != 3:
background = BackgroundGenerator.picture(
new_text_height + x, new_text_width + y)
else:
background = BackgroundGenerator.gaussian_noise(
new_text_height + x, new_text_width + y)
distorted_img = distorted_img.convert('L')
mask = distorted_img.point(
lambda x: 0 if x == 255 or x == 0 else 255, '1')
apply_background = False
if (random.randint(0, 10) < 4):
background = distorted_img
else:
apply_background = True
background.paste(distorted_img, (5, 5), mask=mask)
##################################
# Resize image to desired format #
##################################
# new_width = float(new_text_width + y) * \
# (float(height) / float(new_text_height + x))
# image_on_background = background.resize((int(new_width), height), Image.ANTIALIAS)
# if distorsion_type != 3 and background_type != 2 and new_text_height > 45:
# final_image = background.filter(
# ImageFilter.GaussianBlur(
# radius=(blur if not random_blur else random.randint(0, blur))
# )
# )
# else:
##################################
# Random motion blur #
##################################
final_image = background.convert('L')
if debug:
final_image.save(
os.path.join(out_dir, image_name.replace(".jpg", "_0.jpg")))
# final_image = Image.fromarray(nick_binarize([np.array(final_image)])[0])
# random binary if background is white
# if blur_type in [1, 2] and background_type in [0, 1] and decision(0.6) and distorsion_type != 3:
# bin_thres = 0.3 if blur_type == 2 else 0.03
# binary_im = Image.fromarray(sauvola_bin(final_image, thres=bin_thres))
# if np.mean(binary_im) > 160:
# final_image = binary_im
else:
final_image = rotated_img.convert("L")
mask = final_image.point(
lambda x: 0 if x == 255 or x == 0 else 255, '1')
new_text_width, new_text_height = final_image.size
x = random.randint(-3, 3)
y = random.randint(1, 3)
background = BackgroundGenerator.plain_white(
new_text_height + x, new_text_width + y)
apply_background = False
background.paste(final_image, (5, 5), mask=mask)
final_image = background.convert('L')
resize_type = random.choice(
    [Image.LANCZOS, Image.BILINEAR, Image.LANCZOS])  # LANCZOS (the old ANTIALIAS) is listed twice, so it is drawn 2/3 of the time
if decision(0.7):
if (decision(0.5)):
f = random.uniform(
0.7, min(1.4, max_height/final_image.size[1]))
final_image = final_image.resize((int(
final_image.size[0] * f), int(final_image.size[1] * f)),
resize_type)
if decision(0.05):
f = 64.0/final_image.size[1]
final_image = final_image.resize((int(
final_image.size[0] * f), int(final_image.size[1] * f)),
resize_type)
else:
if (random.randint(0, 1) == 0):
f = random.uniform(
0.6, min(1.2, max_height/final_image.size[1]))
final_image = final_image.resize(
(int(final_image.size[0] * f), int(final_image.size[1])), resize_type)
else:
f = random.uniform(
0.85, min(1.1, max_height/final_image.size[1]))
final_image = final_image.resize(
(int(final_image.size[0]), int(final_image.size[1] * f)), resize_type)
# blur distortion
if blur:
blur_type = np.random.choice(
5, 1, p=[0.15, 0.2, 0.25, 0.2, 0.2])[0]
else:
blur_type = -1
if decision(0.8) and distorsion_type != 2:
if blur_type == 0:
final_image = pyblur.LinearMotionBlur_random(final_image)
if debug:
final_image.save(
os.path.join(out_dir,
image_name.replace(".jpg",
"_0_0.jpg")))
elif blur_type == 1:
final_image = pyblur.GaussianBlur_random(final_image)
if debug:
final_image.save(
os.path.join(out_dir,
image_name.replace(".jpg",
"_0_1.jpg")))
elif blur_type == 2:
kernel = np.ones((5, 5), np.float32) / \
random.randint(30, 50)
final_image = Image.fromarray(
cv2.filter2D(np.array(final_image), -1,
kernel))
if debug:
final_image.save(
os.path.join(out_dir,
image_name.replace(".jpg",
"_0_2.jpg")))
elif blur_type == 3:
final_image = Image.fromarray(
cv2.blur(np.array(final_image), (5, 5)))
if debug:
final_image.save(
os.path.join(out_dir,
image_name.replace(".jpg",
"_0_3.jpg")))
elif blur_type == 4 and final_image.size[0] > 40 and apply_background is not True:
final_image = pyblur.PsfBlur_random(final_image)
if debug:
final_image.save(
os.path.join(out_dir,
image_name.replace(".jpg",
"_0_4.jpg")))
# additional sharpening
if decision(0.1) and blur_type != 4:
final_image = final_image.filter(ImageFilter.EDGE_ENHANCE)
if debug:
final_image.save(
os.path.join(out_dir,
image_name.replace(".jpg",
"_0_2.jpg")))
seq = ia.augmenters.Sequential(ia.augmenters.OneOf([
ia.augmenters.Affine(
shear=(-36, 36),
order=[0, 1],
cval=0,
mode=ia.ALL),
]))
final_image = Image.fromarray(
seq.augment_image(np.array(final_image)))
# random invert
inverted = False
if blur_type != 4:
if decision(0.3):
# likely intent: picture background, or any distortion/blur applied
# (the bitwise '|' expression parsed as a chained comparison)
if (background_type == 3 or distorsion_type != 0
        or blur_type != 0):
if (decision(0.1)):
im_arr = np.array(final_image)
im_arr = np.bitwise_not(im_arr)
final_image = Image.fromarray(im_arr)
inverted = True
else:
im_arr = np.array(final_image)
im_arr = np.bitwise_not(im_arr)
final_image = Image.fromarray(im_arr)
inverted = True
if decision(0.1):
if inverted:
seq = ia.augmenters.Sequential(
[ia.augmenters.Salt(random.uniform(0.02, 0.05))])
final_image = Image.fromarray(
seq.augment_image(np.array(final_image)))
else:
seq = ia.augmenters.Sequential(
[ia.augmenters.Pepper(random.uniform(0.02, 0.05))])
final_image = Image.fromarray(
seq.augment_image(np.array(final_image)))
# Random crop
if random_crop and decision(0.1):
final_image = np.array(final_image)
final_image = final_image[random.randint(10,20):,:]
final_image = Image.fromarray(final_image)
# Save the image
final_image.convert('L').save(os.path.join(out_dir, image_name))
except Exception as ex:
print(ex)
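# Usage sketch (hedged; the font path and output directory are hypothetical):
#   FakeTextDataGenerator.generate(
#       index=0, text="hello world", font="fonts/example.ttf", out_dir="out",
#       height=64, extension="jpg", skewing_angle=2, random_skew=True, blur=1,
#       background_type=1, distorsion_type=0, distorsion_orientation=0,
#       is_handwritten=False, name_format=2)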
|
"""Create a factory pattern using classmethods."""
import json


class User:
def __init__(self, first_name: str, last_name: str):
self._first_name = first_name
self._last_name = last_name
def __str__(self) -> str:
return " ".join([self._first_name, self._last_name])
@property
def first_name(self):
return self._first_name
@property
def last_name(self):
return self._last_name
@classmethod
def create_using_string(cls, name_str):
    first_name, last_name = name_str.split(" ")
    return cls(first_name=first_name, last_name=last_name)
@classmethod
def create_using_json(cls, name_json):
    """Parse JSON, create a user object, then return it."""
    # Sketch implementation; the expected key names are an assumption.
    data = json.loads(name_json)
    return cls(first_name=data["first_name"], last_name=data["last_name"])
@classmethod
def create_using_obj(cls, user_obj):
    if isinstance(user_obj, User):
        return cls(first_name=user_obj.first_name, last_name=user_obj.last_name)
    else:
        raise ValueError("user_obj is not of type User")
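# Usage sketch for the JSON factory; the key names are assumptions of this example.
user_json = User.create_using_json('{"first_name": "Ada", "last_name": "Lovelace"}')
print(user_json)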
user_1 = User.create_using_string("John Smith")
print(user_1) |
"""Common configuration parameters for test cases"""
URL = "nats://127.0.0.1:4222"
CREDENTIALS = ""
CLUSTER_ID = "py-msgp-cluster"
CLIENT_ID = "test-client"
TEST_TOPIC = "test-topic-name"
TEST_PAYLOAD = b"test payload..."
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://stackoverflow.com/a/24352388/5909792
import win32gui
import win32ui
from ctypes import windll
from PIL import Image
hwnd = win32gui.FindWindow(None, 'Telegram')
# Change the line below depending on whether you want the whole window
# or just the client area.
#left, top, right, bot = win32gui.GetClientRect(hwnd)
left, top, right, bot = win32gui.GetWindowRect(hwnd)
w = right - left
h = bot - top
hwndDC = win32gui.GetWindowDC(hwnd)
mfcDC = win32ui.CreateDCFromHandle(hwndDC)
saveDC = mfcDC.CreateCompatibleDC()
saveBitMap = win32ui.CreateBitmap()
saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
saveDC.SelectObject(saveBitMap)
# Change the line below depending on whether you want the whole window
# or just the client area.
# result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 1)
result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 0)
print(result)
bmpinfo = saveBitMap.GetInfo()
bmpstr = saveBitMap.GetBitmapBits(True)
im = Image.frombuffer(
'RGB',
(bmpinfo['bmWidth'], bmpinfo['bmHeight']),
bmpstr, 'raw', 'BGRX', 0, 1)
win32gui.DeleteObject(saveBitMap.GetHandle())
saveDC.DeleteDC()
mfcDC.DeleteDC()
win32gui.ReleaseDC(hwnd, hwndDC)
if result == 1:
#PrintWindow Succeeded
im.save("test.png")
|
import numpy as np
class Aggregator:
def __init__(self):
self.union_set = np.array([])
self.aggregated_non_embeddings = {}
self.aggregated_i_embeddings = {}
self.aggregated_item_freq = {}
self.aggregated_n_records = 0
def receive_item_set(self, item_set):
# Add new item set to the existing union set
self.union_set = np.concatenate([self.union_set, item_set], axis=0)
return
def compose_union_set(self):
union_set = np.unique(self.union_set).copy()
# Clear self.union_set so it can accumulate item sets for the next communication round
self.union_set = np.array([])
return union_set
def aggregate_param_data(self, non_embedding_data, i_embedding_data):
for l, data in non_embedding_data.items():
if l not in self.aggregated_non_embeddings:
self.aggregated_non_embeddings[l] = data
else:
self.aggregated_non_embeddings[l]['weight'] += data['weight']
self.aggregated_non_embeddings[l]['bias'] += data['bias']
for i_id, data in i_embedding_data.items():
if i_id not in self.aggregated_i_embeddings:
self.aggregated_i_embeddings[i_id] = data
else:
self.aggregated_i_embeddings[i_id] += data
return
def aggregate_item_freq_and_n_records(self, item_freq, n_records):
for i_id, count in item_freq.items():
if i_id not in self.aggregated_item_freq:
self.aggregated_item_freq[i_id] = count
else:
self.aggregated_item_freq[i_id] += count
self.aggregated_n_records += n_records
def get_aggregated_data(self):
aggregated_non_embeddings = self.aggregated_non_embeddings
aggregated_i_embeddings = self.aggregated_i_embeddings
# Reset the memory after outputting the aggregated result
self.aggregated_non_embeddings = {}
self.aggregated_i_embeddings = {}
return aggregated_non_embeddings, aggregated_i_embeddings
def get_item_freq_and_n_records(self):
aggregated_item_freq = self.aggregated_item_freq
aggregated_n_records = self.aggregated_n_records
# Reset the memory after outputting the aggregated result
self.aggregated_item_freq = {}
self.aggregated_n_records = 0
return aggregated_item_freq, aggregated_n_records |
'''
Experiment to create a simple lookup cache
Author: Dave Cuthbert
Copyright: 2016
License: MIT
'''
def func_fact(x):
    """
    >>> func_fact(0)
    1
    >>> func_fact(1)
    1
    >>> func_fact(4)
    24
    >>> func_fact(10)
    3628800
    >>> func_fact(20)
    2432902008176640000
    """
    if x <= 1:  # 0! == 1! == 1
        return 1
    else:
        return x * func_fact(x-1)
def func_f_cache(x):
    """
    >>> func_f_cache(0)
    1
    >>> func_f_cache(1)
    1
    >>> func_f_cache(4)
    24
    >>> func_f_cache(10)
    3628800
    >>> func_f_cache(20)
    2432902008176640000
    """
    if x in f_cache:
        return f_cache[x]
    else:
        f_cache[x] = x * func_f_cache(x-1)
        return f_cache[x]
#Cache should be accessible between function invocations
f_cache = dict()
f_cache[0] = 1  # 0! == 1
f_cache[1] = 1
#Profile running code to see if the cache makes any difference
#Code runs too fast to see much, the extra runs magnify differences
import cProfile
cProfile.run('for i in range(100): func_fact(750)')
cProfile.run('for i in range(100): func_f_cache(750)')
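# For comparison (a sketch, not part of the original experiment): the standard
# library provides the same memoization via functools.lru_cache.
from functools import lru_cache

@lru_cache(maxsize=None)
def func_fact_lru(x):
    return 1 if x <= 1 else x * func_fact_lru(x - 1)

cProfile.run('for i in range(100): func_fact_lru(750)')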
if "__main__" == __name__:
import doctest
doctest.testmod()
'''
RESULTS
NOTE: Results were the same w/ and w/o running doctest (so no precaching)
75003 function calls (103 primitive calls) in 0.053 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.001 0.001 0.053 0.053 <string>:1(<module>)
75000/100 0.052 0.000 0.052 0.001 factorial_cache.py:1(func_fact)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1 0.000 0.000 0.000 0.000 {range}
852 function calls (103 primitive calls) in 0.001 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 0.001 0.001 <string>:1(<module>)
849/100 0.001 0.000 0.001 0.000 factorial_cache.py:28(func_f_cache)
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
1 0.000 0.000 0.000 0.000 {range}
'''
|
from typing import List
from ...excepcions import MeteocatLocalError
from ...helpers.utils import formateja_valors_data, neteja_diccionari, genera_info
class Pirineu:
def pirineu_pics_prediccio(self, slug_pic:str, any:int, mes:int, dia:int) -> List[dict]:
"""
Returns the forecast for the given peak and day.
Args:
    slug_pic (str): Peak identifier.
    any (int): Query year, numeric format YYYY.
    mes (int): Query month, numeric format MM.
    dia (int): Query day, numeric format DD.
Returns:
List[dict]: [
{
"data": "2017-04-20T00:00Z",
"cotes": [
{
"cota": "totes",
"variables": [
{
"nom": "isozero",
"valor": 1800
},
{
"nom": "iso-10",
"valor": 3700
}
]
},
{
"cota": "1500",
"variables": [
{
"nom": "humitat",
"valor": 35
},
{
"nom": "temperatura",
"valor": 1
},
{
"nom": "direccio vent",
"valor": 356
},
{
"nom": "velocitat vent",
"valor": 43
}
]
},
...
{
"cota": "3000",
"variables": [ ... ]
}
]
},
{
"data": "2017-04-20T03:00Z",
"cotes": [ ... ]
}
]
"""
any, mes, dia = formateja_valors_data(any, mes, dia)
recurs = f"pirineu/pics/{slug_pic}/{any}/{mes}/{dia}"
return self._aconsegueix(recurs)
def pirineu_pics_metadades(self) -> List[dict]:
"""
Returns the metadata of the Pyrenees peaks.
Returns:
List[dict]: [
{
"codi": "5a69e26b",
"descripcio": "Pic de Montlude",
"coordenades": {
"latitud": 42.785239999990075,
"longitud": 0.7587399999918127
},
"slug": "pic-de-montlude",
"tipus": "Pics"
},
...
{
"codi": "c82a286f",
"descripcio": "Pics de Bassiero",
"coordenades": {
"latitud": 42.60669999999494,
"longitud": 0.9953999999953725
},
"slug": "pics-de-bassiero",
"tipus": "Pics"
}
]
"""
recurs = "pirineu/pics/metadades"
return self._aconsegueix(recurs)
def pirineu_refugis_prediccio(self, slug_refugi:str, any:int, mes:int, dia:int ) -> List[dict]:
"""
Returns the forecast for the given mountain hut and day.
Args:
    slug_refugi (str): Mountain hut identifier.
    any (int): Query year, numeric format YYYY.
    mes (int): Query month, numeric format MM.
    dia (int): Query day, numeric format DD.
Returns:
List[dict]: [
{
"data": "2017-04-20T00:00Z",
"cotes": [
{
"cota": "totes",
"variables": [
{
"nom": "isozero",
"valor": 1800
},
{
"nom": "iso-10",
"valor": 3700
}
]
},
{
"cota": "1500",
"variables": [
{
"nom": "humitat",
"valor": 35
},
{
"nom": "temperatura",
"valor": 1
},
{
"nom": "direccio vent",
"valor": 356
},
{
"nom": "velocitat vent",
"valor": 43
}
]
},
...
{
"cota": "3000",
"variables": [ ... ]
}
]
},
{
"data": "2017-04-20T03:00Z",
"cotes": [ ... ]
}
]
"""
any, mes, dia = formateja_valors_data(any, mes, dia)
recurs = f"pirineu/refugis/{slug_refugi}/{any}/{mes}/{dia}"
return self._aconsegueix(recurs)
def pirineu_refugis_metadades(self)-> List[dict]:
"""
Returns the metadata of the Pyrenees mountain huts.
Returns:
List[dict]: [
{
"codi": "9cc1a507",
"descripcio": "Refugi Colomina",
"coordenades": {
"latitud": 42.51942999999505,
"longitud": 1.0012399999954205
},
"slug": "refugi-colomina",
"tipus": "Refugis"
},
...
{
"codi": "64710fe2",
"descripcio": "Refugi Gall Fer (Bosc de Virós)",
"coordenades": {
"latitud": 42.52749999999814,
"longitud": 1.3027779999980433
},
"slug": "refugi-gall-fer-bosc-de-viros",
"tipus": "Refugis"
}
]
"""
recurs = f"pirineu/refugis/metadades"
return self._aconsegueix(recurs)
def pirineu_zones_prediccio(self, any:int, mes:int, dia:int) -> dict:
"""
Returns the forecast for the Pyrenees zones on the given day.
Args:
    any (int): Query year, numeric format YYYY.
    mes (int): Query month, numeric format MM.
    dia (int): Query day, numeric format DD.
Returns:
dict: {
"dataPrediccio": "2017-04-19Z",
"dataPublicacio": "2017-04-17T13:57Z",
"franjes": [
{
"idTipusFranja": 2,
"nom": "06:00 - 12:00h",
"zones": [
{
"variablesValors": [
{
"nom": "comentari",
"periode": 2
},
{
"nom": "intensitat",
"periode": 1
},
{
"nom": "tempesta",
"valor": "1",
"periode": 1
},
{
"nom": "acumulacioNeu",
"periode": 2
},
{
"nom": "acumulacio",
"periode": 2
},
{
"nom": "visibilitat",
"valor": "2",
"periode": 1
},
{
"nom": "cota",
"periode": 1
},
{
"nom": "cel",
"valor": "2",
"periode": 1
},
{
"nom": "probabilitat",
"valor": "1",
"periode": 1
}
],
"nom": "Pirineu oriental",
"idZona": 4
},
...
{
"variablesValors": [ ... ],
"nom": "Vessant nord Pirineu orie",
"idZona": 3
}
]
},
...
{
"idTipusFranja": 1,
"nom": "00:00h - 06:00h",
"zones": [ ... ]
}
]
}
"""
any, mes, dia = formateja_valors_data(any, mes, dia)
recurs = f"pirineu/{any}/{mes}/{dia}"
return self._aconsegueix(recurs) |
from __future__ import absolute_import, unicode_literals
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
import collections as collections_abc # noqa
import ConfigParser as configparser # noqa
import Queue as queue # noqa
import thread # noqa
def fake_python3_urllib_module():
import types
import urllib as py2_urllib
import urlparse as py2_urlparse
urllib = types.ModuleType(b'urllib') # noqa
urllib.parse = types.ModuleType(b'urllib.parse')
urllib.parse.quote = py2_urllib.quote
urllib.parse.unquote = py2_urllib.unquote
urllib.parse.urljoin = py2_urlparse.urljoin
urllib.parse.urlparse = py2_urlparse.urlparse
urllib.parse.urlsplit = py2_urlparse.urlsplit
urllib.parse.urlunsplit = py2_urlparse.urlunsplit
return urllib
urllib = fake_python3_urllib_module()
integer_types = (int, long) # noqa
string_types = basestring # noqa
text_type = unicode # noqa
input = raw_input # noqa
intern = intern # noqa
def itervalues(dct, **kwargs):
return iter(dct.itervalues(**kwargs))
from inspect import getargspec # noqa
from itertools import izip_longest as zip_longest # noqa
else:
import collections.abc as collections_abc # noqa
import configparser # noqa
import queue # noqa
import _thread as thread # noqa
import urllib # noqa
integer_types = (int,)
string_types = (str,)
text_type = str
input = input
intern = sys.intern
def itervalues(dct, **kwargs):
return iter(dct.values(**kwargs))
from itertools import zip_longest # noqa
import inspect # noqa
def getargspec(func):
spec = inspect.getfullargspec(func)
return inspect.ArgSpec(
spec.args, spec.varargs, spec.varkw, spec.defaults)
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
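# Usage sketch (illustrative, not part of the shim): the decorator applies
# ``Meta`` as the metaclass of ``Base`` on both Python 2 and Python 3,
# avoiding the divergent ``__metaclass__`` / ``metaclass=`` syntaxes.
#
#     class Meta(type):
#         pass
#
#     @add_metaclass(Meta)
#     class Base(object):
#         pass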
|
# Prints all permutations of a string; for "abc": abc, acb, bac, bca, cab, cba
# Time Complexity: O(n * n!). There are n! permutations and it takes O(n) time to print each one.
def main(s,l,r):
if l == r:
print(''.join(s))
else:
for i in range(l,r+1):
s[i],s[l] = s[l],s[i]
main(s,l+1,r)
#backtrack
s[i],s[l] = s[l],s[i]
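# Cross-check (sketch, not part of the original exercise): the standard
# library yields the same n! strings, though generally in a different order
# than the swap-based recursion above.
def permutations_stdlib(s):
    from itertools import permutations
    return [''.join(p) for p in permutations(s)]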
if __name__ == "__main__":
s= 'abc'
l = 0
r = len(s)-1
s = list(s)
print(main(s,l,r)) |
'''
Created on Aug 2, 2018
'''
import json
MAX_WEIGHT_HEADER = "max_weight"
SAVE_MODIFIED_ACTIONS_FILE_HEADER = "save_modified_actions_file"
MATCH_NUM_SAMPLES_HEADER = "match_num_samples"
SMOOTHING_ADDER_HEADER = "smoothing_adder"
NUM_EXPECTATION_SAMPLES_HEADER = "num_expectation_samples"
NUM_PERMUTATIONS_HEADER = "num_permutations"
BINARY_REWARDS_HEADER = "use_binary_rewards"
PRINT_ALL_STATS_FILE_HEADER = "print_all_stats"
NUM_STEP_SIZES_HEADER = "num_step_sizes"
SAMPLING_METHOD_HEADER = "sampling_type"
CALC_BANDITS_HEADER = "calculate_bandits"
MAKE_SUMMARY_PICKLES_HEADER = "make_summary_pickles"
MAKE_IPW_FILES_HEADER = "make_ipw_files"
RUN_PERMUTATION_TESTS_HEADER = "run_permutation_tests"
MAKE_SUMMARY_FILES_HEADER = "make_summary_files"
FORCE_ACTIONS_HEADER = "force_actions"
SMOOTHING_ADDER = 1
MAX_WEIGHT = float("inf")
defaults_dictionary = {MAX_WEIGHT_HEADER: MAX_WEIGHT,
SAVE_MODIFIED_ACTIONS_FILE_HEADER: False,
MATCH_NUM_SAMPLES_HEADER: True,
SMOOTHING_ADDER_HEADER: SMOOTHING_ADDER,
NUM_EXPECTATION_SAMPLES_HEADER: 0,
NUM_PERMUTATIONS_HEADER: 5}
def get_json_arguments(configuration_file, add_defaults = True):
config = {}
if configuration_file.endswith("json"):
with open(configuration_file) as json_file:
config = json.load(json_file)
if add_defaults:
apply_defaults(config)
return config
def apply_defaults(config):
for param_key in defaults_dictionary:
if param_key not in config:
config[param_key] = defaults_dictionary[param_key]
|
expected_output = {
"interface": {
"GigabitEthernet1": {
"group": {
100: {
"advertise_interval_secs": 1.0,
"auth_text": "hash",
"description": "DC-LAN Subnet",
"master_advertisement_interval_secs": 1.0,
"master_down_interval_secs": 3.531,
"master_router": "local",
"master_router_ip": "192.168.1.233",
"master_router_priority": 120,
"preemption": "enabled",
"priority": 120,
"state": "Master",
"virtual_ip_address": "192.168.10.1",
"virtual_mac_address": "0000.5e00.0164",
"vrrs_name": {
"DC_LAN": {
"track_object":
{1:
{"decrement": 30,
"state": "Up"}}
}
},
}
}
}
}
}
|
"""
===========================================================
Getting Started with ``TrajectoryGroup`` and ``Trajectory``
===========================================================
The basic PySPLIT workflow consists of cycling through ``TrajectoryGroup``
containers of ``Trajectory`` objects and acting on each ``Trajectory``,
refining the ``TrajectoryGroup`` or creating new ones as necessary.
Initializing the first ``TrajectoryGroup``
------------------------------------------
The first ``TrajectoryGroup`` requires ``Trajectory`` objects to be
initialized from trajectory files. Here we initialize all of the trajectories
created in ``bulk_trajgen_example.py``.
"""
from __future__ import print_function
import pysplit
trajgroup = pysplit.make_trajectorygroup(r'C:/trajectories/colgate/*')
"""
Workflow
--------
Cycle through the ``TrajectoryGroup`` and act on each trajectory. Below
are some sample geometry calculations.
"""
for traj in trajgroup:
traj.calculate_distance()
traj.calculate_vector()
"""
Let's create a new ``TrajectoryGroup`` containing a subset of the
``Trajectory`` objects in ``trajgroup``. For this example, we'll make
a group consisting only of trajectories with rainfall at timepoint 0.
Note: this will only work as written if rainfall was selected as
an output meteorological variable during trajectory generation. Alternatively,
``Trajectory.set_rainstatus()`` can use humidity variables if those
are available and if the appropriate kwargs are provided.
"""
rainylist = []
for traj in trajgroup:
traj.set_rainstatus()
if traj.rainy:
rainylist.append(traj)
rainy_trajgroup = pysplit.TrajectoryGroup(rainylist)
"""
A new ``TrajectoryGroup`` can also be created by addition or subtraction.
Let's subtract our new ``rainy_trajgroup`` from the original
``TrajectoryGroup``, ``trajgroup``. This yields a new ``TrajectoryGroup``
with only non-rainfall producing trajectories. The number of
member ``Trajectory`` objects can be checked using
``TrajectoryGroup.trajcount``
"""
dry_trajgroup = trajgroup - rainy_trajgroup
print(dry_trajgroup.trajcount)
print(rainy_trajgroup.trajcount)
print(trajgroup.trajcount)
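"""
A ``TrajectoryGroup`` can likewise be built by addition. As a quick check,
recombining the rainy and dry groups recovers the original trajectory count.
"""
recombined = rainy_trajgroup + dry_trajgroup
print(recombined.trajcount)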
|
import matplotlib
matplotlib.use('Agg', warn=False)
from nose.tools import assert_equal, assert_almost_equal
import numpy as np
from probfit import (describe, rename, Convolve, Normalized,
Extended, AddPdf, AddPdfNorm, BlindFunc)
from probfit.pdf import gaussian, ugaussian
from probfit._libstat import integrate1d
from probfit.decorator import extended, normalized
def test_describe_normal_function():
def f(x, y, z):
return x + y + z
d = describe(f)
assert_equal(list(d), ['x', 'y', 'z'])
def test_Normalized():
f = ugaussian
g = Normalized(f, (-1, 1))
norm = integrate1d(f, (-1., 1.), 1000, (0., 1.))
assert_almost_equal(g(1., 0., 1.), f(1., 0., 1.) / norm)
def test_normalized_decorator():
@normalized((-1, 1))
def f(x, mean, sigma):
return ugaussian(x, mean, sigma)
g = Normalized(ugaussian, (-1, 1))
assert_equal(describe(f), ['x', 'mean', 'sigma'])
assert_almost_equal(g(1, 0, 1), f(1, 0, 1))
def test_Normalized_cache_hit():
    def f(x, y, z): return 1. * (x + y + z)
    def g(x, y, z): return 1. * (x + y + 2 * z)
nf = Normalized(f, (-10., 10.))
ng = Normalized(g, (-10., 10.))
assert_equal(nf.hit, 0)
nf(1., 2., 3.)
ng(1., 2., 3.)
assert_equal(nf.hit, 0)
nf(3., 2., 3.)
assert_equal(nf.hit, 1)
ng(1., 2., 3.)
assert_equal(ng.hit, 1)
def test_add_pdf():
def f(x, y, z): return x + y + z
def g(x, a, b): return 2 * (x + a + b)
def h(x, c, a): return 3 * (x + c + a)
A = AddPdf(f, g, h)
assert_equal(tuple(describe(A)), ('x', 'y', 'z', 'a', 'b', 'c'))
ret = A(1, 2, 3, 4, 5, 6, 7)
expected = f(1, 2, 3) + g(1, 4, 5) + h(1, 6, 4)
assert_almost_equal(ret, expected)
# wrong integral on purpose
f.integrate = lambda bound, nint, y, z : 1. # unbound method works too
g.integrate = lambda bound, nint, a, b : 2.
h.integrate = lambda bound, nint, c, a : 3.
assert_equal(integrate1d(A, (-10., 10.), 100, (1., 2., 3., 4., 5.)), 6.)
def test_add_pdf_factor():
def f(x, y, z): return x + y + z
def g(x, a, b): return 2 * (x + a + b)
def k1(n1, n2): return 3 * (n1 + n2)
def k2(n1, y): return 4 * (n1 + y)
A = AddPdf(f, g, prefix=['f', 'g'], factors=[k1, k2])
assert_equal(tuple(describe(A)), ('x', 'fy', 'fz', 'ga', 'gb', 'fn1', 'fn2', 'gn1', 'gy'))
ret = A(1, 2, 3, 4, 5, 6, 7, 8, 9)
expected = k1(6, 7) * f(1, 2, 3) + k2(8, 9) * g(1, 4, 5)
assert_almost_equal(ret, expected)
parts = A.eval_parts(1, 2, 3, 4, 5, 6, 7, 8, 9)
assert_almost_equal(parts[0], k1(6, 7) * f(1, 2, 3))
assert_almost_equal(parts[1], k2(8, 9) * g(1, 4, 5))
def test_add_pdf_cache():
def f(x, y, z): return x + y + z
def g(x, a, b): return 2 * (x + a + b)
def h(x, c, a): return 3 * (x + c + a)
A = AddPdf(f, g, h)
assert_equal(tuple(describe(A)), ('x', 'y', 'z', 'a', 'b', 'c'))
ret = A(1, 2, 3, 4, 5, 6, 7)
assert_equal(A.hit, 0)
expected = f(1, 2, 3) + g(1, 4, 5) + h(1, 6, 4)
assert_almost_equal(ret, expected)
ret = A(1, 2, 3, 6, 7, 8, 9)
assert_equal(A.hit, 1)
expected = f(1, 2, 3) + g(1, 6, 7) + h(1, 8, 6)
assert_almost_equal(ret, expected)
def test_extended():
def f(x, y, z): return x + 2 * y + 3 * z
g = Extended(f)
assert_equal(tuple(describe(g)), ('x', 'y', 'z', 'N'))
assert_equal(g(1, 2, 3, 4), 4 * (f(1, 2, 3)))
# extended should use analytical when available
def ana_int(x, y): return y * x ** 2
ana_int_int = lambda b, n, y: 999. # wrong on purpose
ana_int.integrate = ana_int_int
g = Extended(ana_int)
assert_almost_equal(g.integrate((0, 1), 100, 5., 2.), 999.*2.)
# and not fail when it's not available
def no_ana_int(x, y): return y * x ** 2
g = Extended(no_ana_int)
assert_almost_equal(g.integrate((0, 1), 100, 5., 2.), (1.**3) / 3.*5.*2.)
def test_extended_decorator():
def f(x, y, z): return x + 2 * y + 3 * z
@extended()
def g(x, y, z):
return x + 2 * y + 3 * z
assert_equal(tuple(describe(g)), ('x', 'y', 'z', 'N'))
assert_equal(g(1, 2, 3, 4), 4 * (f(1, 2, 3)))
def test_addpdfnorm():
def f(x, y, z): return x + 2 * y + 3 * z
def g(x, z, p): return 4 * x + 5 * z + 6 * z
def p(x, y, q): return 7 * x + 8 * y + 9 * q
h = AddPdfNorm(f, g)
assert_equal(describe(h), ['x', 'y', 'z', 'p', 'f_0'])
q = AddPdfNorm(f, g, p)
assert_equal(describe(q), ['x', 'y', 'z', 'p', 'q', 'f_0', 'f_1'])
assert_almost_equal(h(1, 2, 3, 4, 0.1),
0.1 * f(1, 2, 3) + 0.9 * g(1, 3, 4))
assert_almost_equal(q(1, 2, 3, 4, 5, 0.1, 0.2),
0.1 * f(1, 2, 3) + 0.2 * g(1, 3, 4) + 0.7 * p(1, 2, 5))
def test_addpdfnorm_analytical_integrate():
def f(x, y, z): return x + 2 * y + 3 * z
def g(x, z, p): return 4 * x + 5 * z + 6 * z
def p(x, y, q): return 7 * x + 8 * y + 9 * q
f.integrate = lambda bound, nint, y, z: 1.
g.integrate = lambda bound, nint, z, p: 2.
p.integrate = lambda bound, nint, y, q: 3.
q = AddPdfNorm(f, g, p)
assert_equal(describe(q), ['x', 'y', 'z', 'p', 'q', 'f_0', 'f_1'])
integral = integrate1d(q, (-10., 10.), 100, (1., 2., 3., 4., 0.1, 0.2))
assert_almost_equal(integral, 0.1 * 1. + 0.2 * 2. + 0.7 * 3.)
def test_convolution():
f = gaussian
    g = lambda x, mu1, sigma1: gaussian(x, mu1, sigma1)
h = Convolve(f, g, (-10, 10), nbins=10000)
assert_equal(describe(h), ['x', 'mean', 'sigma', 'mu1', 'sigma1'])
assert_almost_equal(h(1, 0, 1, 1, 2), 0.17839457037411527) # center
assert_almost_equal(h(-1, 0, 1, 1, 2), 0.119581456625684) # left
assert_almost_equal(h(0, 0, 1, 1, 2), 0.1614180824489487) # left
assert_almost_equal(h(2, 0, 1, 1, 2), 0.1614180824489487) # right
assert_almost_equal(h(3, 0, 1, 1, 2), 0.119581456625684) # right
def test_rename():
def f(x, y, z):
return None
assert_equal(describe(f), ['x', 'y', 'z'])
g = rename(f, ['x', 'a', 'b'])
assert_equal(describe(g), ['x', 'a', 'b'])
def test_blindfunc():
np.random.seed(0)
f = BlindFunc(gaussian, 'mean', 'abcd', width=1.5, signflip=True)
arg = f.__shift_arg__((1, 1, 1))
totest = [1., -1.1665264284482637, 1.]
assert_almost_equal(arg[0], totest[0])
assert_almost_equal(arg[1], totest[1])
assert_almost_equal(arg[2], totest[2])
assert_almost_equal(f.__call__(0.5, 1., 1.), 0.0995003913596)
np.random.seed(575345)
f = BlindFunc(gaussian, 'mean', 'abcd', width=1.5, signflip=True)
arg = f.__shift_arg__((1, 1, 1))
assert_almost_equal(arg[0], totest[0])
assert_almost_equal(arg[1], totest[1])
assert_almost_equal(arg[2], totest[2])
assert_almost_equal(f.__call__(0.5, 1., 1.), 0.0995003913596)
|
from django.apps import AppConfig
class QuoteConfig(AppConfig):
"""Django App Config for the Quote app."""
name = 'datahub.omis.quote'
label = 'omis-quote' # namespaced app. Use this e.g. when migrating
|
# Given a list of numbers of size n, where n is greater than 3,
# find the maximum and minimum of the list using fewer than 2 * (n - 1) comparisons.
# Analysis
# One comparison orders the first two elements, giving the initial max and min.
# Each subsequent element needs at most two comparisons:
#   1) if it is greater than the running max, update max and move on (one comparison);
#   2) otherwise compare it with the running min and update min if needed (two comparisons).
# Best case (ascending order): every element stops at check (1), so n - 1 comparisons in total.
# Worst case (descending order): 1 + 2(n - 2) = 2n - 3 comparisons, still one fewer than 2 * (n - 1).
def find_min_max(nums):
    # One comparison orders the first two elements
    if nums[0] > nums[1]:
        hi, lo = nums[0], nums[1]
    else:
        hi, lo = nums[1], nums[0]
    # At most two comparisons for each remaining element
    for inx in range(2, len(nums)):
        if nums[inx] > hi:
            hi = nums[inx]
        elif nums[inx] < lo:
            lo = nums[inx]
    return (lo, hi)
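# Alternative sketch (not part of the original exercise): the classic pairwise
# method processes elements two at a time, using one comparison within the pair
# plus one against the running min and one against the running max, for roughly
# 3n/2 comparisons even in the worst case.
def find_min_max_pairwise(nums):
    if nums[0] > nums[1]:
        hi, lo = nums[0], nums[1]
    else:
        hi, lo = nums[1], nums[0]
    i = 2
    while i + 1 < len(nums):
        a, b = nums[i], nums[i + 1]
        if a > b:
            a, b = b, a
        if a < lo:
            lo = a
        if b > hi:
            hi = b
        i += 2
    if i < len(nums):  # odd leftover element
        if nums[i] < lo:
            lo = nums[i]
        elif nums[i] > hi:
            hi = nums[i]
    return (lo, hi)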
if __name__ == "__main__":
print (find_min_max([3, 5, 1, 2, 4, 8]))
# (1, 8) |
from Jumpscale import j
from .healthcheck import HealthCheckRun
descr = """
Clean up ssh daemons and tcp services left over from migration
"""
class SSHCleanup(HealthCheckRun):
    def __init__(self, node, service):
        resource = "/nodes/{}".format(node.node_id)
        super().__init__("ssh-cleanup", "SSH Cleanup", "System Cleanup", resource)
        self.node = node
        self.service = service  # stored: run() below relies on self.service
    def run(self):
        status = "OK"
        text = "Migration Cleanup Successful"
finished = []
try:
for job in self.service.aysrepo.jobsList():
job_dict = job.to_dict()
if job_dict["actionName"] == "processChange" and job_dict["actorName"] == "vm":
if job_dict["state"] == "running":
continue
vm = self.service.aysrepo.serviceGet(instance=job_dict["serviceName"], role=job_dict["actorName"])
tcp_services = vm.producers.get("tcp", [])
for tcp_service in tcp_services:
if "migrationtcp" not in tcp_service.name:
continue
tcp_service.executeAction("drop", context=self.job.context)
tcp_service.delete()
finished.append("ssh.config_%s" % vm.name)
for proc in self.node.client.process.list():
for partial in finished:
if partial not in proc["cmdline"]:
continue
config_file = proc["cmdline"].split()[-1]
self.node.client.process.kill(proc["pid"])
if self.node.client.filesystem.exists("/tmp"):
self.node.client.filesystem.remove(config_file)
        except Exception:
            text = "Error happened, cannot clean ssh processes"
            status = "ERROR"
self.add_message(self.id, status, text)
|
"""
Logic condition tests
"""
import pytest
from marshmallow import ValidationError
from py_abac.context import EvaluationContext
from py_abac.policy.conditions.logic import AllOf
from py_abac.policy.conditions.logic import AnyOf
from py_abac.policy.conditions.logic import Not
from py_abac.policy.conditions.numeric import Gt, Lt
from py_abac.policy.conditions.schema import ConditionSchema
from py_abac.request import AccessRequest
class TestLogicCondition(object):
@pytest.mark.parametrize("condition, condition_json", [
(AllOf([Gt(0.0), Lt(1.0)]),
{"condition": "AllOf", "values": [
{"condition": "Gt", "value": 0.0},
{"condition": "Lt", "value": 1.0}
]}),
(AnyOf([Gt(0.0), Lt(1.0)]),
{"condition": "AnyOf", "values": [
{"condition": "Gt", "value": 0.0},
{"condition": "Lt", "value": 1.0}
]}),
(Not(Gt(1.0)),
{"condition": "Not", "value": {
"condition": "Gt", "value": 1.0
}}),
])
def test_to_json(self, condition, condition_json):
assert ConditionSchema().dump(condition) == condition_json
def test_from_json_and(self):
condition = AllOf([Gt(0.0), Lt(1.0)])
condition_json = {
"condition": "AllOf", "values": [
{"condition": "Gt", "value": 0.0},
{"condition": "Lt", "value": 1.0}
]}
new_condition = ConditionSchema().load(condition_json)
assert isinstance(new_condition, AllOf)
assert len(condition.values) == len(new_condition.values)
assert isinstance(new_condition.values[0], condition.values[0].__class__)
assert new_condition.values[0].value == condition.values[0].value
assert isinstance(new_condition.values[1], condition.values[1].__class__)
assert new_condition.values[1].value == condition.values[1].value
def test_from_json_or(self):
condition = AnyOf([Gt(0.0), Lt(1.0)])
condition_json = {
"condition": "AnyOf", "values": [
{"condition": "Gt", "value": 0.0},
{"condition": "Lt", "value": 1.0}
]}
new_condition = ConditionSchema().load(condition_json)
assert isinstance(new_condition, AnyOf)
assert len(condition.values) == len(new_condition.values)
assert isinstance(new_condition.values[0], condition.values[0].__class__)
assert new_condition.values[0].value == condition.values[0].value
assert isinstance(new_condition.values[1], condition.values[1].__class__)
assert new_condition.values[1].value == condition.values[1].value
def test_from_json_not(self):
condition = Not(Gt(1.0))
condition_json = {
"condition": "Not", "value": {
"condition": "Gt", "value": 1.0
}}
new_condition = ConditionSchema().load(condition_json)
assert isinstance(new_condition, Not)
assert isinstance(new_condition.value, condition.value.__class__)
assert new_condition.value.value == condition.value.value
@pytest.mark.parametrize("condition_json", [
{"condition": "AllOf", "values": []},
{"condition": "AllOf", "values": None},
{"condition": "AllOf", "values": [None]},
{"condition": "AnyOf", "values": []},
{"condition": "AnyOf", "values": None},
{"condition": "AnyOf", "values": [None]},
{"condition": "Not", "values": 1.0},
])
def test_create_error(self, condition_json):
with pytest.raises(ValidationError):
ConditionSchema().load(condition_json)
@pytest.mark.parametrize("condition, what, result", [
(AllOf([Gt(0.0), Lt(1.0)]), -1.5, False),
(AllOf([Gt(0.0), Lt(1.0)]), 0.5, True),
(AllOf([Gt(0.0), Lt(1.0)]), 1.5, False),
(AnyOf([Gt(1.0), Lt(0.0)]), -1.5, True),
(AnyOf([Gt(1.0), Lt(0.0)]), 0.5, False),
(AnyOf([Gt(1.0), Lt(0.0)]), 1.5, True),
(Not(Gt(1.0)), 0.5, True),
(Not(Gt(1.0)), 1.5, False),
])
def test_is_satisfied(self, condition, what, result):
request = AccessRequest(subject={"attributes": {"what": what}}, resource={}, action={}, context={})
ctx = EvaluationContext(request)
ctx.ace = "subject"
ctx.attribute_path = "$.what"
assert condition.is_satisfied(ctx) == result
|
"""
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
"""
import unittest
from graph_notebook.network.gremlin.GremlinNetwork import parse_pattern_list_str
from graph_notebook.network.gremlin.GremlinNetwork import PathPattern
class TestPatternListParser(unittest.TestCase):
def test_parse_v_e_v(self):
pattern_str = " v ,e ,v "
expected = [PathPattern.V, PathPattern.E, PathPattern.V]
pattern = parse_pattern_list_str(pattern_str)
self.assertEqual(expected, pattern)
|
# -*- coding: utf-8 -*-
"""
@File : t_normaldatabased.py
@Author : Zitong Lu
@Contact : zitonglu1996@gmail.com
@License : MIT License
"""
import numpy as np
import unittest
from pyctrsa.ctsimilarity.normaldatabased import ctsimilarities_cal
class test_normaldatabased(unittest.TestCase):
def test_ctsimilarities_cal(self):
data1 = np.random.rand(8, 16, 20)
data2 = np.random.rand(8, 16, 20)
CTSimilarities = ctsimilarities_cal(data1=data1, data2=data2, sub_opt=1, chl_opt=1)
self.assertEqual(CTSimilarities.shape[0], 8)
self.assertEqual(len(CTSimilarities.shape), 5)
CTSimilarities = ctsimilarities_cal(data1=data1, data2=data2, sub_opt=1, chl_opt=0)
self.assertEqual(CTSimilarities.shape[0], 8)
self.assertEqual(len(CTSimilarities.shape), 4)
CTSimilarities = ctsimilarities_cal(data1=data1, data2=data2, sub_opt=0, chl_opt=1)
self.assertEqual(CTSimilarities.shape[0], 16)
self.assertEqual(len(CTSimilarities.shape), 4)
CTSimilarities = ctsimilarities_cal(data1=data1, data2=data2, sub_opt=0, chl_opt=0)
self.assertEqual(CTSimilarities.shape[0], 3)
self.assertEqual(len(CTSimilarities.shape), 3) |
import pandas as pd
import quandl
import pandas_datareader as pdr # Use to import data from the web
import datetime # use for start and end dates
import matplotlib.pyplot as plt
def importExcel(object_list):
for x in object_list:
raw_data = pd.read_excel(x)
return raw_data
def makeStock(ticker, start, end):
    # assumes start and end are (year, month, day) tuples expanded into datetimes
    return pdr.get_data_google(ticker, datetime.datetime(*start), datetime.datetime(*end))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import sys
import re
import urllib.parse
import logging
import logging.handlers
import os
import time
import codecs
import string
import uuid  # used by format_filename as a fallback name
from bs4 import BeautifulSoup
import tldextract
try:
from os import scandir, walk
except ImportError:
from scandir import scandir, walk
from tqdm import tqdm
import validators
import grequests
from tld import get_tld
from tld.utils import update_tld_names
import requests
# update_tld_names() https://stackoverflow.com/a/22228140
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# https://github.com/tqdm/tqdm/issues/481
tqdm.monitor_interval = 0
# RegEx that is used to filter searches for URLs on any given page.
# Used in is_relevant_link_from_soup and is_relevant_link_from_html functions
filter_regex = re.compile(".*([Pp]rogram|[Aa]dmission|[Cc]ertificate|[Dd]egree|[Dd]iploma|[Ff]aculty|[Ss]chool|[Dd]epartment|[Uu]ndergrad|[Gg]rad).*")
filter_title_regex = re.compile(".*([Pp]rogram|[Aa]dmission|[Cc]ourse).*")
def main():
current_working_dir = os.getcwd() # current directory we are standing on
websites_list = get_file_content_as_list(websites_file)
overall_prog = tqdm(total=len(websites_list), unit="website", desc="Overall")
for idx, website in enumerate(websites_list):
planned_urls_array = []
crawled_urls_array = []
        # Extracts the registered domain name from the URL (e.g. "ualberta" for ualberta.ca)
seed = tldextract.extract(website).domain
pbar = {}
pbar[idx] = tqdm(total=max_pages, unit="page", desc=website, ascii=True)
if validators.url(website):
batch_website = "{}_{}".format(batch_name, get_tld(website))
if not os.path.exists(batch_website):
os.mkdir(batch_website)
with ChDir(batch_website):
setup_crawler_files()
start_page = 1
else:
with ChDir(batch_website):
start_page = get_start_page()
handler = setup_rotating_log(batch_website, seed)
with ChDir(batch_website):
crawl(seed, pbar[idx], start_page, planned_urls_array, crawled_urls_array, website, max_pages)
overall_prog.update(1)
handler.close()
logger.removeHandler(handler)
def setup_rotating_log(batch_website, seed):
with ChDir(batch_website):
# current time, used in the names of the folder and the logging file
curtime = time.strftime("%Y-%m-%d-%H-%M-%S", time.gmtime())
logs_dir = "logs"
if not os.path.exists(logs_dir):
os.mkdir(logs_dir)
log_file_name = '_uniscraperlog_{}_{}.log'.format(seed, curtime)
path_to_log_file = os.path.join(logs_dir, log_file_name)
# add a rotating logfile handler
handler = logging.handlers.RotatingFileHandler(
path_to_log_file,
maxBytes=2097152, # 2 MB
backupCount=100
)
logger.addHandler(handler)
return handler
def crawl(seed, prog_upd, start_page, planned_urls_array, crawled_urls_array, website, max_pages):
"""Function that takes link, saves the contents to text file call href_split
"""
logger.info("Crawling through domain '" + seed + "'")
tqdm.write("++++++++++Crawling through domain {}+++++++++++".format(seed))
visited_urls, planned_urls, crawled_urls = setup_crawler_files()
if start_page == 1:
# Array that holds the queue to be visited later
planned_urls_array.append(website)
# Logging the urls
planned_urls.write(website)
planned_urls.write("\n")
# Gets the root of the url
url_split = website.split("://", 1)
# Array that holds urls that have been found.
# This is the array that all new URLs are checked against to prevent repeating.
# Record URL with both http and https prefixes
crawled_urls_array.append("http://" + url_split[1])
crawled_urls_array.append("https://" + url_split[1])
# Also log the same into the text file
crawled_urls.write("http://" + url_split[1] + "\n")
crawled_urls.write("https://" + url_split[1] + "\n")
while start_page <= max_pages and len(planned_urls_array) > 0:
start_page = process_current_link(start_page,
prog_upd,
planned_urls_array[0],
seed,
visited_urls,
crawled_urls_array,
crawled_urls,
planned_urls_array,
planned_urls,
max_pages,
)
prog_upd.update(1)
# Deletes the currently looked at URL from the queue
planned_urls_array.pop(0)
def process_current_link(page, prog_upd, link, seed, visited_urls, crawled_urls_array, crawled_urls, planned_urls_array, planned_urls, max_pages):
"""Function that grabs the first link in the
list of planned urls, requests the page and processes it
"""
empty_request_log = codecs.open("_empty_requests.txt", "w", "utf-8")
# Try to get the html of the URL
r = request_url(link, visited_urls)
grab_all = False
if r: # if the request returned an html
html = r.text
current_url = r.url
# Soupify
# For now it soupifies the link regardless of the mode,
# because it uses soup later to extract visible text from the page
soup = BeautifulSoup(html, 'html.parser')
grab_all = is_title_page_relevant(soup)
# Gets the name for the file to store the html text in
name = create_name_from_html(html)
# find and process all links
process_links_from_html(html,
prog_upd,
current_url,
seed,
crawled_urls_array,
crawled_urls,
planned_urls_array,
planned_urls,
grab_all,
)
# Adds the .txt to the end of the name
name = "{0}.txt".format(name)
# Find only visible text
visible_text = extract_text(soup)
if visible_text: # save it as a text file
try:
# Create and open the file with that name
fo = codecs.open(name, "w", "utf-8-sig")
# Write URL to that file
fo.write(current_url + "\n")
# Append the html to the file
fo.write(visible_text)
# Close the pipe to the file
fo.close()
# Log the creation of the file
logger.info('Created file ' + name)
except KeyboardInterrupt:
tqdm.write("Script interrupted by user. Shutting down.")
logger.info("Script interrupted by user")
shut_down()
except Exception:
logger.exception("Can not encode file: " + current_url)
else:
tqdm.write("No visible text in {}".format(link))
logger.warning('No visible text in ' + link)
# Else: html does not exist or is empty. Log error
else:
logger.warning('Request for ' + link + ' returned empty html')
empty_request_log.write(link)
empty_request_log.write("\n")
# Update on the total number of pages
num_digits = len(str(max_pages))
grab_blurb = "grabbing ALL links" if grab_all else "grabbing key links"
tqdm.write("[{0:0{width}d}]:[{1}] - {2}".format(page, grab_blurb.ljust(18), link.encode("ascii", "ignore"), width=num_digits))
# Increment page count
page += 1
# Every 50 pages checks the size of the folder. Prints the amount of data collected in MB to the console and log file
if page % 50 == 0:
size_of_directory = get_tree_size(os.curdir) / 1000000
tqdm.write("Size: {} MB".format(str(round(size_of_directory, 5))))
logger.info("Size: " + str(round(size_of_directory, 5)) + "MB")
# Time delay in seconds to prevent crashing the server
time.sleep(.01)
return page
def get_tree_size(path):
    """Return total size of files in given path and subdirs by walking the tree.
    Recursive.
    Called from process_current_link.
    """
total = 0
for entry in scandir(path):
if entry.is_dir(follow_symlinks=False):
total += get_tree_size(entry.path)
else:
total += entry.stat(follow_symlinks=False).st_size
return total
def extract_links_from_page(html_page):
return re.findall(r'<a href="(http[s]?://[^">]*)', html_page)
def process_links_from_html(html, prog_upd, cur_link, seed, crawled_urls_array, crawled_urls, planned_urls_array, planned_urls, grab_all=False):
"""Take an array of links, run the split on each and add the results
to the appropriate arrays and files
"""
links = []
# tqdm.write("grabbing all {}".format(str(grab_all)))
if html.partition('<body')[2]:
html = html.partition('<body')[2]
link_strings = html.split('href=') # split the page into sections using "href=" as a delimiter
for lnk in link_strings[1:]:
href = lnk.partition('</a')[0] # grab all text before the "</a" – this var now contains text after an href parameter and before a closing tag, and thus includes the text content of the link
if (grab_all or is_relevant_link_from_html(href)):
href = href.partition('>')[0]
href = href.partition(' ')[0]
href = dequote(href)
new_link = (urllib.parse.urldefrag(href)[0]).rstrip('/')
new_link = urllib.parse.urljoin(cur_link, new_link)
if this_is_not_media(new_link):
if check_domain(new_link, seed):
# if the link is not in crawledURLsArray then it appends it to urls and crawledURLsArray
if new_link not in crawled_urls_array:
# Ensures no jpg or pdfs are stored and that no mailto: links are stored.
if new_link.startswith("http") and '.pdf' not in new_link and '.jpg' not in new_link and '.mp3' not in new_link:
#???TODO: add checks for www.domain.com and https://
# Adds new link to array
planned_urls_array.append(new_link)
# Adds new link to queue file
planned_urls.write(new_link)
planned_urls.write("\n")
try:
# Remove the front of the URL (http or https)
http_split = new_link.split("://", 1)
# Add all possible link variations to file of URLs that have been looked at
# Adds new link to array
crawled_urls_array.append("http://" + http_split[1])
# Adds new link to already looked at file
crawled_urls.write("http://" + http_split[1])
crawled_urls.write("\n")
# Adds new link to array
crawled_urls_array.append("https://" + http_split[1])
# Adds new link to already looked at file
crawled_urls.write("https://" + http_split[1])
crawled_urls.write("\n")
except IndexError as e:
logger.info(str(e))
return
def add_to_crawled_urls_list(new_link, crawled_urls_array, crawled_urls):
"""if the link is not in crawled_urls_array then it
appends it to urls and crawled_urls_array
"""
if new_link not in crawled_urls_array:
# Ensures no jpg or pdfs are stored and that no mailto: links are stored.
if new_link.startswith("http") and '.pdf' not in new_link and '.jpg' not in new_link and '.mp3' not in new_link:
#???TODO: add checks for www.domain.com and https://
try:
# Remove the front of the URL (http or https)
http_split = new_link.split("://", 1)
# Add all possible link variations to file of URLs that have been looked at
# Adds new link to array
crawled_urls_array.append("http://" + http_split[1])
# Adds new link to already looked at file
crawled_urls.write("http://" + http_split[1])
crawled_urls.write("\n")
# Adds new link to array
crawled_urls_array.append("https://" + http_split[1])
# Adds new link to already looked at file
crawled_urls.write("https://" + http_split[1])
crawled_urls.write("\n")
except IndexError as e:
logger.info(str(e))
def add_to_planned_urls_list(new_link, planned_urls_array, planned_urls):
# Adds new link to array
planned_urls_array.append(new_link)
# Adds new link to queue file
planned_urls.write(new_link)
planned_urls.write("\n")
def is_title_page_relevant(soup):
return True if soup.find('title', string=filter_title_regex) else False
def this_is_not_media(new_link):
path = urllib.parse.urlparse(new_link).path
ext = os.path.splitext(path)[1]
    unwanted = ['.mp3', '.mp4', '.doc', '.docx', '.pdf', '.jpg', '.css']
if ext not in unwanted and new_link.startswith("http"):
return True
else:
return False
def create_name_from_html (html):
"""Function for creating name
Use the title of the html page as the title of the text file
Called from process_current_link
Uses string search to locate the <title> tag
Parameter html is a string
"""
name_list = (html.partition("</title")[0]).split("<title") #grab part of html before </title
name_part = name_list[-1] #grab part of html after <title
name = name_part.split(">")[-1]
if name:
# removes invalid characters from title
name = format_filename(name) + '__' + str(time.time())
logger.info('Created name ' + name)
else:
name = "no_title_" + str(time.time()) # if no title provided give a no title with a timestamp
        logger.warning('Failed to create a name, using \'' + name + '\' instead')
return name
def format_filename(name):
#Taken from: https://gist.github.com/seanh/93666
"""Take a string and return a valid filename constructed from the string.
Uses a whitelist approach: any characters not present in valid_chars are
removed. Also spaces are replaced with underscores."""
try:
valid_chars = "-_() %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in name if c in valid_chars)
# Remove spaces in filename
filename = filename.strip()
filename = filename.replace(' ','_')
except TypeError as e:
filename = str(uuid.uuid4())
logger.error("Got and error: {}".format(str(e)))
return filename
def is_relevant_link_from_html(link):
"""checks that the text content of the link matches the filter_regex
input parameter is a string
"""
if filter_regex.match(link):
return True
return False
#return True #Uncomment to grab all links
def dequote(s):
    """Function for deleting paired single or double quotes
    If a string has single or double quotes around it, remove them.
    Make sure the pair of quotes match.
    If a matching pair of quotes is not found, return the string unchanged.
    """
    if (len(s) >= 2 and s[0] == s[-1]) and s.startswith(("'", '"')):
        s = s[1:-1]
    return s
def extract_text(soup):
"""Extract text from HTML pages and Return normalized text
https://stackoverflow.com/questions/30565404/remove-all-style-scripts-and-html-tags-from-an-html-page
return string
"""
for script in soup(["script", "style"]): # remove all javascript and stylesheet code
script.extract()
# get text, the separator keeps the paragraphs their usual short
# https://stackoverflow.com/a/38861217
text = soup.get_text(separator="\n")
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
# drop blank lines
return '\n'.join(chunk for chunk in chunks if chunk)
def request_url(url, visited_urls):
    """Function for requesting a url
    Given a URL, go to that url, get the html and return the response.
    Called from process_current_link.
    """
# Set a header to pretend it's a browser
headers = requests.utils.default_headers()
headers.update (
{
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
}
)
# Log that this URL is being saved
logger.info('Requesting ' + url)
visited_urls.write(url)
visited_urls.write("\n")
    # Use requests module to get html from url as an object
try:
r = requests.get(url, headers=headers)
if r.ok:
if "text/html" in r.headers["content-type"]:
return r
logger.info(str(r))
return None
except requests.exceptions.Timeout:
# Maybe set up for a retry, or continue in a retry loop
print("\nTook too long to get the page.")
logger.info("Took too long to get the page.")
except requests.exceptions.RequestException as e:
# catastrophic error. bail.
print("\nCannot get the page.")
logger.info("Cannot get the page.")
except KeyboardInterrupt:
print("\n\nScript interrupted by user. Shutting down.")
logger.info("Script interrupted by user")
shut_down()
except Exception:
logger.exception("Couldn\'t request " + url)
return None
def exception(request, exception):
print("Problem: {}: {}".format(request.url, exception))
def request_urls(urls_list):
results = grequests.map((grequests.get(u) for u in urls_list), exception_handler=exception, size=5)
def get_start_page():
"""Open the visited_urls text file and count the number of lines
in it – that's how many pages the script visited
throughout its previous runs
"""
i = 1
with open("_visited_urls.txt", "r", encoding="utf-8-sig") as f:
for i, l in enumerate(f, start=1):
pass
page = i
return page
class ChDir(object):
"""
Step into a directory context on which to operate on.
https://pythonadventures.wordpress.com/2013/12/15/chdir-a-context-manager-for-switching-working-directories/
"""
def __init__(self, path):
self.old_dir = os.getcwd()
self.new_dir = path
def __enter__(self):
os.chdir(self.new_dir)
def __exit__(self, *args):
os.chdir(self.old_dir)
def get_file_content_as_list(file_name):
"""Give a filename, open and read the contents into a list
file_name - file to be opened
return list of words
"""
with open(file_name, 'r') as file_name_handle:
return file_name_handle.read().splitlines()
def setup_crawler_files():
# Open the visited_urls text file
visited_handler = codecs.open("_visited_urls.txt", "a+", "utf-8")
# Open the file with planned urls and add them to the array of planned urls
planned_handler = codecs.open("_planned_urls.txt", "a+", "utf-8")
# Open the file with crawled urls and add them to the array of crawled urls
crawled_handler = codecs.open("_crawled_urls.txt", "a+", "utf-8")
return visited_handler, planned_handler, crawled_handler
def check_domain(new_link, seed):
"""Function that checks if the link provided is in the
same domain as the seed
return: boolean
"""
new_link_domain = tldextract.extract(new_link).domain
if (new_link_domain == seed):
return True
return False
# Shut down gracefully and log it
def shut_down():
# TODO Close all the things/pipes to files
sys.exit()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Crawl and scrape a list of URLs for further searching.')
parser.add_argument(
'-w',
'--websites',
dest='websites',
default=None,
required=True,
help='The file containing list of websites URLs (mandatory)'
)
parser.add_argument(
'-b',
'--batch',
dest='batch',
default=None,
required=True,
help='Name for this batch of processing (mandatory)'
)
parser.add_argument(
'-r',
'--resume',
dest='resume',
default=30,
required=False,
help="Check if the given batch exists and attempt to resume" \
" if not complete."
)
parser.add_argument(
'-m',
'--max_pages',
dest='max_pages',
default=10000,
required=False,
help="The maximum number of pages to crawl per website"
)
# these are module global variables and can be access by any function in
# this module
args = parser.parse_args()
websites_file = args.websites
batch_name = args.batch
resume_attempt = args.resume
max_pages = int(args.max_pages)
try:
main()
except KeyboardInterrupt as e:
logger.info("Script interrupted by user")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
from peepdis.core import peep, Peeper, CallablePeeper
import numpy as np
class AttrClass:
def __init__(self, foo):
self.foo = foo
class MethodAnnotationsClass:
def method_annotations(self, a: int, b: int) -> int:
return a + b
if __name__ == '__main__':
obj = np.array([1, 2, 3])
peep(obj)
# attr_peeper = Peeper(obj)
# attr_peeper.peep(forge=True)
# attr_peeper.print(verbose=True)
|
from tap_ebay.streams.base import BaseStream
import singer
LOGGER = singer.get_logger() # noqa
class OrdersStream(BaseStream):
API_METHOD = 'GET'
TABLE = 'orders'
KEY_PROPERTIES = ['orderId']
@property
def path(self):
return '/sell/fulfillment/v1/order'
def get_stream_data(self, result):
return [
self.transform_record(record)
for record in result['orders']
]
|
import unittest
from aviatrix_poller import *
class Aviatrix_Poller_Test(unittest.TestCase):
def test_find_subnets(self):
region_id='us-east-1'
vpc_id='vpc-afdee5d7'
ec2=boto3.client('ec2',region_name=region_id)
result=find_subnets(ec2,region_id,vpc_id)
self.assertIs(type(result), list)
for subnet in result:
self.assertEqual(subnet['SubnetId'][:6],'subnet')
self.assertEqual(subnet['Name'][:8],'Unittest')
#Fail tests
region_id='us-east-2'
vpc_id='vpc-afdee5d7'
ec2=boto3.client('ec2',region_name=region_id)
with self.assertRaises(IndexError):
result=find_subnets(ec2,region_id,vpc_id)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Write stage settings here, or override base settings
"""
from __future__ import absolute_import, unicode_literals
import sentry_sdk
from sentry_sdk import configure_scope
from sentry_sdk.integrations.django import DjangoIntegration
from pipit.settings.base import * # NOQA
DEBUG = False
DATABASES["default"]["CONN_MAX_AGE"] = get_env("DATABASE_CONN_MAX_AGE", default=60)
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.db.DatabaseCache",
"LOCATION": "cache_table",
}
}
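# Note: the database cache backend stores entries in the table named above,
# which must first be created with "python manage.py createcachetable".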
STATICFILES_STORAGE = (
"django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
) # NOQA
# Enable caching of templates in production environment
TEMPLATES[0]["OPTIONS"]["loaders"] = [
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Prevent Man in the middle attacks with HTTP Strict Transport Security
SECURE_HSTS_SECONDS = 31536000
SECURE_HSTS_PRELOAD = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
# Block content that appears to be an XSS attack in certain browsers
SECURE_BROWSER_XSS_FILTER = True
# Use a secure cookie for the session cookie
SESSION_COOKIE_SECURE = True
# Use a secure cookie for the CSRF cookie
CSRF_COOKIE_SECURE = True
# Use HttpOnly flag on the CSRF cookie
# Note: JavaScript will not be able to access the CSRF cookie
CSRF_COOKIE_HTTPONLY = True
# Sentry
SENTRY_DSN = get_env('SENTRY_DSN')
SENTRY_ENVIRONMENT = "stage"
sentry_sdk.init(
dsn=SENTRY_DSN,
release=APP_VERSION,
environment=SENTRY_ENVIRONMENT,
debug=True,
integrations=[DjangoIntegration()],
)
# Add sentry to logging
with configure_scope() as scope:
scope.level = 'error'
|
import configparser
from caller.config.parser import parse_literal, parse_string, parse_list, parse_benchmarks, parse_bool
class ConfigMicro(object):
def __init__(self):
"""
Sets the default values for the micro benchmarks
"""
# run
self.values = 5
self.processes = 20
self.bm_cooldown = 10
self.speed = [100]
self.clear_db = True
# app
self.url = "127.0.0.1"
self.port = "5000"
self.protocol = "http"
self.webserver = "gunicorn"
self.output = "file"
# fmd
self.levels = [-1, 0, 1, 2, 3]
self.db_url = 'sqlite:///micro_fmd.db'
# benchmarks
self.benchmarks = [('pidigits', 'Compute digits of pi.')]
def init_from(self, file=None):
config_parser = configparser.RawConfigParser()
config_parser.read(file)
# parse run
self.values = parse_literal(config_parser, 'run', 'values', self.values)
self.processes = parse_literal(config_parser, 'run', 'processes', self.processes)
self.bm_cooldown = parse_literal(config_parser, 'run', 'bm_cooldown', self.bm_cooldown)
self.speed = parse_list(config_parser, 'run', 'speed', self.speed)
self.clear_db = parse_bool(config_parser, 'run', 'clear_db', self.clear_db)
# parse app
self.url = parse_string(config_parser, 'app', 'url', self.url)
self.port = parse_string(config_parser, 'app', 'port', self.port)
self.protocol = parse_string(config_parser, 'app', 'protocol', self.protocol)
self.webserver = parse_string(config_parser, 'app', 'webserver', self.webserver)
self.output = parse_string(config_parser, 'app', 'output', self.output)
# parse fmd
self.levels = parse_list(config_parser, 'fmd', 'levels', self.levels)
self.db_url = parse_string(config_parser, 'fmd', 'db_url', self.db_url)
# parse benchmarks
self.benchmarks = parse_benchmarks(config_parser, 'benchmarks', self.benchmarks)
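# Example configuration file for ConfigMicro (illustrative sketch: the section
# and key names mirror the parsing above, the values are arbitrary, and the
# exact [benchmarks] entry format depends on parse_benchmarks):
#
#   [run]
#   values = 10
#   processes = 40
#   speed = [50, 100]
#   clear_db = yes
#
#   [fmd]
#   levels = [-1, 0, 1]
#   db_url = sqlite:///micro_fmd.db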
class ConfigMacro(object):
def __init__(self):
"""
Sets the default values for the macro benchmark
"""
# run
self.values = 5
self.processes = 5
self.bm_cooldown = 10
self.users = [1, 2, 5, 10]
# app
self.app_db = 'sqlite:///macro.db'
self.url = "127.0.0.1"
self.port = "5000"
self.protocol = "http"
self.webserver = "gunicorn"
# fmd
self.levels = [-1, 0, 1, 2, 3]
self.fmd_db = 'sqlite:///macro_fmd.db'
def init_from(self, file=None):
config_parser = configparser.RawConfigParser()
config_parser.read(file)
# parse run
self.values = parse_literal(config_parser, 'run', 'values', self.values)
self.processes = parse_literal(config_parser, 'run', 'processes', self.processes)
self.bm_cooldown = parse_literal(config_parser, 'run', 'bm_cooldown', self.bm_cooldown)
self.users = parse_list(config_parser, 'run', 'users', self.users)
# parse app
self.app_db = parse_string(config_parser, 'app', 'app_db', self.app_db)
self.url = parse_string(config_parser, 'app', 'url', self.url)
self.port = parse_string(config_parser, 'app', 'port', self.port)
self.protocol = parse_string(config_parser, 'app', 'protocol', self.protocol)
self.webserver = parse_string(config_parser, 'app', 'webserver', self.webserver)
# parse fmd
self.levels = parse_list(config_parser, 'fmd', 'levels', self.levels)
self.fmd_db = parse_string(config_parser, 'fmd', 'fmd_db', self.fmd_db)
|
import config
import models
import numpy as np
import json
#Train TransR based on pretrained TransE results.
#++++++++++++++TransE++++++++++++++++++++
con = config.Config()
#Input training files from benchmarks/FB15K/ folder.
con.set_in_path("./benchmarks/FB15K/")
#True: Input test files from the same folder.
con.set_log_on(1)
con.set_work_threads(8)
con.set_train_times(1000)
con.set_nbatches(100)
con.set_alpha(0.001)
con.set_bern(0)
con.set_dimension(100)
con.set_margin(1.0)
con.set_ent_neg_rate(1)
con.set_rel_neg_rate(0)
con.set_opt_method("SGD")
#Model parameters will be exported via torch.save() automatically.
con.set_export_files("./res/transe.pt")
#Model parameters will be exported to json files automatically.
con.set_out_files("./res/transe.vec.json")
con.init()
con.set_model(models.TransE)
con.run()
parameters = con.get_parameters("numpy")
#++++++++++++++TransR++++++++++++++++++++
conR = config.Config()
#Input training files from benchmarks/FB15K/ folder.
conR.set_in_path("./benchmarks/FB15K/")
#True: Input test files from the same folder.
conR.set_work_threads(4)
conR.set_train_times(1000)
conR.set_nbatches(100)
conR.set_alpha(0.001)
conR.set_bern(0)
conR.set_dimension(100)
conR.set_margin(1)
conR.set_ent_neg_rate(1)
conR.set_rel_neg_rate(0)
conR.set_opt_method("SGD")
#Model parameters will be exported via torch.save() automatically.
conR.set_export_files("./res/transr.vec.tf")
#Model parameters will be exported to json files automatically.
conR.set_out_files("./res/transr.vec.json")
#Initialize experimental settings.
conR.init()
#Load pretrained TransE results.
conR.set_model(models.TransR)
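#Initialize each relation's transfer matrix to a flattened 100x100 identity,
#so the TransR projection starts out as a no-op on the pretrained TransE embeddings.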
parameters["transfer_matrix.weight"] = np.array([(np.identity(100).reshape((100*100))) for i in range(conR.get_rel_total())])
conR.set_parameters(parameters)
#Train the model.
conR.run()
|
"""
test_database.py - Tests for ovirt_engine_setup/engine_common/database.py
"""
import sys
import ovirt_engine_setup.engine_common as common
import mock
import pytest
# mock imports
common.constants = mock.Mock()
mock_ovirt_setup_lib = mock.Mock()
mock_ovirt_setup_lib.hostname = mock.Mock()
mock_ovirt_setup_lib.dialog = mock.Mock()
sys.modules['ovirt_setup_lib'] = mock_ovirt_setup_lib
import ovirt_engine_setup.engine_common.database as under_test # isort:skip # noqa: E402
@pytest.mark.parametrize(
('given', 'expected'), [
('5', '5'),
('5.5', '5.5'),
('0.5', '0.5'),
('5555.5555', '5555.5555'),
('5Gb', '5Gb'),
('value-gone$#wild', 'value-gone$#wild'),
]
)
def test_value_extraction_from_conf(given, expected):
match = under_test.RE_KEY_VALUE.match('key=%s' % given)
assert match.group('value') == expected
|
import os
import re
def get_erase_char():
if os.name == 'posix':
pttrn = re.compile(r'; erase = (\^.);')
settings = os.popen('stty -a').read()
return pttrn.findall(settings)[0]
def repair_console(erase_char = '^H'):
if os.name == 'posix':
os.system('stty sane')
os.system(f'stty erase {erase_char}')
def log_info(text, console):
"Matchering's information output will be marked with a bold prefix."
console.log(f"[bold]INFO:[/bold] {text}")
def log_warning(text, console):
"The warning output will be marked with a bold, red prefix and warning sign."
console.log(f":warning: [bold red]WARNING:[/bold red] {text}")
|
from winning.std_calibration import centered_std_density
from winning.lattice_calibration import dividend_implied_ability
from winning.lattice_conventions import STD_UNIT, STD_SCALE, STD_L, STD_A
import numpy as np
# Illustrates the basic calibration.
# Same as the basic example, but with modified discretization parameters.
if __name__ =='__main__':
# Choose the length of the lattice, which is 2*L+1
L = 700
# Choose the unit of discretization
unit = 0.005
    # Step 1. The unit is used to create an approximation of a density, here N(0,1) for simplicity
density = centered_std_density(L=L, unit=unit)
# Step 2. We set winning probabilities, most commonly represented in racing as inverse probabilities ('dividends')
dividends = [2,6,np.nan, 3]
# Step 3. The algorithm implies relative ability (i.e. how much to translate the performance distributions)
# Missing values will be assigned odds of 1999:1 ... or you can leave them out.
abilities = dividend_implied_ability(dividends=dividends,density=density, nan_value=2000, unit=unit)
# That's all. Lower ability is better.
print(abilities)
# Note that if you don't supply the unit, the abilities take on greater magnitudes than before (i.e. they are offsets on the lattice)
# So you'll have to multiply them by the unit to get a scaled ability consistent with the density definition
scale_free_abilities = dividend_implied_ability(dividends=dividends, density=density, nan_value=2000)
scaled_ability = [ a*unit for a in scale_free_abilities ]
print(scaled_ability) |
"""Main module."""
import functools
import json
from typing import Any, Dict, List, Optional
import jsonschema
import toml
from toml_resume.constants import _DEFAULT, RESUME_JSON_SCHEMA
from toml_resume.encoder import neat_encoder
def read_resume_json(filename: str) -> Dict[str, Any]:
    with open(filename, "r") as f:
        d = json.load(f)
    jsonschema.validate(d, RESUME_JSON_SCHEMA)
    return d
def read_resume_toml(filename: str) -> Dict[str, Any]:
    with open(filename, "r") as f:
        return toml.load(f)
def write_resume_toml(d: Dict[str, Any], filename: str) -> None:
with open(filename, "w") as f:
toml.dump(d, f, neat_encoder)
def write_resume_json(
d: dict, filename: str, flavors: Optional[List[str]] = None
) -> None:
if not flavors:
flavors = []
if _DEFAULT not in flavors:
flavors.append(_DEFAULT)
flavors = clean_flavors(flavors)
output = combine_all_flavors(d, flavors)
jsonschema.validate(output, RESUME_JSON_SCHEMA)
with open(filename, "w") as f:
json.dump(output, f, indent=2)
def is_flavor(s: str) -> bool:
return s.startswith("_")
def clean_flavors(flavors: List[str]) -> List[str]:
return [f"_{x}" if not is_flavor(x) else x for x in flavors]
def get_default(d: Dict[str, Any]) -> Dict[str, Any]:
output_dict = {_DEFAULT: {}}
for k, v in d.items():
if is_flavor(k):
output_dict[k] = v
else:
output_dict[_DEFAULT][k] = v
return output_dict
def combine_all_flavors(d: Dict[str, Any], flavors: List[str]) -> Dict[str, Any]:
with_default = get_default(d)
to_combine = [
flatten_flavors_dict(with_default.get(flavor, {}), flavors)
for flavor in flavors
]
reduced = functools.reduce(lambda x, y: {**y, **x}, to_combine)
return reduced
def first_present_key(d: Dict[str, Any], lst: List[str]) -> Optional[str]:
for s in lst:
if s in d:
return s
return None
def flatten_flavors_dict(d: dict, flavors: List[str]) -> dict:
output_dict = {}
if not d:
return {}
    if not isinstance(d, dict):
        raise ValueError(f"expected a dict, got: {d!r}")
    if all(is_flavor(x) for x in d):
        chosen_key = first_present_key(d, flavors)
        if chosen_key is None:
            # no requested flavor present; treat as empty rather than a KeyError
            return {}
        chosen_value = d[chosen_key]
if isinstance(chosen_value, dict):
return flatten_flavors_dict(chosen_value, flavors)
elif isinstance(chosen_value, list):
return flatten_flavors_list(chosen_value, flavors)
elif is_flavor(chosen_value):
return d[chosen_value]
else:
return chosen_value
for k, v in d.items():
if isinstance(v, dict):
if all(is_flavor(x) for x in v):
chosen_key = first_present_key(v, flavors)
if chosen_key:
chosen_value = v[chosen_key]
if isinstance(chosen_value, dict):
output_dict[k] = flatten_flavors_dict(chosen_value, flavors)
elif isinstance(chosen_value, list):
output_dict[k] = flatten_flavors_list(chosen_value, flavors)
else:
output_dict[k] = chosen_value
else:
output_dict[k] = flatten_flavors_dict(v, flavors)
elif isinstance(v, list):
output_dict[k] = flatten_flavors_list(v, flavors)
else:
output_dict[k] = v
return output_dict
def flatten_flavors_list(l: List[Any], flavors: List[str]) -> List[Any]:
output_lst = []
for li in l:
if isinstance(li, list):
output_lst.append(flatten_flavors_list(li, flavors))
elif isinstance(li, dict):
output_lst.append(flatten_flavors_dict(li, flavors))
else:
output_lst.append(li)
return [x for x in output_lst if x]
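# Worked example (sketch; the "_work" flavor and values are hypothetical):
#   d = {"basics": {"name": {"_work": "Ada L.", "_fun": "Ada"}}}
#   combine_all_flavors(d, clean_flavors(["work"]) + [_DEFAULT])
#       -> {"basics": {"name": "Ada L."}}
# Keys nested under flavor markers collapse to the first flavor found, in the
# order given by the ``flavors`` list.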
if __name__ == "__main__":
pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 Leopold Schabel
# All rights reserved.
#
import sys, os
import thread
import time
import logging
import threading
from utils import s7comm
class CheckerThread(threading.Thread):
def __init__(self, plc, password, delay=60):
threading.Thread.__init__(self)
self.plc = plc
self.delay = delay
self.password = password
self.logger = logging.getLogger("plcmon.checker")
def _periodic_check(self):
self.logger.debug("Checking logging session...")
if not self.plc.logged_in:
try:
self.logger.info("Not logged in! Relogin: %s" % self.plc.login(self.password))
except s7comm.LoginFailure:
self.logger.fatal("SPS login failed! exiting..")
sys.exit(1)
else:
self.logger.debug("Login session valid!")
def run(self):
while True:
self._periodic_check()
time.sleep(self.delay)
def rpc_register(server, config):
logger = logging.getLogger("rpc.s7web")
plc = s7comm.S7WebComm(config['sps_ip'])
logger.info("Starting login check thread...")
checker = CheckerThread(plc, config['sps_password'])
checker.start()
server.register_function(plc.read_value, 'sps_read_value')
server.register_function(plc.read_values, 'sps_read_values')
server.register_function(plc.set_value, 'sps_set_value')
server.register_function(plc.set_values, 'sps_set_values')
server.register_function(plc.sps_flash_led)
server.register_function(plc.sps_information)
server.register_function(plc.sps_run)
server.register_function(plc.sps_stop)
server.register_function(plc.sps_status)
|
import os
import sys
import numpy
numpy.random.seed(42)
with open(os.path.join(sys.argv[1], "dev-other.lst"), "r") as f:
data = [line.strip() for line in f]
for n, seed_val in enumerate([0, 2, 3, 4, 5]):
numpy.random.seed(42 + seed_val)
data = numpy.random.permutation(data)
with open("tts_shuffled_{}.txt".format(n), "w") as fout:
for line in data:
line_new = line.split(" ")
new_tr = numpy.random.permutation(line_new[3:])
fout.write(line + "\n")
fout.write("{}\n".format(" ".join(new_tr)))
|
import scrapy
from scrapy.http import HtmlResponse
from jobparser.items import JobparserItem
class SjruSpider(scrapy.Spider):
name = 'sjru'
allowed_domains = ['superjob.ru']
start_urls = ['https://www.superjob.ru/vacancy/search/?keywords=Python&geo%5Bt%5D%5B0%5D=4']
def parse(self, response: HtmlResponse):
# print(response.status)
# print(response.url)
# response.xpath()
# response.css()
vacancy_link = response.xpath('//a[contains(@class, "icMQ_ _6AfZ9")]/@href'
).extract()
for link in vacancy_link:
page_link = 'https://www.superjob.ru' + link
yield response.follow(page_link, callback=self.parse_vacancies)
next_page = response.xpath(
'//a[contains(@class, "icMQ_ bs_sM _3ze9n f-test-button-dalshe f-test-link-Dalshe")]/@href').get()
if next_page:
yield response.follow(next_page, callback=self.parse)
    def salary_check(self, salary):
        salary_max = 'null'
        salary_min = 'null'
        # Strip non-breaking spaces and the currency suffix from numeric chunks
        for i, item in enumerate(salary):
            salary[i] = item.replace(u'\xa0', '')
            string = salary[i]
            if string != '' and string[0].isdigit() and not string.isdigit():
                salary[i] = string[:-4]
        if salary[0] == 'до':  # Russian for "up to"
            salary_max = salary[2]
        elif salary[0].isdigit():
            salary_min = salary[0]
            salary_max = salary[4]
        else:
            salary_min = salary[2]
        return salary_min, salary_max
def parse_vacancies(self, response: HtmlResponse):
title = response.xpath("//h1//text()").get()
salary = response.xpath("//span[contains(@class, '_1OuF_ ZON4b')]//span/text()").getall()
link = response.url
web = 'superjob.ru'
if len(salary) > 1:
salary_output = self.salary_check(salary)
salary_min = salary_output[0]
salary_max = salary_output[1]
else:
salary_min = 'null'
salary_max = 'null'
yield JobparserItem(title=title, salary_min=salary_min, salary_max=salary_max, link=link, web=web)
|
import unittest
from rdflib import Graph, ConjunctiveGraph, Namespace, URIRef, Literal, BNode
class KG_ask(unittest.TestCase):
kg = None
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
# from https://github.com/RDFLib/rdflib/issues/1003
rdf_triples_base = """
@prefix category: <http://example.org/> .
@prefix dct: <http://purl.org/dc/terms/> .
@prefix skos: <http://www.w3.org/2004/02/skos/core#> .
@base <http://example.org/> .
<> a skos:ConceptScheme ;
dct:creator _:BN ;
dct:description "Test Description"@en ;
dct:source <nick> ;
dct:title "Title"@en .
"""
cls.kg = ConjunctiveGraph()
cls.kg.parse(data=rdf_triples_base, format="turtle")
print(cls.kg.serialize(format="turtle").decode())
def test_ask_filter(self):
q1 = """
ASK {
?s dct:title ?t .
FILTER (?t = "Title")
}
"""
res = self.kg.query(q1)
for bool_r in res:
self.assertFalse(bool_r)
q2 = """
ASK {
?s dct:title ?t .
FILTER (?t = "Title"@en)
}
"""
res = self.kg.query(q2)
for bool_r in res:
self.assertTrue(bool_r)
def test_ask_filter_notIsBlank(self):
q3 = """
ASK {
?s dct:title ?t .
FILTER ( ! isBlank (?s) )
}
"""
res = self.kg.query(q3)
for bool_r in res:
self.assertTrue(bool_r)
def test_ask_filter_isIRI(self):
q4 = """
ASK {
?s dct:title ?t .
FILTER ( isIRI (?s) )
}
"""
res = self.kg.query(q4)
for bool_r in res:
self.assertTrue(bool_r)
def test_ask_filter_isBlank(self):
q5 = """
ASK {
?s dct:creator ?c .
FILTER ( isBlank (?c) )
}
"""
res = self.kg.query(q5)
for bool_r in res:
self.assertTrue(bool_r)
if __name__ == "__main__":
unittest.main()
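# Note: the queries above resolve the bare "dct:" prefix because rdflib
# reuses the prefixes bound on the graph while parsing the Turtle data. An
# equivalent, more explicit sketch passes the mapping directly (DCTERMS is
# rdflib's bundled Dublin Core terms namespace):
#   from rdflib.namespace import DCTERMS
#   res = kg.query("ASK { ?s dct:title ?t }", initNs={"dct": DCTERMS})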
|
from __future__ import annotations
from contextlib import contextmanager
from .logger import Logger
from ..typing import StyleOptions
class LoggedMixin:
"""
A mixin class that adds common log methods and accessors for the logger
object to which those log methods delegate.
"""
_logger: Logger
def __init__(self, logger: Logger):
self._logger = logger
super().__init__()
# -- Logger Methods --------------- --- -- -
@property
def logger(self) -> Logger:
"""Get the logger object to which log methods are delegated."""
return self._logger
@logger.setter
def logger(self, logger: Logger) -> None:
"""Set the logger object to which log methods are delegated."""
self._logger = logger
@property
def debug_enabled(self) -> bool:
"""See :meth:`logger.debug_enabled <.Logger.debug_enabled>`."""
return self._logger.debug_enabled
@property
def trace_enabled(self) -> bool:
"""See :meth:`logger.trace_enabled <.Logger.trace_enabled>`."""
return self._logger.trace_enabled
@property
def info_enabled(self) -> bool:
"""See :meth:`logger.info_enabled <.Logger.info_enabled>`."""
return self._logger.info_enabled
    def debug(self,
              *msgs,
              bullet: str | None = None,
              indent: str = "",
              key_style: StyleOptions | None = None,
              margin: int = 0,
              style: StyleOptions | None = None):
"""See :meth:`logger.debug <.Logger.debug>`."""
self._logger.debug(*msgs,
bullet=bullet,
indent=indent,
key_style=key_style,
margin=margin,
style=style)
    def trace(self,
              *msgs,
              bullet: str | None = None,
              indent: str = "",
              key_style: StyleOptions | None = None,
              margin: int = 0,
              style: StyleOptions | None = None):
"""See :meth:`logger.trace <.Logger.trace>`."""
self._logger.trace(*msgs,
bullet=bullet,
indent=indent,
key_style=key_style,
margin=margin,
style=style)
    def info(self,
             *msgs,
             bullet: str | None = None,
             indent: str = "",
             key_style: StyleOptions | None = None,
             margin: int = 0,
             style: StyleOptions | None = None):
"""See :meth:`logger.info <.Logger.info>`."""
self._logger.info(*msgs,
bullet=bullet,
indent=indent,
key_style=key_style,
margin=margin,
style=style)
@contextmanager
def indent(self):
"""
Increase the indentation for the subsequent log calls if the log level
is info or higher.
"""
try:
if self._logger.info_enabled:
with self._logger.indent():
yield
else:
yield
finally:
pass
@contextmanager
def debug_indent(self):
"""
Increase the indentation for the subsequent log calls if the log level
is debug or higher.
"""
try:
if self._logger.debug_enabled:
with self._logger.indent():
yield
else:
yield
finally:
pass
@contextmanager
def trace_indent(self):
"""
Increase the indentation for the subsequent log calls if the log level
is trace or higher.
"""
try:
if self._logger.trace_enabled:
with self._logger.indent():
yield
else:
yield
finally:
pass
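# A minimal usage sketch (assuming a concrete Logger instance named
# `my_logger`; both names below are illustrative):
#   class Worker(LoggedMixin):
#       def run(self) -> None:
#           self.info("starting")
#           with self.debug_indent():
#               self.debug("details shown only at debug level")
#   Worker(my_logger).run()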
|
# Eh! python!, We are going to include isolatin characters here
# -*- coding: latin-1 -*-
import unittest
import os
import tempfile
import numpy
from tables import *
from tables.flavor import flavor_to_flavor
from tables.tests import common
from tables.tests.common import (
typecode, allequal, numeric_imported, numarray_imported)
if numarray_imported:
    import numarray
    import numarray.strings as strings
if numeric_imported:
import Numeric
# To delete the internal attributes automagically
unittest.TestCase.tearDown = common.cleanup
class BasicTestCase(unittest.TestCase):
# Default values
flavor = "numpy"
type = 'int32'
shape = (2,2)
start = 0
stop = 10
step = 1
length = 1
chunkshape = (5,5)
compress = 0
complib = "zlib" # Default compression library
shuffle = 0
fletcher32 = 0
reopen = 1 # Tells whether the file has to be reopened on each test or not
def setUp(self):
        # Create an HDF5 file
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, "w")
self.rootgroup = self.fileh.root
self.populateFile()
if self.reopen:
# Close the file
self.fileh.close()
def populateFile(self):
group = self.rootgroup
if self.type == "string":
atom = StringAtom(itemsize=self.length)
else:
atom = Atom.from_type(self.type)
title = self.__class__.__name__
filters = Filters(complevel = self.compress,
complib = self.complib,
shuffle = self.shuffle,
fletcher32 = self.fletcher32)
carray = self.fileh.createCArray(group, 'carray1', atom, self.shape,
title, filters=filters,
chunkshape = self.chunkshape)
carray.flavor = self.flavor
# Fill it with data
self.rowshape = list(carray.shape)
self.objsize = self.length * numpy.prod(self.shape)
if self.flavor == "numarray":
if self.type == "string":
object = strings.array("a"*self.objsize, shape=self.shape,
itemsize=carray.atom.itemsize)
else:
type_ = numpy.sctypeNA[numpy.sctypeDict[carray.atom.type]]
object = numarray.arange(self.objsize, shape=self.shape,
type=type_)
elif self.flavor == "numpy":
if self.type == "string":
object = numpy.ndarray(buffer="a"*self.objsize,
shape=self.shape,
dtype="S%s" % carray.atom.itemsize)
else:
object = numpy.arange(self.objsize, dtype=carray.atom.dtype)
object.shape = self.shape
else: # Numeric flavor
object = Numeric.arange(self.objsize,
typecode=typecode[carray.atom.type])
object = Numeric.reshape(object, self.shape)
if common.verbose:
print "Object to append -->", repr(object)
carray[...] = object
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
#----------------------------------------
def test01_readCArray(self):
"""Checking read() of chunked layout arrays"""
rootgroup = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01_readCArray..." % self.__class__.__name__
        # Reopen the HDF5 file if required
if self.reopen:
self.fileh = openFile(self.file, "r")
carray = self.fileh.getNode("/carray1")
# Choose a small value for buffer size
carray.nrowsinbuf = 3
if common.verbose:
print "CArray descr:", repr(carray)
print "shape of read array ==>", carray.shape
print "reopening?:", self.reopen
# Build the array to do comparisons
if self.flavor == "numarray":
if self.type == "string":
object_ = strings.array("a"*self.objsize, shape=self.shape,
itemsize=carray.atom.itemsize)
else:
type_ = numpy.sctypeNA[numpy.sctypeDict[carray.atom.type]]
object_ = numarray.arange(self.objsize, shape=self.shape,
type=type_)
elif self.flavor == "numpy":
if self.type == "string":
object_ = numpy.ndarray(buffer="a"*self.objsize,
shape=self.shape,
dtype="S%s" % carray.atom.itemsize)
else:
object_ = numpy.arange(self.objsize, dtype=carray.atom.dtype)
object_.shape = self.shape
else:
object_ = Numeric.arange(self.objsize,
typecode=typecode[carray.atom.type])
object_ = Numeric.reshape(object_, self.shape)
stop = self.stop
        # stop == None means read only the element designated by start
        # (in read() contexts)
        if self.stop is None:
if self.start == -1: # corner case
stop = carray.nrows
else:
stop = self.start + 1
        # Protect against a stop value beyond the number of existing rows
#if rowshape[self.extdim] < self.stop or self.stop == 0:
if carray.nrows < stop:
# self.stop == 0 means last row only in read()
# and not in [::] slicing notation
stop = int(carray.nrows)
        # do a copy() in order to ensure that len(object._data)
        # actually does measure its length
        # Numeric 23.8 will issue an error with slices like -1:20:20,
        # but this is a bug in that Numeric version (and perhaps
        # earlier ones).
object = object_[self.start:stop:self.step].copy()
# Read all the array
try:
data = carray.read(self.start,stop,self.step)
except IndexError:
if self.flavor == "numarray":
data = numarray.array(None, shape=self.shape, type=self.type)
elif self.flavor == "numpy":
data = numpy.empty(shape=self.shape, dtype=self.type)
else:
data = Numeric.zeros(self.shape, typecode[self.type])
if common.verbose:
if hasattr(object, "shape"):
print "shape should look as:", object.shape
print "Object read ==>", repr(data)
print "Should look like ==>", repr(object)
if hasattr(data, "shape"):
self.assertEqual(len(data.shape), len(self.shape))
else:
# Scalar case
self.assertEqual(len(self.shape), 1)
self.assertEqual(carray.chunkshape, self.chunkshape)
self.assertTrue(allequal(data, object, self.flavor))
def test02_getitemCArray(self):
"""Checking chunked layout array __getitem__ special method"""
rootgroup = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02_getitemCArray..." % self.__class__.__name__
if not hasattr(self, "slices"):
            # If there is no slices attribute, create one
self.slices = (slice(self.start, self.stop, self.step),)
        # Reopen the HDF5 file if required
if self.reopen:
self.fileh = openFile(self.file, "r")
carray = self.fileh.getNode("/carray1")
if common.verbose:
print "CArray descr:", repr(carray)
print "shape of read array ==>", carray.shape
print "reopening?:", self.reopen
# Build the array to do comparisons
if self.type == "string":
object_ = numpy.ndarray(buffer="a"*self.objsize,
shape=self.shape,
dtype="S%s" % carray.atom.itemsize)
else:
object_ = numpy.arange(self.objsize, dtype=carray.atom.dtype)
object_.shape = self.shape
stop = self.stop
        # do a copy() in order to ensure that len(object._data)
        # actually does measure its length
object = object_.__getitem__(self.slices).copy()
if self.flavor == "numarray":
# Convert the object to Numarray
object = flavor_to_flavor(object, 'numpy', 'numarray')
elif self.flavor == "numeric":
# Convert the object to Numeric
object = flavor_to_flavor(object, 'numpy', 'numeric')
# Read data from the array
try:
data = carray.__getitem__(self.slices)
except IndexError:
print "IndexError!"
if self.flavor == "numarray":
data = numarray.array(None, shape=self.shape, type=self.type)
elif self.flavor == "numpy":
data = numpy.empty(shape=self.shape, dtype=self.type)
else:
data = Numeric.zeros(self.shape, typecode[self.type])
if common.verbose:
print "Object read:\n", repr(data) #, data.info()
print "Should look like:\n", repr(object) #, object.info()
if hasattr(object, "shape"):
print "Original object shape:", self.shape
print "Shape read:", data.shape
print "shape should look as:", object.shape
if not hasattr(data, "shape"):
# Scalar case
self.assertEqual(len(self.shape), 1)
self.assertEqual(carray.chunkshape, self.chunkshape)
self.assertTrue(allequal(data, object, self.flavor))
def test03_setitemCArray(self):
"""Checking chunked layout array __setitem__ special method"""
rootgroup = self.rootgroup
if self.__class__.__name__ == "Ellipsis6CArrayTestCase":
# see test_earray.py BasicTestCase.test03_setitemEArray
return
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test03_setitemCArray..." % self.__class__.__name__
if not hasattr(self, "slices"):
            # If there is no slices attribute, create one
self.slices = (slice(self.start, self.stop, self.step),)
        # Reopen the HDF5 file if required
if self.reopen:
self.fileh = openFile(self.file, "a")
carray = self.fileh.getNode("/carray1")
if common.verbose:
print "CArray descr:", repr(carray)
print "shape of read array ==>", carray.shape
print "reopening?:", self.reopen
# Build the array to do comparisons
if self.type == "string":
object_ = numpy.ndarray(buffer="a"*self.objsize,
shape=self.shape,
dtype="S%s" % carray.atom.itemsize)
else:
object_ = numpy.arange(self.objsize, dtype=carray.atom.dtype)
object_.shape = self.shape
stop = self.stop
        # do a copy() in order to ensure that len(object._data)
        # actually does measure its length
object = object_.__getitem__(self.slices).copy()
if self.flavor == "numarray":
# Convert the object to numarray
object = flavor_to_flavor(object, 'numpy', 'numarray')
elif self.flavor == "numeric":
# Convert the object to Numeric
object = flavor_to_flavor(object, 'numpy', 'numeric')
if self.type == "string":
if hasattr(self, "wslice"):
                object[self.wslice] = "xXx"
carray[self.wslice] = "xXx"
            elif sum(object[self.slices].shape) != 0:
object[:] = "xXx"
if object.size > 0:
carray[self.slices] = object
else:
if hasattr(self, "wslice"):
object[self.wslice] = object[self.wslice] * 2 + 3
carray[self.wslice] = carray[self.wslice] * 2 + 3
elif sum(object[self.slices].shape) != 0:
object = object * 2 + 3
if reduce(lambda x,y:x*y, object.shape) > 0:
carray[self.slices] = carray[self.slices] * 2 + 3
        # Cast the object back to its original type
object = numpy.array(object, dtype=carray.atom.dtype)
        # Read data from the array
try:
data = carray.__getitem__(self.slices)
except IndexError:
print "IndexError!"
if self.flavor == "numarray":
data = numarray.array(None, shape=self.shape, type=self.type)
elif self.flavor == "numpy":
data = numpy.empty(shape=self.shape, dtype=self.type)
else:
data = Numeric.zeros(self.shape, typecode[self.type])
if common.verbose:
print "Object read:\n", repr(data) #, data.info()
print "Should look like:\n", repr(object) #, object.info()
if hasattr(object, "shape"):
print "Original object shape:", self.shape
print "Shape read:", data.shape
print "shape should look as:", object.shape
if not hasattr(data, "shape"):
# Scalar case
self.assertEqual(len(self.shape), 1)
self.assertEqual(carray.chunkshape, self.chunkshape)
self.assertTrue(allequal(data, object, self.flavor))
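# For reference, the access pattern these tests exercise boils down to the
# following sketch (pre-2.x PyTables API, as used throughout this module;
# the file name is illustrative):
#   fileh = openFile("example.h5", "w")
#   carray = fileh.createCArray(fileh.root, 'carray1', Int32Atom(), (2, 2),
#                               chunkshape=(5, 5))
#   carray[...] = numpy.arange(4, dtype='int32').reshape(2, 2)
#   data = carray.read(start=0, stop=2, step=1)
#   fileh.close()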
class BasicWriteTestCase(BasicTestCase):
type = 'int32'
shape = (2,)
chunkshape = (5,)
step = 1
wslice = 1 # single element case
class BasicWrite2TestCase(BasicTestCase):
type = 'int32'
shape = (2,)
chunkshape = (5,)
step = 1
wslice = slice(shape[0]-2,shape[0],2) # range of elements
reopen = 0 # This case does not reopen files
class EmptyCArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2)
chunkshape = (5, 5)
start = 0
stop = 10
step = 1
class EmptyCArray2TestCase(BasicTestCase):
type = 'int32'
shape = (2, 2)
chunkshape = (5, 5)
start = 0
stop = 10
step = 1
reopen = 0 # This case does not reopen files
class SlicesCArrayTestCase(BasicTestCase):
compress = 1
complib = "lzo"
type = 'int32'
shape = (2, 2)
chunkshape = (5, 5)
slices = (slice(1,2,1), slice(1,3,1))
class EllipsisCArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2)
chunkshape = (5, 5)
#slices = (slice(1,2,1), Ellipsis)
slices = (Ellipsis, slice(1,2,1))
class Slices2CArrayTestCase(BasicTestCase):
compress = 1
complib = "lzo"
type = 'int32'
shape = (2, 2, 4)
chunkshape = (5, 5, 5)
slices = (slice(1,2,1), slice(None, None, None), slice(1,4,2))
class Ellipsis2CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2, 4)
chunkshape = (5, 5, 5)
slices = (slice(1,2,1), Ellipsis, slice(1,4,2))
class Slices3CArrayTestCase(BasicTestCase):
    compress = 1 # To show the chunks if DEBUG is on
complib = "lzo"
type = 'int32'
shape = (2, 3, 4, 2)
chunkshape = (5, 5, 5, 5)
    slices = (slice(1, 2, 1), slice(0, None, None), slice(1,4,2)) # Doesn't work
#slices = (slice(None, None, None), slice(0, None, None), slice(1,4,1)) # W
#slices = (slice(None, None, None), slice(None, None, None), slice(1,4,2)) # N
#slices = (slice(1,2,1), slice(None, None, None), slice(1,4,2)) # N
# Disable the failing test temporarily with a working test case
slices = (slice(1,2,1), slice(1, 4, None), slice(1,4,2)) # Y
#slices = (slice(1,2,1), slice(0, 4, None), slice(1,4,1)) # Y
slices = (slice(1,2,1), slice(0, 4, None), slice(1,4,2)) # N
#slices = (slice(1,2,1), slice(0, 4, None), slice(1,4,2), slice(0,100,1)) # N
class Slices4CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 2, 5, 6)
chunkshape = (5,5, 5, 5, 5, 5)
slices = (slice(1, 2, 1), slice(0, None, None), slice(1,4,2),
slice(0,4,2), slice(3,5,2), slice(2,7,1))
class Ellipsis3CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 2)
chunkshape = (5, 5, 5, 5)
slices = (Ellipsis, slice(0, 4, None), slice(1,4,2))
slices = (slice(1,2,1), slice(0, 4, None), slice(1,4,2), Ellipsis)
class Ellipsis4CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 5)
chunkshape = (5, 5, 5, 5)
slices = (Ellipsis, slice(0, 4, None), slice(1,4,2))
slices = (slice(1,2,1), Ellipsis, slice(1,4,2))
class Ellipsis5CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 5)
chunkshape = (5, 5, 5, 5)
slices = (slice(1,2,1), slice(0, 4, None), Ellipsis)
class Ellipsis6CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 5)
chunkshape = (5, 5, 5, 5)
    # The next slices give problems when setting values (test03).
    # This is a problem with the test design, not with the
    # Array.__setitem__ code, though. See test_earray.py Ellipsis6EArrayTestCase.
slices = (slice(1,2,1), slice(0, 4, None), 2, Ellipsis)
class Ellipsis7CArrayTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 4, 5)
chunkshape = (5, 5, 5, 5)
slices = (slice(1,2,1), slice(0, 4, None), slice(2,3), Ellipsis)
class MD3WriteTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2, 3)
chunkshape = (4, 4, 4)
step = 2
class MD5WriteTestCase(BasicTestCase):
type = 'int32'
shape = (2, 2, 3, 4, 5) # ok
#shape = (1, 1, 2, 1) # Minimum shape that shows problems with HDF5 1.6.1
#shape = (2, 3, 2, 4, 5) # Floating point exception (HDF5 1.6.1)
#shape = (2, 3, 3, 2, 5, 6) # Segmentation fault (HDF5 1.6.1)
chunkshape = (1, 1, 1, 1, 1)
start = 1
stop = 10
step = 10
class MD6WriteTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 3, 2, 5, 6)
chunkshape = (1, 1, 1, 1, 5, 6)
start = 1
stop = 10
step = 3
class MD6WriteTestCase__(BasicTestCase):
type = 'int32'
shape = (2, 2)
chunkshape = (1, 1)
start = 1
stop = 3
step = 1
class MD7WriteTestCase(BasicTestCase):
type = 'int32'
shape = (2, 3, 3, 4, 5, 2, 3)
chunkshape = (10, 10, 10, 10, 10, 10, 10)
start = 1
stop = 10
step = 2
class MD10WriteTestCase(BasicTestCase):
type = 'int32'
shape = (1, 2, 3, 4, 5, 5, 4, 3, 2, 2)
chunkshape = (5, 5, 5, 5, 5, 5, 5, 5, 5, 5)
start = -1
stop = -1
step = 10
class ZlibComprTestCase(BasicTestCase):
compress = 1
complib = "zlib"
start = 3
#stop = 0 # means last row
stop = None # means last row from 0.8 on
step = 10
class ZlibShuffleTestCase(BasicTestCase):
shuffle = 1
compress = 1
complib = "zlib"
    # case start > stop, i.e. no rows read
start = 3
stop = 1
step = 10
class BloscComprTestCase(BasicTestCase):
compress = 1 # sss
complib = "blosc"
chunkshape = (10,10)
start = 3
stop = 10
step = 3
class BloscShuffleTestCase(BasicTestCase):
shape = (20,30)
compress = 1
shuffle = 1
complib = "blosc"
chunkshape = (100,100)
start = 3
stop = 10
step = 7
class LZOComprTestCase(BasicTestCase):
compress = 1 # sss
complib = "lzo"
chunkshape = (10,10)
start = 3
stop = 10
step = 3
class LZOShuffleTestCase(BasicTestCase):
shape = (20,30)
compress = 1
shuffle = 1
complib = "lzo"
chunkshape = (100,100)
start = 3
stop = 10
step = 7
class Bzip2ComprTestCase(BasicTestCase):
shape = (20,30)
compress = 1
complib = "bzip2"
chunkshape = (100,100)
start = 3
stop = 10
step = 8
class Bzip2ShuffleTestCase(BasicTestCase):
shape = (20,30)
compress = 1
shuffle = 1
complib = "bzip2"
chunkshape = (100,100)
start = 3
stop = 10
step = 6
class Fletcher32TestCase(BasicTestCase):
shape = (60,50)
compress = 0
fletcher32 = 1
chunkshape = (50,50)
start = 4
stop = 20
step = 7
class AllFiltersTestCase(BasicTestCase):
compress = 1
shuffle = 1
fletcher32 = 1
complib = "zlib"
chunkshape = (20,20) # sss
start = 2
stop = 99
step = 6
class FloatTypeTestCase(BasicTestCase):
type = 'float64'
shape = (2,2)
chunkshape = (5,5)
start = 3
stop = 10
step = 20
class ComplexTypeTestCase(BasicTestCase):
type = 'complex128'
shape = (2,2)
chunkshape = (5,5)
start = 3
stop = 10
step = 20
class StringTestCase(BasicTestCase):
type = "string"
length = 20
shape = (2, 2)
#shape = (2,2,20)
chunkshape = (5,5)
start = 3
stop = 10
step = 20
slices = (slice(0,1),slice(1,2))
class String2TestCase(BasicTestCase):
type = "string"
length = 20
shape = (2, 20)
chunkshape = (5,5)
start = 1
stop = 10
step = 2
class StringComprTestCase(BasicTestCase):
type = "string"
length = 20
shape = (20,2,10)
#shape = (20,0,10,20)
    compress = 1
    #shuffle = 1 # this shouldn't do anything on chars
chunkshape = (50,50,2)
start = -1
stop = 100
step = 20
class NumarrayInt8TestCase(BasicTestCase):
flavor = "numarray"
type = "int8"
shape = (2,2)
compress = 1
shuffle = 1
chunkshape = (50,50)
start = -1
stop = 100
step = 20
class NumarrayInt16TestCase(BasicTestCase):
flavor = "numarray"
type = "int16"
shape = (2,2)
compress = 1
shuffle = 1
chunkshape = (50,50)
start = 1
stop = 100
step = 1
class NumarrayInt32TestCase(BasicTestCase):
flavor = "numarray"
type = "int32"
shape = (2,2)
compress = 1
shuffle = 1
chunkshape = (50,50)
start = -1
stop = 100
step = 20
class NumarrayFloat32TestCase(BasicTestCase):
flavor = "numarray"
type = "float32"
shape = (200,)
compress = 1
shuffle = 1
chunkshape = (20,)
start = -1
stop = 100
step = 20
class NumarrayFloat64TestCase(BasicTestCase):
flavor = "numarray"
type = "float64"
shape = (200,)
compress = 1
shuffle = 1
chunkshape = (20,)
start = -1
stop = 100
step = 20
class NumarrayComplex64TestCase(BasicTestCase):
flavor = "numarray"
type = "complex64"
shape = (4,)
compress = 1
shuffle = 1
chunkshape = (2,)
start = -1
stop = 100
step = 20
class NumarrayComplex128TestCase(BasicTestCase):
flavor = "numarray"
type = "complex128"
shape = (20,)
compress = 1
shuffle = 1
chunkshape = (2,)
start = -1
stop = 100
step = 20
class NumarrayComprTestCase(BasicTestCase):
flavor = "numarray"
type = "float64"
compress = 1
shuffle = 1
shape = (200,)
chunkshape = (21,)
start = 51
stop = 100
step = 7
class NumericInt8TestCase(BasicTestCase):
flavor = "numeric"
type = "int8"
shape = (2,2)
compress = 1
shuffle = 1
chunkshape = (50,50)
start = -1
stop = 100
step = 20
class NumericInt16TestCase(BasicTestCase):
flavor = "numeric"
type = "int16"
shape = (2,2)
compress = 1
shuffle = 1
chunkshape = (50,50)
start = 1
stop = 100
step = 1
class NumericInt32TestCase(BasicTestCase):
flavor = "numeric"
type = "int32"
shape = (2,2)
compress = 1
shuffle = 1
chunkshape = (50,50)
start = -1
stop = 100
step = 20
class NumericFloat32TestCase(BasicTestCase):
flavor = "numeric"
type = "float32"
shape = (200,)
compress = 1
shuffle = 1
chunkshape = (20,)
start = -1
stop = 100
step = 20
class NumericFloat64TestCase(BasicTestCase):
flavor = "numeric"
type = "float64"
shape = (200,)
compress = 1
shuffle = 1
chunkshape = (20,)
start = -1
stop = 100
step = 20
class NumericComplex64TestCase(BasicTestCase):
flavor = "numeric"
type = "complex64"
shape = (4,)
compress = 1
shuffle = 1
chunkshape = (2,)
start = -1
stop = 100
step = 20
class NumericComplex128TestCase(BasicTestCase):
flavor = "numeric"
type = "complex128"
shape = (20,)
compress = 1
shuffle = 1
chunkshape = (2,)
start = -1
stop = 100
step = 20
class NumericComprTestCase(BasicTestCase):
flavor = "numeric"
type = "float64"
compress = 1
shuffle = 1
shape = (200,)
chunkshape = (21,)
start = 51
stop = 100
step = 7
# A test for Numeric char types still remains to be written, but the code is getting too messy
class OffsetStrideTestCase(unittest.TestCase):
mode = "w"
compress = 0
complib = "zlib" # Default compression library
def setUp(self):
        # Create an HDF5 file
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, self.mode)
self.rootgroup = self.fileh.root
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
#----------------------------------------
def test01a_String(self):
"""Checking carray with offseted NumPy strings appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01a_String..." % self.__class__.__name__
shape = (3,2,2)
        # Create a string atom
carray = self.fileh.createCArray(root, 'strings',
StringAtom(itemsize=3), shape,
"Array of strings",
chunkshape=(1,2,2))
a = numpy.array([[["a","b"],["123", "45"],["45", "123"]]], dtype="S3")
carray[0] = a[0,1:]
a = numpy.array([[["s", "a"],["ab", "f"],["s", "abc"],["abc", "f"]]])
carray[1] = a[0,2:]
# Read all the data:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Second row in carray ==>", data[1].tolist()
self.assertEqual(carray.nrows, 3)
self.assertEqual(data[0].tolist(), [["123", "45"],["45", "123"]])
self.assertEqual(data[1].tolist(), [["s", "abc"],["abc", "f"]])
self.assertEqual(len(data[0]), 2)
self.assertEqual(len(data[1]), 2)
def test01b_String(self):
"""Checking carray with strided NumPy strings appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01b_String..." % self.__class__.__name__
shape = (3,2,2)
        # Create a string atom
carray = self.fileh.createCArray(root, 'strings',
StringAtom(itemsize=3), shape,
"Array of strings",
chunkshape=(1,2,2))
a = numpy.array([[["a","b"],["123", "45"],["45", "123"]]], dtype="S3")
carray[0] = a[0,::2]
a = numpy.array([[["s", "a"],["ab", "f"],["s", "abc"],["abc", "f"]]])
carray[1] = a[0,::2]
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Second row in carray ==>", data[1].tolist()
self.assertEqual(carray.nrows, 3)
self.assertEqual(data[0].tolist(), [["a","b"],["45", "123"]])
self.assertEqual(data[1].tolist(), [["s", "a"],["s", "abc"]])
self.assertEqual(len(data[0]), 2)
self.assertEqual(len(data[1]), 2)
def test02a_int(self):
"""Checking carray with offseted NumPy ints appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02a_int..." % self.__class__.__name__
shape = (3,3)
        # Create an int atom
carray = self.fileh.createCArray(root, 'CAtom',
Int32Atom(), shape,
"array of ints",
chunkshape=(1,3))
a = numpy.array([(0,0,0), (1,0,3), (1,1,1), (0,0,0)], dtype='int32')
carray[0:2] = a[2:] # Introduce an offset
a = numpy.array([(1,1,1), (-1,0,0)], dtype='int32')
carray[2:3] = a[1:] # Introduce an offset
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Third row in carray ==>", data[2]
self.assertEqual(carray.nrows, 3)
self.assertTrue(allequal(data[0], numpy.array([1,1,1], dtype='int32')))
self.assertTrue(allequal(data[1], numpy.array([0,0,0], dtype='int32')))
self.assertTrue(allequal(data[2], numpy.array([-1,0,0], dtype='int32')))
def test02b_int(self):
"""Checking carray with strided NumPy ints appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02b_int..." % self.__class__.__name__
shape = (3,3)
        # Create an int atom
carray = self.fileh.createCArray(root, 'CAtom',
Int32Atom(), shape,
"array of ints",
chunkshape=(1,3))
a = numpy.array([(0,0,0), (1,0,3), (1,1,1), (3,3,3)], dtype='int32')
carray[0:2] = a[::3] # Create an offset
a = numpy.array([(1,1,1), (-1,0,0)], dtype='int32')
carray[2:3] = a[::2] # Create an offset
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Third row in carray ==>", data[2]
self.assertEqual(carray.nrows, 3)
self.assertTrue(allequal(data[0], numpy.array([0,0,0], dtype='int32')))
self.assertTrue(allequal(data[1], numpy.array([3,3,3], dtype='int32')))
self.assertTrue(allequal(data[2], numpy.array([1,1,1], dtype='int32')))
class NumarrayOffsetStrideTestCase(unittest.TestCase):
mode = "w"
compress = 0
complib = "zlib" # Default compression library
def setUp(self):
        # Create an HDF5 file
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, self.mode)
self.rootgroup = self.fileh.root
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
#----------------------------------------
def test02a_int(self):
"""Checking carray with offseted numarray ints appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02a_int..." % self.__class__.__name__
shape = (3,3)
        # Create an int atom
carray = self.fileh.createCArray(root, 'CAtom',
Int32Atom(), shape,
"array of ints",
chunkshape=(1,3))
a = numarray.array([(0,0,0), (1,0,3), (1,1,1), (0,0,0)], type='Int32')
carray[0:2] = a[2:] # Introduce an offset
a = numarray.array([(1,1,1), (-1,0,0)], type='Int32')
carray[2:3] = a[1:] # Introduce an offset
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Third row in carray ==>", data[2]
self.assertEqual(carray.nrows, 3)
self.assertTrue(allequal(data[0], numpy.array([1,1,1], dtype='i4')))
self.assertTrue(allequal(data[1], numpy.array([0,0,0], dtype='i4')))
self.assertTrue(allequal(data[2], numpy.array([-1,0,0], dtype='i4')))
def test02b_int(self):
"""Checking carray with strided numarray ints appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02b_int..." % self.__class__.__name__
shape = (3,3)
        # Create an int atom
carray = self.fileh.createCArray(root, 'CAtom',
Int32Atom(), shape,
"array of ints",
chunkshape=(1,3))
a = numarray.array([(0,0,0), (1,0,3), (1,2,1), (3,2,3)], type='Int32')
carray[0:2] = a[::3] # Create a strided object
a = numarray.array([(1,0,1), (-1,0,0)], type='Int32')
carray[2:3] = a[::2] # Create a strided object
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Third row in carray ==>", data[2]
self.assertEqual(carray.nrows, 3)
self.assertTrue(allequal(data[0], numpy.array([0,0,0], dtype='i4')))
self.assertTrue(allequal(data[1], numpy.array([3,2,3], dtype='i4')))
self.assertTrue(allequal(data[2], numpy.array([1,0,1], dtype='i4')))
class NumericOffsetStrideTestCase(unittest.TestCase):
mode = "w"
compress = 0
complib = "zlib" # Default compression library
def setUp(self):
        # Create an HDF5 file
self.file = tempfile.mktemp(".h5")
self.fileh = openFile(self.file, self.mode)
self.rootgroup = self.fileh.root
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
#----------------------------------------
def test02a_int(self):
"""Checking carray with offseted Numeric ints appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02a_int..." % self.__class__.__name__
shape = (3,3)
        # Create an int atom
carray = self.fileh.createCArray(root, 'CAtom',
Int32Atom(), shape,
"array of ints",
chunkshape=(1,3))
a = Numeric.array([(0,0,0), (1,0,3), (1,1,1), (0,0,0)], typecode='i')
carray[0:2] = a[2:] # Introduce an offset
a = Numeric.array([(1,1,1), (-1,0,0)], typecode='i')
carray[2:3] = a[1:] # Introduce an offset
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Third row in carray ==>", data[2]
self.assertEqual(carray.nrows, 3)
self.assertTrue(allequal(data[0], numpy.array([1,1,1], dtype='i4')))
self.assertTrue(allequal(data[1], numpy.array([0,0,0], dtype='i4')))
self.assertTrue(allequal(data[2], numpy.array([-1,0,0], dtype='i4')))
def test02b_int(self):
"""Checking carray with strided Numeric ints appends"""
root = self.rootgroup
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02b_int..." % self.__class__.__name__
shape = (3,3)
        # Create an int atom
carray = self.fileh.createCArray(root, 'CAtom',
Int32Atom(), shape,
"array of ints",
chunkshape=(1,3))
a=Numeric.array([(0,0,0), (1,0,3), (1,2,1), (3,2,3)], typecode='i')
carray[0:2] = a[::3] # Create a strided object
a=Numeric.array([(1,0,1), (-1,0,0)], typecode='i')
carray[2:3] = a[::2] # Create a strided object
# Read all the rows:
data = carray.read()
if common.verbose:
print "Object read:", data
print "Nrows in", carray._v_pathname, ":", carray.nrows
print "Third row in carray ==>", data[2]
self.assertEqual(carray.nrows, 3)
self.assertTrue(allequal(data[0], numpy.array([0,0,0], dtype='i')))
self.assertTrue(allequal(data[1], numpy.array([3,2,3], dtype='i')))
self.assertTrue(allequal(data[2], numpy.array([1,0,1], dtype='i')))
class CopyTestCase(unittest.TestCase):
def test01a_copy(self):
"""Checking CArray.copy() method """
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01a_copy..." % self.__class__.__name__
        # Create an HDF5 file
        file = tempfile.mktemp(".h5")
        fileh = openFile(file, "w")
        # Create a CArray
shape = (2,2)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1[...] = numpy.array([[456, 2],[3, 457]], dtype='int16')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
#print "dirs-->", dir(array1), dir(array2)
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test01b_copy(self):
"""Checking CArray.copy() method """
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01b_copy..." % self.__class__.__name__
        # Create an HDF5 file
        file = tempfile.mktemp(".h5")
        fileh = openFile(file, "w")
        # Create a CArray
shape = (2,2)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(5, 5))
array1[...] = numpy.array([[456, 2],[3, 457]], dtype='int16')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
#print "dirs-->", dir(array1), dir(array2)
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# By default, the chunkshape should be the same
self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test01c_copy(self):
"""Checking CArray.copy() method """
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01c_copy..." % self.__class__.__name__
        # Create an HDF5 file
        file = tempfile.mktemp(".h5")
        fileh = openFile(file, "w")
        # Create a CArray
shape = (5,5)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1[:2,:2] = numpy.array([[456, 2],[3, 457]], dtype='int16')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
#print "dirs-->", dir(array1), dir(array2)
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test02_copy(self):
"""Checking CArray.copy() method (where specified)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02_copy..." % self.__class__.__name__
        # Create an HDF5 file
        file = tempfile.mktemp(".h5")
        fileh = openFile(file, "w")
        # Create a CArray
shape = (5,5)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1[:2,:2] = numpy.array([[456, 2],[3, 457]], dtype='int16')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy to another location
group1 = fileh.createGroup("/", "group1")
array2 = array1.copy(group1, 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.group1.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
#print "dirs-->", dir(array1), dir(array2)
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor)
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
# Numeric is now deprecated
def _test03_copy(self):
"""Checking CArray.copy() method (Numeric flavor)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test03_copy..." % self.__class__.__name__
        # Create an HDF5 file
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
if numeric_imported:
flavor="numeric"
else:
flavor="numpy"
arr = Int16Atom()
shape = (2,2)
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1.flavor = flavor
array1[...] = numpy.array([[456, 2],[3, 457]], dtype='int16')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor) # Very important here!
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test03c_copy(self):
"""Checking CArray.copy() method (python flavor)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test03c_copy..." % self.__class__.__name__
        # Create an HDF5 file
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
shape = (2,2)
arr = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1.flavor = "python"
array1[...] = [[456, 2],[3, 457]]
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all elements are equal
self.assertEqual(array1.read(), array2.read())
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor) # Very important here!
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test03d_copy(self):
"""Checking CArray.copy() method (string python flavor)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test03d_copy..." % self.__class__.__name__
        # Create an HDF5 file
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
shape = (2,2)
arr = StringAtom(itemsize=4)
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1.flavor = "python"
array1[...] = [["456", "2"],["3", "457"]]
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "type value-->", type(array2[:][0][0])
print "value-->", array2[:]
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all elements are equal
self.assertEqual(array1.read(), array2.read())
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor) # Very important here!
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test03e_copy(self):
"""Checking CArray.copy() method (chararray flavor)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test03e_copy..." % self.__class__.__name__
        # Create an HDF5 file
file = tempfile.mktemp(".h5")
fileh = openFile(file, "w")
shape = (2,2)
arr = StringAtom(itemsize=4)
array1 = fileh.createCArray(fileh.root, 'array1', arr, shape,
"title array1", chunkshape=(2, 2))
array1[...] = numpy.array([["456", "2"],["3", "457"]], dtype="S4")
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy to another location
array2 = array1.copy('/', 'array2')
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all elements are equal
self.assertTrue(allequal(array1.read(), array2.read()))
# Assert other properties in array
self.assertEqual(array1.nrows, array2.nrows)
self.assertEqual(array1.shape, array2.shape)
self.assertEqual(array1.extdim, array2.extdim)
self.assertEqual(array1.flavor, array2.flavor) # Very important here!
self.assertEqual(array1.atom.dtype, array2.atom.dtype)
self.assertEqual(array1.atom.type, array2.atom.type)
self.assertEqual(array1.title, array2.title)
self.assertEqual(str(array1.atom), str(array2.atom))
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#self.assertEqual(array1.chunkshape, array2.chunkshape)
# Close the file
fileh.close()
os.remove(file)
def test04_copy(self):
"""Checking CArray.copy() method (checking title copying)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test04_copy..." % self.__class__.__name__
        # Create an HDF5 file
        file = tempfile.mktemp(".h5")
        fileh = openFile(file, "w")
        # Create a CArray
shape = (2,2)
atom = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2,2))
array1[...] = numpy.array([[456, 2],[3, 457]], dtype='int16')
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another Array
array2 = array1.copy('/', 'array2', title="title array2")
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
# Assert user attributes
if common.verbose:
print "title of destination array-->", array2.title
self.assertEqual(array2.title, "title array2")
# Close the file
fileh.close()
os.remove(file)
def test05_copy(self):
"""Checking CArray.copy() method (user attributes copied)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test05_copy..." % self.__class__.__name__
        # Create an HDF5 file
        file = tempfile.mktemp(".h5")
        fileh = openFile(file, "w")
        # Create a CArray
shape = (2,2)
atom = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2,2))
array1[...] = numpy.array([[456, 2],[3, 457]], dtype='int16')
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another Array
array2 = array1.copy('/', 'array2', copyuserattrs=1)
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Assert user attributes
self.assertEqual(array2.attrs.attr1, "attr1")
self.assertEqual(array2.attrs.attr2, 2)
# Close the file
fileh.close()
os.remove(file)
def test05b_copy(self):
"""Checking CArray.copy() method (user attributes not copied)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test05b_copy..." % self.__class__.__name__
        # Create an HDF5 file
        file = tempfile.mktemp(".h5")
        fileh = openFile(file, "w")
        # Create a CArray
shape = (2,2)
atom = Int16Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2,2))
array1[...] = numpy.array([[456, 2],[3, 457]], dtype='int16')
# Append some user attrs
array1.attrs.attr1 = "attr1"
array1.attrs.attr2 = 2
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "a")
array1 = fileh.root.array1
# Copy it to another Array
array2 = array1.copy('/', 'array2', copyuserattrs=0)
if self.close:
if common.verbose:
print "(closing file version)"
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Assert user attributes
self.assertEqual(hasattr(array2.attrs, "attr1"), 0)
self.assertEqual(hasattr(array2.attrs, "attr2"), 0)
# Close the file
fileh.close()
os.remove(file)
class CloseCopyTestCase(CopyTestCase):
close = 1
class OpenCopyTestCase(CopyTestCase):
close = 0
class CopyIndexTestCase(unittest.TestCase):
nrowsinbuf = 2
def test01_index(self):
"""Checking CArray.copy() method with indexes"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test01_index..." % self.__class__.__name__
        # Create an HDF5 file
        file = tempfile.mktemp(".h5")
        fileh = openFile(file, "w")
        # Create a CArray
shape = (100,2)
atom = Int32Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2,2))
r = numpy.arange(200, dtype='int32')
r.shape = shape
array1[...] = r
# Select a different buffer size:
array1.nrowsinbuf = self.nrowsinbuf
# Copy to another array
array2 = array1.copy("/", 'array2',
start=self.start,
stop=self.stop,
step=self.step)
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
r2 = r[self.start:self.stop:self.step]
self.assertTrue(allequal(r2, array2.read()))
# Assert the number of rows in array
if common.verbose:
print "nrows in array2-->", array2.nrows
print "and it should be-->", r2.shape[0]
# The next line is commented out because a copy should not
# keep the same chunkshape anymore.
# F. Alted 2006-11-27
#assert array1.chunkshape == array2.chunkshape
self.assertEqual(r2.shape[0], array2.nrows)
# Close the file
fileh.close()
os.remove(file)
def _test02_indexclosef(self):
"""Checking CArray.copy() method with indexes (close file version)"""
if common.verbose:
print '\n', '-=' * 30
print "Running %s.test02_indexclosef..." % self.__class__.__name__
        # Create an HDF5 file
        file = tempfile.mktemp(".h5")
        fileh = openFile(file, "w")
        # Create a CArray
shape = (100,2)
atom = Int32Atom()
array1 = fileh.createCArray(fileh.root, 'array1', atom, shape,
"title array1", chunkshape=(2,2))
r = numpy.arange(200, dtype='int32')
r.shape = shape
array1[...] = r
# Select a different buffer size:
array1.nrowsinbuf = self.nrowsinbuf
# Copy to another array
array2 = array1.copy("/", 'array2',
start=self.start,
stop=self.stop,
step=self.step)
# Close and reopen the file
fileh.close()
fileh = openFile(file, mode = "r")
array1 = fileh.root.array1
array2 = fileh.root.array2
if common.verbose:
print "array1-->", array1.read()
print "array2-->", array2.read()
print "attrs array1-->", repr(array1.attrs)
print "attrs array2-->", repr(array2.attrs)
# Check that all the elements are equal
r2 = r[self.start:self.stop:self.step]
self.assertEqual(array1.chunkshape, array2.chunkshape)
self.assertTrue(allequal(r2, array2.read()))
# Assert the number of rows in array
if common.verbose:
print "nrows in array2-->", array2.nrows
print "and it should be-->", r2.shape[0]
self.assertEqual(r2.shape[0], array2.nrows)
# Close the file
fileh.close()
os.remove(file)
class CopyIndex1TestCase(CopyIndexTestCase):
nrowsinbuf = 1
start = 0
stop = 7
step = 1
class CopyIndex2TestCase(CopyIndexTestCase):
nrowsinbuf = 2
start = 0
stop = -1
step = 1
class CopyIndex3TestCase(CopyIndexTestCase):
nrowsinbuf = 3
start = 1
stop = 7
step = 1
class CopyIndex4TestCase(CopyIndexTestCase):
nrowsinbuf = 4
start = 0
stop = 6
step = 1
class CopyIndex5TestCase(CopyIndexTestCase):
nrowsinbuf = 2
start = 3
stop = 7
step = 1
class CopyIndex6TestCase(CopyIndexTestCase):
nrowsinbuf = 2
start = 3
stop = 6
step = 2
class CopyIndex7TestCase(CopyIndexTestCase):
start = 0
stop = 7
step = 10
class CopyIndex8TestCase(CopyIndexTestCase):
start = 6
    stop = -1 # Negative values mean counting from the end
step = 1
class CopyIndex9TestCase(CopyIndexTestCase):
start = 3
stop = 4
step = 1
class CopyIndex10TestCase(CopyIndexTestCase):
nrowsinbuf = 1
start = 3
stop = 4
step = 2
class CopyIndex11TestCase(CopyIndexTestCase):
start = -3
stop = -1
step = 2
class CopyIndex12TestCase(CopyIndexTestCase):
start = -1 # Should point to the last element
stop = None # None should mean the last element (including it)
step = 1
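# Sketch of the indexed-copy pattern these CopyIndex* cases drive
# (the start/stop/step values below are illustrative):
#   array2 = array1.copy('/', 'array2', start=0, stop=7, step=2)
#   # array2 now holds array1[0:7:2]; note that the copy does not
#   # necessarily keep array1's chunkshape (see the comments above).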
# The next test should be run only in **heavy** mode
class Rows64bitsTestCase(unittest.TestCase):
narows = 1000*1000 # each array will have 1 million entries
#narows = 1000 # for testing only
nanumber = 1000*3 # That should account for more than 2**31-1
def setUp(self):
        # Create an HDF5 file
        self.file = tempfile.mktemp(".h5")
        fileh = self.fileh = openFile(self.file, "a")
        # Create a CArray
shape = (self.narows*self.nanumber,)
array = fileh.createCArray(fileh.root, 'array',
Int8Atom(), shape,
filters=Filters(complib='lzo',
complevel=1))
# Fill the array
na = numpy.arange(self.narows, dtype='int8')
#~ for i in xrange(self.nanumber):
#~ s = slice(i*self.narows, (i+1)*self.narows)
#~ array[s] = na
s = slice(0, self.narows)
array[s] = na
s = slice((self.nanumber-1)*self.narows, self.nanumber*self.narows)
array[s] = na
def tearDown(self):
self.fileh.close()
os.remove(self.file)
common.cleanup(self)
#----------------------------------------
def test01_basiccheck(self):
"Some basic checks for carrays exceeding 2**31 rows"
fileh = self.fileh
array = fileh.root.array
if self.close:
if common.verbose:
# Check how many entries there are in the array
print "Before closing"
print "Entries:", array.nrows, type(array.nrows)
print "Entries:", array.nrows / (1000*1000), "Millions"
print "Shape:", array.shape
# Close the file
fileh.close()
# Re-open the file
fileh = self.fileh = openFile(self.file)
array = fileh.root.array
if common.verbose:
print "After re-open"
# Check how many entries there are in the array
if common.verbose:
print "Entries:", array.nrows, type(array.nrows)
print "Entries:", array.nrows / (1000*1000), "Millions"
print "Shape:", array.shape
print "Last 10 elements-->", array[-10:]
stop = self.narows%256
if stop > 127:
stop -= 256
start = stop - 10
#print "start, stop-->", start, stop
print "Should look like:", numpy.arange(start, stop, dtype='int8')
nrows = self.narows*self.nanumber
# check nrows
self.assertEqual(array.nrows, nrows)
# Check shape
self.assertEqual(array.shape, (nrows,))
# check the 10 first elements
self.assertTrue(allequal(array[:10], numpy.arange(10, dtype='int8')))
# check the 10 last elements
stop = self.narows%256
if stop > 127:
stop -= 256
start = stop - 10
self.assertTrue(allequal(array[-10:],
numpy.arange(start, stop, dtype='int8')))
class Rows64bitsTestCase1(Rows64bitsTestCase):
close = 0
class Rows64bitsTestCase2(Rows64bitsTestCase):
close = 1
class BigArrayTestCase(common.TempFileMixin, common.PyTablesTestCase):
shape = (3000000000,) # more than 2**31-1
def setUp(self):
super(BigArrayTestCase, self).setUp()
# This should be fast since disk space isn't actually allocated,
# so this case is OK for non-heavy test runs.
self.h5file.createCArray('/', 'array', Int8Atom(), self.shape)
def test00_shape(self):
"""Check that the shape doesn't overflow."""
# See ticket #147.
self.assertEqual(self.h5file.root.array.shape, self.shape)
try:
self.assertEqual(len(self.h5file.root.array), self.shape[0])
except OverflowError:
            # This can't be avoided on 32-bit platforms.
self.assertTrue(self.shape[0] > numpy.iinfo(int).max,
"Array length overflowed but ``int`` "
"is wide enough." )
def test01_shape_reopen(self):
"""Check that the shape doesn't overflow after reopening."""
self._reopen('r')
self.test00_shape()
# Test for default values when creating arrays.
class DfltAtomTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test00_dflt(self):
"Check that Atom.dflt is honored (string version)."
# Create a CArray with default values
self.h5file.createCArray(
'/', 'bar', StringAtom(itemsize=5, dflt="abdef"), (10,10))
if self.reopen:
self._reopen()
# Check the values
values = self.h5file.root.bar[:]
if common.verbose:
print "Read values:", values
self.assertTrue(allequal(values,
numpy.array(["abdef"]*100, "S5").reshape(10,10)))
def test01_dflt(self):
"Check that Atom.dflt is honored (int version)."
# Create a CArray with default values
self.h5file.createCArray('/', 'bar', IntAtom(dflt=1), (10,10))
if self.reopen:
self._reopen()
# Check the values
values = self.h5file.root.bar[:]
if common.verbose:
print "Read values:", values
self.assertTrue(allequal(values, numpy.ones((10,10), "i4")))
def test02_dflt(self):
"Check that Atom.dflt is honored (float version)."
# Create a CArray with default values
self.h5file.createCArray('/', 'bar', FloatAtom(dflt=1.134), (10,10))
if self.reopen:
self._reopen()
# Check the values
values = self.h5file.root.bar[:]
if common.verbose:
print "Read values:", values
self.assertTrue(allequal(values, numpy.ones((10,10), "f8")*1.134))
class DfltAtomNoReopen(DfltAtomTestCase):
reopen = False
class DfltAtomReopen(DfltAtomTestCase):
reopen = True
# Test for representation of defaults in atoms. Ticket #212.
class AtomDefaultReprTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test00a_zeros(self):
"Testing default values. Zeros (scalar)."
N = ()
atom = StringAtom(itemsize=3, shape=N, dflt="")
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", repr(ca[0])
print "Defaults-->", repr(ca.atom.dflt)
self.assertTrue(allequal(ca[0], numpy.zeros(N, 'S3')))
self.assertTrue(allequal(ca.atom.dflt, numpy.zeros(N, 'S3')))
def test00b_zeros(self):
"Testing default values. Zeros (array)."
N = 2
atom = StringAtom(itemsize=3, shape=N, dflt="")
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
print "Defaults-->", ca.atom.dflt
self.assertTrue(allequal(ca[0], numpy.zeros(N, 'S3')))
self.assertTrue(allequal(ca.atom.dflt, numpy.zeros(N, 'S3')))
def test01a_values(self):
"Testing default values. Ones."
N = 2
atom = Int32Atom(shape=N, dflt=1)
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
print "Defaults-->", ca.atom.dflt
self.assertTrue(allequal(ca[0], numpy.ones(N, 'i4')))
self.assertTrue(allequal(ca.atom.dflt, numpy.ones(N, 'i4')))
def test01b_values(self):
"Testing default values. Generic value."
N = 2
generic = 112.32
atom = Float32Atom(shape=N, dflt=generic)
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
print "Defaults-->", ca.atom.dflt
self.assertTrue(allequal(ca[0], numpy.ones(N, 'f4')*generic))
self.assertTrue(allequal(ca.atom.dflt, numpy.ones(N, 'f4')*generic))
def test02a_None(self):
"Testing default values. None (scalar)."
N = ()
atom = Int32Atom(shape=N, dflt=None)
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", repr(ca[0])
print "Defaults-->", repr(ca.atom.dflt)
self.assertTrue(allequal(ca.atom.dflt, numpy.zeros(N, 'i4')))
def test02b_None(self):
"Testing default values. None (array)."
N = 2
atom = Int32Atom(shape=N, dflt=None)
ca = self.h5file.createCArray('/', 'test', atom, (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
print "Defaults-->", ca.atom.dflt
self.assertTrue(allequal(ca.atom.dflt, numpy.zeros(N, 'i4')))
class AtomDefaultReprNoReopen(AtomDefaultReprTestCase):
reopen = False
class AtomDefaultReprReopen(AtomDefaultReprTestCase):
reopen = True
class TruncateTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test(self):
"""Test for unability to truncate Array objects."""
array1 = self.h5file.createArray('/', 'array1', [0, 2])
self.assertRaises(TypeError, array1.truncate, 0)
# Test for dealing with multidimensional atoms
class MDAtomTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test01a_assign(self):
"Assign a row to a (unidimensional) CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2,2)), (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign one row
ca[0] = [[1,3],[4,5]]
self.assertEqual(ca.nrows, 1)
if common.verbose:
print "First row-->", ca[0]
self.assertTrue(allequal(ca[0], numpy.array([[1,3],[4,5]], 'i4')))
def test01b_assign(self):
"Assign several rows to a (unidimensional) CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2,2)), (3,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign three rows
ca[:] = [[[1]], [[2]], [[3]]] # Simple broadcast
self.assertEqual(ca.nrows, 3)
if common.verbose:
print "Third row-->", ca[2]
self.assertTrue(allequal(ca[2], numpy.array([[3,3],[3,3]], 'i4')))
def test02a_assign(self):
"Assign a row to a (multidimensional) CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2,)), (1,3))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign one row
ca[:] = [[[1,3],[4,5],[7,9]]]
self.assertEqual(ca.nrows, 1)
if common.verbose:
print "First row-->", ca[0]
self.assertTrue(allequal(ca[0], numpy.array([[1,3],[4,5],[7,9]], 'i4')))
def test02b_assign(self):
"Assign several rows to a (multidimensional) CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2,)), (3,3))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign three rows
ca[:] = [[[1,-3],[4,-5],[-7,9]],
[[-1,3],[-4,5],[7,-8]],
[[-2,3],[-5,5],[7,-9]]]
self.assertEqual(ca.nrows, 3)
if common.verbose:
print "Third row-->", ca[2]
self.assertTrue(allequal(ca[2],
numpy.array([[-2,3],[-5,5],[7,-9]], 'i4')))
def test03a_MDMDMD(self):
"Complex assign of a MD array in a MD CArray with a MD atom."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2,4)), (3,2,3))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign values
# The shape of the atom should be added at the end of the arrays
a = numpy.arange(2*3*2*4, dtype='i4').reshape((2,3,2,4))
ca[:] = [a*1, a*2, a*3]
self.assertEqual(ca.nrows, 3)
if common.verbose:
print "Third row-->", ca[2]
self.assertTrue(allequal(ca[2], a*3))
def test03b_MDMDMD(self):
"Complex assign of a MD array in a MD CArray with a MD atom (II)."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2,4)), (2,3,3))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign values
# The shape of the atom should be added at the end of the arrays
a = numpy.arange(2*3*3*2*4, dtype='i4').reshape((2,3,3,2,4))
ca[:] = a
self.assertEqual(ca.nrows, 2)
if common.verbose:
print "Third row-->", ca[:,2,...]
self.assertTrue(allequal(ca[:,2,...], a[:,2,...]))
def test03c_MDMDMD(self):
"Complex assign of a MD array in a MD CArray with a MD atom (III)."
        # Create a CArray
ca = self.h5file.createCArray('/', 'test', Int32Atom((2,4)), (3,1,2))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Assign values
# The shape of the atom should be added at the end of the arrays
a = numpy.arange(3*1*2*2*4, dtype='i4').reshape((3,1,2,2,4))
ca[:] = a
self.assertEqual(ca.nrows, 3)
if common.verbose:
print "Second row-->", ca[:,:,1,...]
self.assertTrue(allequal(ca[:,:,1,...], a[:,:,1,...]))
class MDAtomNoReopen(MDAtomTestCase):
reopen = False
class MDAtomReopen(MDAtomTestCase):
reopen = True
# Test for building very large MD atoms without defaults. Ticket #211.
class MDLargeAtomTestCase(common.TempFileMixin, common.PyTablesTestCase):
def test01_create(self):
"Create a CArray with a very large MD atom."
N = 2**16 # 4x larger than maximum object header size (64 KB)
ca = self.h5file.createCArray('/', 'test', Int32Atom(shape=N), (1,))
if self.reopen:
self._reopen('a')
ca = self.h5file.root.test
# Check the value
if common.verbose:
print "First row-->", ca[0]
self.assertTrue(allequal(ca[0], numpy.zeros(N, 'i4')))
class MDLargeAtomNoReopen(MDLargeAtomTestCase):
reopen = False
class MDLargeAtomReopen(MDLargeAtomTestCase):
reopen = True
#----------------------------------------------------------------------
def suite():
theSuite = unittest.TestSuite()
    #global numeric  # legacy: Numeric support is deprecated (see commented blocks below)
niter = 1
#common.heavy = 1 # uncomment this only for testing purposes
#theSuite.addTest(unittest.makeSuite(BasicTestCase))
for n in range(niter):
theSuite.addTest(unittest.makeSuite(BasicWriteTestCase))
theSuite.addTest(unittest.makeSuite(BasicWrite2TestCase))
theSuite.addTest(unittest.makeSuite(EmptyCArrayTestCase))
theSuite.addTest(unittest.makeSuite(EmptyCArray2TestCase))
theSuite.addTest(unittest.makeSuite(SlicesCArrayTestCase))
theSuite.addTest(unittest.makeSuite(Slices2CArrayTestCase))
theSuite.addTest(unittest.makeSuite(EllipsisCArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis2CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis3CArrayTestCase))
theSuite.addTest(unittest.makeSuite(ZlibComprTestCase))
theSuite.addTest(unittest.makeSuite(ZlibShuffleTestCase))
theSuite.addTest(unittest.makeSuite(BloscComprTestCase))
theSuite.addTest(unittest.makeSuite(BloscShuffleTestCase))
theSuite.addTest(unittest.makeSuite(LZOComprTestCase))
theSuite.addTest(unittest.makeSuite(LZOShuffleTestCase))
theSuite.addTest(unittest.makeSuite(Bzip2ComprTestCase))
theSuite.addTest(unittest.makeSuite(Bzip2ShuffleTestCase))
theSuite.addTest(unittest.makeSuite(FloatTypeTestCase))
theSuite.addTest(unittest.makeSuite(ComplexTypeTestCase))
theSuite.addTest(unittest.makeSuite(StringTestCase))
theSuite.addTest(unittest.makeSuite(String2TestCase))
theSuite.addTest(unittest.makeSuite(StringComprTestCase))
# numarray is now deprecated
#if numarray_imported:
# theSuite.addTest(unittest.makeSuite(NumarrayInt8TestCase))
# theSuite.addTest(unittest.makeSuite(NumarrayInt16TestCase))
# theSuite.addTest(unittest.makeSuite(NumarrayInt32TestCase))
# theSuite.addTest(unittest.makeSuite(NumarrayFloat32TestCase))
# theSuite.addTest(unittest.makeSuite(NumarrayFloat64TestCase))
# theSuite.addTest(unittest.makeSuite(NumarrayComplex64TestCase))
# theSuite.addTest(unittest.makeSuite(NumarrayComplex128TestCase))
# theSuite.addTest(unittest.makeSuite(NumarrayComprTestCase))
# theSuite.addTest(unittest.makeSuite(NumarrayOffsetStrideTestCase))
# Numeric is now deprecated
#if numeric_imported:
# theSuite.addTest(unittest.makeSuite(NumericInt8TestCase))
# theSuite.addTest(unittest.makeSuite(NumericInt16TestCase))
# theSuite.addTest(unittest.makeSuite(NumericInt32TestCase))
# theSuite.addTest(unittest.makeSuite(NumericFloat32TestCase))
# theSuite.addTest(unittest.makeSuite(NumericFloat64TestCase))
# theSuite.addTest(unittest.makeSuite(NumericComplex64TestCase))
# theSuite.addTest(unittest.makeSuite(NumericComplex128TestCase))
# theSuite.addTest(unittest.makeSuite(NumericComprTestCase))
# theSuite.addTest(unittest.makeSuite(NumericOffsetStrideTestCase))
theSuite.addTest(unittest.makeSuite(OffsetStrideTestCase))
theSuite.addTest(unittest.makeSuite(Fletcher32TestCase))
theSuite.addTest(unittest.makeSuite(AllFiltersTestCase))
theSuite.addTest(unittest.makeSuite(CloseCopyTestCase))
theSuite.addTest(unittest.makeSuite(OpenCopyTestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex1TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex2TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex3TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex4TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex5TestCase))
theSuite.addTest(unittest.makeSuite(BigArrayTestCase))
theSuite.addTest(unittest.makeSuite(DfltAtomNoReopen))
theSuite.addTest(unittest.makeSuite(DfltAtomReopen))
theSuite.addTest(unittest.makeSuite(AtomDefaultReprNoReopen))
theSuite.addTest(unittest.makeSuite(AtomDefaultReprReopen))
theSuite.addTest(unittest.makeSuite(TruncateTestCase))
theSuite.addTest(unittest.makeSuite(MDAtomNoReopen))
theSuite.addTest(unittest.makeSuite(MDAtomReopen))
theSuite.addTest(unittest.makeSuite(MDLargeAtomNoReopen))
theSuite.addTest(unittest.makeSuite(MDLargeAtomReopen))
if common.heavy:
theSuite.addTest(unittest.makeSuite(Slices3CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Slices4CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis4CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis5CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis6CArrayTestCase))
theSuite.addTest(unittest.makeSuite(Ellipsis7CArrayTestCase))
theSuite.addTest(unittest.makeSuite(MD3WriteTestCase))
theSuite.addTest(unittest.makeSuite(MD5WriteTestCase))
theSuite.addTest(unittest.makeSuite(MD6WriteTestCase))
theSuite.addTest(unittest.makeSuite(MD7WriteTestCase))
theSuite.addTest(unittest.makeSuite(MD10WriteTestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex6TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex7TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex8TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex9TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex10TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex11TestCase))
theSuite.addTest(unittest.makeSuite(CopyIndex12TestCase))
theSuite.addTest(unittest.makeSuite(Rows64bitsTestCase1))
theSuite.addTest(unittest.makeSuite(Rows64bitsTestCase2))
return theSuite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## End:
|
#!/usr/bin/env python
#############################################################################
## drug_scafs_2sql.py
##
## Input smiles file SMILES<space>NAME.
##
## Jeremy Yang
## 18 Jan 2017
#############################################################################
import sys,os,getopt,re
PROG=os.path.basename(sys.argv[0])
#############################################################################
if __name__=='__main__':
DBSCHEMA='public'
CHEMKIT='rdkit'
usage='''
%(PROG)s - drug scaffolds, for inDrug annotation, to SQL UPDATEs
required:
--i INFILE ................... input file
options:
--o OUTFILE .................. output file [stdout]
--dbschema DBSCHEMA .......... [%(DBSCHEMA)s]
--chemkit CHEMKIT ............ rdkit|openchord [%(CHEMKIT)s]
--v .......................... verbose
--h .......................... this help
'''%{'PROG':PROG,'DBSCHEMA':DBSCHEMA,'CHEMKIT':CHEMKIT}
def ErrorExit(msg):
print >>sys.stderr,msg
sys.exit(1)
ifile=None; ofile=None;
verbose=0;
opts,pargs = getopt.getopt(sys.argv[1:],'',['h','v','vv', 'i=','o=','dbschema=',
'chemkit='])
if not opts: ErrorExit(usage)
for (opt,val) in opts:
if opt=='--h': ErrorExit(usage)
elif opt=='--i': ifile=val
elif opt=='--o': ofile=val
elif opt=='--dbschema': DBSCHEMA=val
elif opt=='--chemkit': CHEMKIT=val
    elif opt=='--v': verbose=1
    elif opt=='--vv': verbose=2
    else: ErrorExit('Illegal option: %s'%opt)
  if not ifile:
    ErrorExit('ERROR: input file required (--i)')
  try:
    fin=open(ifile)
  except IOError:
    ErrorExit('ERROR: cannot open %s'%ifile)
  if ofile:
    fout=open(ofile,"w")
  else:
    fout=sys.stdout
n_in=0
n_out=0
while True:
line=fin.readline()
if not line: break
n_in+=1
line=line.strip()
if not line or line[0]=='#': continue
    fields=re.split(r'\s+',line)
smi=fields[0]
smi=re.sub(r'\\',r"'||E'\\\\'||'",smi)
fout.write("UPDATE %s.scaffold SET in_drug=TRUE "%DBSCHEMA)
if CHEMKIT=='openchord':
fout.write("WHERE scafsmi=openbabel.cansmiles('%s');\n"%smi)
else:
fout.write("FROM mols_scaf WHERE mols_scaf.scafmol @= '%s'::mol AND scaffold.id = mols_scaf.id;\n"%smi)
n_out+=1
fin.close()
if ofile:
fout.close()
print >>sys.stderr, "%s: lines in: %d ; converted to sql: %d"%(PROG,n_in,n_out)
|
import os
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from colander import (
Boolean,
Integer,
Length,
MappingSchema,
OneOf,
SchemaNode,
SequenceSchema,
String
)
from deform import (
Form,
ValidationFailure,
widget
)
here = os.path.dirname(os.path.abspath(__file__))
"""colors = (('red', 'Red'), ('green', 'Green'), ('blue', 'Blue'))"""
occaa = ( ('0', '- Select -'), ('1', 'Madrid'), ('2', 'Castilla La Mancha') )
oprov = ( ('0', '- Select -'), ('1', 'Madrid'), ('2', 'Toledo'), ('3', 'Cuenca') )
ociud = ( ('0', '- Select -'), ('1', 'Madrid'), ('2', 'Getafe'), ('3', 'Toledo'), ('4', 'Cuenca') )
class Datos:
def __init__(self,nombre,apellidos):
self.nombre=nombre
self.apellidos=apellidos
def sacarvalores(self):
return (self.nombre + " " + self.apellidos)
class DateSchema(MappingSchema):
year = SchemaNode(Integer())
month = SchemaNode(Integer())
day = SchemaNode(Integer())
class DatesSchema(SequenceSchema):
date = DateSchema()
class MySchema(MappingSchema):
    nombre = SchemaNode(String(),
                        description = "Student's first name")
    apellidos = SchemaNode(String(),
                           description = "Student's surname")
    DNI = SchemaNode(String(),validator = Length(max=9),
                     description = "Student's national ID (DNI)")
    comunidad = SchemaNode(String(), description = 'Autonomous Community', widget = widget.Select2Widget(values=occaa))
    provincia = SchemaNode(String(), description = 'Province', widget = widget.Select2Widget(values=oprov))
    ciudad = SchemaNode(String(), description = 'City', widget = widget.Select2Widget(values=ociud))
"""title = SchemaNode(String(),
widget = widget.TextInputWidget(size=40),
validator = Length(max=20),
description = 'A very short title')
password = SchemaNode(String(),
widget = widget.CheckedPasswordWidget(),
validator = Length(min=5))
is_cool = SchemaNode(Boolean(),
default = True)
dates = DatesSchema()
color = SchemaNode(String(),
widget = widget.RadioChoiceWidget(values=colors),
validator = OneOf(('red', 'blue')))"""
def form_view(request):
schema = MySchema()
myform = Form(schema, buttons=('submit','clear'))
template_values = {}
template_values.update(myform.get_widget_resources())
caracteristicas=['nombre','apellidos','DNI','comunidad','provincia','ciudad']
datos={ "nombre": "", 'apellidos': "",'DNI': "",'comunidad': "",'provincia': "",'ciudad': ""}
if 'submit' in request.POST:
controls = request.POST.items()
for item in controls:
if item[0] in caracteristicas:
datos[item[0]]=item[1]
#print(item[0] + " " + item[1])
#print(datos)
try:
myform.validate(controls)
except ValidationFailure as e:
template_values['form'] = e.render()
else:
#print(datos['nombre']+ ' ' + datos['apellidos'])
            template_values['form'] = ' <a href="http://localhost:8080">Back</a>'
return template_values
template_values['form'] = myform.render()
return template_values
if __name__ == '__main__':
settings = dict(reload_templates=True)
config = Configurator(settings=settings)
config.include('pyramid_chameleon')
config.add_view(form_view, renderer=os.path.join(here, 'form.pt'))
config.add_static_view('static', 'deform:static')
app = config.make_wsgi_app()
server = make_server('0.0.0.0', 8080, app)
server.serve_forever()
|
from .novel import Novel
from .novel_url import NovelUrl
from .volume import Volume
from .chapter import Chapter
from .asset import Asset
from .asset_type import AssetType
from .metadata import MetaData
|
""" Functions for testing what kind of Python or Python environment is in use.
"""
def is_py3():
""" Test whether we are running Python 3.
Returns
True if we are running Python 3, otherwise False
"""
import sys
return sys.version_info[0] == 3
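
if __name__ == '__main__':
    # Minimal self-check (not part of the original module; added for illustration).
    print('Running Python 3' if is_py3() else 'Running Python 2')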
|
from flask_wtf import FlaskForm
from wtforms import SubmitField, TextField
from wtforms.validators import DataRequired
class SomeForm(FlaskForm):
input1 = TextField("Input 1", validators=[DataRequired()])
input2 = TextField("Input 2", validators=[DataRequired()])
submit = SubmitField("Submit")
|
import subprocess
import os
import time
import StringIO
import unittest
import tempfile
import shutil
import uuid
import pkg_resources
from pynailgun import NailgunException, NailgunConnection
POSSIBLE_NAILGUN_CODES_ON_NG_STOP = [
NailgunException.CONNECT_FAILED,
NailgunException.CONNECTION_BROKEN,
NailgunException.UNEXPECTED_CHUNKTYPE,
]
if os.name == 'posix':
def transport_exists(transport_file):
return os.path.exists(transport_file)
if os.name == 'nt':
import ctypes
from ctypes.wintypes import WIN32_FIND_DATAW as WIN32_FIND_DATA
INVALID_HANDLE_VALUE = -1
FindFirstFile = ctypes.windll.kernel32.FindFirstFileW
FindClose = ctypes.windll.kernel32.FindClose
    # On Windows, os.path.exists cannot reliably tell whether a pipe exists
    # (it tries to open a connection to the pipe), so use FindFirstFile instead.
def transport_exists(transport_path):
wfd = WIN32_FIND_DATA()
handle = FindFirstFile(transport_path, ctypes.byref(wfd))
result = handle != INVALID_HANDLE_VALUE
FindClose(handle)
return result
@unittest.skip('This test is flaky')
class TestNailgunConnection(unittest.TestCase):
def setUp(self):
self.setUpTransport()
self.startNailgun()
def setUpTransport(self):
self.tmpdir = tempfile.mkdtemp()
if os.name == 'posix':
self.transport_file = os.path.join(self.tmpdir, 'sock')
self.transport_address = 'local:{0}'.format(self.transport_file)
else:
pipe_name = u'nailgun-test-{0}'.format(uuid.uuid4().hex)
self.transport_address = u'local:{0}'.format(pipe_name)
self.transport_file = ur'\\.\pipe\{0}'.format(pipe_name)
def getNailgunUberJar(self):
stream = pkg_resources.resource_stream(__name__, 'nailgun-uber.jar')
uber_jar_path = os.path.join(self.tmpdir, 'nailgun-uber.jar')
with open(uber_jar_path, 'wb') as f:
f.write(stream.read())
return uber_jar_path
def startNailgun(self):
if os.name == 'posix':
def preexec_fn():
# Close any open file descriptors to further separate buckd from its
# invoking context (e.g. otherwise we'd hang when running things like
# `ssh localhost buck clean`).
dev_null_fd = os.open("/dev/null", os.O_RDWR)
os.dup2(dev_null_fd, 0)
os.dup2(dev_null_fd, 1)
os.dup2(dev_null_fd, 2)
os.close(dev_null_fd)
creationflags = 0
else:
preexec_fn = None
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx#DETACHED_PROCESS
DETACHED_PROCESS = 0x00000008
creationflags = DETACHED_PROCESS
self.ng_server_process = subprocess.Popen(
['java', '-Djna.nosys=true', '-jar', self.getNailgunUberJar(), self.transport_address],
close_fds=True,
preexec_fn=preexec_fn,
creationflags=creationflags,
)
self.assertIsNone(self.ng_server_process.poll())
# Give Java some time to create the listening socket.
        for i in range(0, 600):
            if not transport_exists(self.transport_file):
                time.sleep(0.01)
            else:
                break
        self.assertTrue(transport_exists(self.transport_file))
def test_nailgun_stats_and_stop(self):
for i in range(1, 5):
output = StringIO.StringIO()
with NailgunConnection(
self.transport_address,
stderr=None,
stdin=None,
stdout=output) as c:
exit_code = c.send_command('ng-stats')
self.assertEqual(exit_code, 0)
actual_out = output.getvalue().strip()
expected_out = 'com.martiansoftware.nailgun.builtins.NGServerStats: {0}/1'.format(i)
self.assertEqual(actual_out, expected_out)
try:
with NailgunConnection(
self.transport_address,
cwd=os.getcwd(),
stderr=None,
stdin=None,
stdout=None) as c:
c.send_command('ng-stop')
except NailgunException as e:
self.assertIn(e.code, POSSIBLE_NAILGUN_CODES_ON_NG_STOP)
self.ng_server_process.wait()
self.assertEqual(self.ng_server_process.poll(), 0)
def tearDown(self):
if self.ng_server_process.poll() is None:
            # A test failed and ng-server was not stopped; kill it.
self.ng_server_process.kill()
shutil.rmtree(self.tmpdir)
if __name__ == '__main__':
unittest.main()
|
string = input("Enter the string :")
print(string[::-1])
|
import numpy as np
from vispy.gloo import VertexBuffer
from vispy.visuals import Visual
from vispy.visuals.shaders import Function
from vispy.color import ColorArray
vert = """
#version 120
uniform float u_antialias;
uniform float u_px_scale;
uniform float u_scale;
attribute vec3 a_position;
attribute vec4 a_color;
attribute float a_size;
varying vec4 v_fg_color;
varying vec4 v_bg_color;
varying float v_size;
varying float v_linewidth;
varying float v_antialias;
void main() {
v_size = a_size * u_px_scale * u_scale;
v_linewidth = .0 * float(u_px_scale);
v_antialias = .25;
v_fg_color = vec4(a_color.rgba);
v_bg_color = vec4(a_color.rgba);
gl_Position = $transform(vec4(a_position, 1.0));
gl_PointSize = v_size + 4.0 * (v_linewidth + 1.5 * v_antialias);
}
"""
frag = """
#version 120
varying vec4 v_fg_color;
varying vec4 v_bg_color;
varying float v_size;
varying float v_linewidth;
varying float v_antialias;
void main ()
{
if (v_size <= 0.) discard;
float size = v_size + 4.0 * (v_linewidth + 1.5 * v_antialias);
float t = 0.5 * v_linewidth - v_antialias;
float radius = $marker(gl_PointCoord, size);
float dist = abs(radius) - t;
vec4 edgecolor = vec4(v_fg_color.rgb, v_linewidth*v_fg_color.a);
if (radius > 0.5 * v_linewidth + v_antialias) {
discard;
}
else if (dist < 0.0) {
gl_FragColor = vec4(v_fg_color.rgb, 0.5 * v_fg_color.a);
}
else
{
if (v_linewidth == 0.) {
if (radius > -v_antialias) {
float alpha = 1.0 + radius / v_antialias;
alpha = exp(-alpha*alpha);
gl_FragColor = vec4(v_bg_color.rgb, alpha*v_bg_color.a);
} else {
gl_FragColor = v_bg_color;
}
} else {
float alpha = dist / v_antialias;
alpha = exp(-alpha * alpha);
if (radius > 0) {
gl_FragColor = vec4(edgecolor.rgb, alpha * edgecolor.a);
} else {
gl_FragColor = mix(v_bg_color, edgecolor, alpha);
}
}
}
}
"""
disc = """
float disc(vec2 pointcoord, float size) {
float radius = length((pointcoord.xy - vec2(.5, .5)) * size);
radius -= $v_size / 2.;
return radius;
}
"""
_marker_dict = {
"disc": disc
}
class MarkersVisual(Visual):
def __init__(self, data, color, symbol="disc", size=3):
Visual.__init__(self, vert, frag)
self.size = size
self.symbol = symbol
self.shared_program["a_position"] = np.float32(data)
self.shared_program["a_size"] = np.repeat(self.size, data.shape[0]).astype(np.float32)
self.shared_program["a_color"] = VertexBuffer(color.rgba)
self._draw_mode = "points"
def set_data(self, data):
self.shared_program["a_position"] = np.float32(data)
@property
def symbol(self):
return self._symbol
@symbol.setter
def symbol(self, symbol):
self._marker_fun = Function(_marker_dict[symbol])
self._marker_fun["v_size"] = self.size
self.shared_program.frag["marker"] = self._marker_fun
self.update()
def _prepare_draw(self, view):
self.set_gl_state(depth_test=True, blend_func=("src_alpha", "one_minus_src_alpha"))
view.view_program["u_px_scale"] = view.transforms.pixel_scale
view.view_program["u_scale"] = 1
def _prepare_transforms(self, view):
view.view_program.vert["transform"] = view.transforms.get_transform()
|
""" some image manipulation functions like scaling, rotating, etc...
"""
from __future__ import print_function, unicode_literals, absolute_import, division
import numpy as np
from gputools import map_coordinates
from scipy import ndimage
import pytest
def create_shape(shape=(100, 110, 120)):
d = np.zeros(shape, np.float32)
ss = tuple([slice(s // 10, 9 * s // 10) for s in shape])
d[ss] = 1+np.random.uniform(0,1,d[ss].shape)
for i in range(len(shape)):
ss0 = list(slice(None) for _ in range(len(shape)))
ss0[i] = (10. / min(shape) * np.arange(shape[i])) % 2 > 1
d[ss0] = 0
return d
def check_error(func):
def test_func(check=True, nstacks=10):
np.random.seed(42)
for _ in range(nstacks):
ndim = np.random.choice((2,3))
shape = np.random.randint(22, 55, ndim)
x = create_shape(shape)
out1, out2 = func(x)
if check:
np.testing.assert_allclose(out1, out2, atol=1e-2, rtol=1.e-2)
return x, out1, out2
return test_func
@check_error
def test_map_coordinates(x):
    coordinates = np.random.randint(0, min(x.shape), (x.ndim, 100))
print(coordinates.shape, x.shape)
out1 = map_coordinates(x, coordinates, interpolation="linear")
out2 = ndimage.map_coordinates(x, coordinates, order=1, prefilter=False)
return out1, out2
if __name__ == '__main__':
x, y1, y2 = test_map_coordinates(check=False, nstacks=1)
|
from pathlib import Path
import click
from .. import plot
from .._profiling import Profiling
__all__ = ["plot_acc_eeplot"]
@click.command()
@click.argument(
"experiment",
type=click.Path(
exists=True, dir_okay=True, file_okay=False, readable=True, resolve_path=True
),
)
@click.argument("accession", type=str)
@click.argument(
"output",
type=click.Path(
exists=False, dir_okay=False, file_okay=True, writable=True, resolve_path=True
),
)
@click.option(
"--multihit/--no-multihit",
help="Keep or discard (default) multiple hits on the same target.",
default=False,
)
def plot_acc_eeplot(experiment: str, accession: str, output: str, multihit: bool):
"""
Plot accession e-values.
"""
root = Path(experiment)
prof = Profiling(root)
prof_acc = prof.read_accession(accession)
fig = plot.acc_eeplot(prof_acc, 1e-10, multihit)
outpath = Path(output)
if outpath.suffix == ".html":
fig.write_html(str(outpath))
else:
fig.write_image(str(outpath))
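
# Example CLI invocation (hypothetical experiment directory, accession, and output name;
# the installed command name depends on how this click entry point is registered):
#   plot-acc-eeplot ./experiment P12345 eeplot.html --multihit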
|
import json
import os
from pathlib import Path
import time
from typing import List
import pandas as pd
import requests
from dotenv import load_dotenv
from rich import print
from rich.progress import track
from tqdm import tqdm
# from top_github_scraper.utils import ScrapeGithubUrl, UserProfileGetter, isnotebook
import logging
from datetime import datetime
from github import Github
from ratelimit import limits
# get the standard UTC time
# g = Github(TOKEN)
# current_timezone = pytz.timezone('US/Pacific')
# reset_timestamp = g.get_rate_limit().core.reset.astimezone(current_timezone)
# sleep_time = (reset_timestamp - datetime.now(current_timezone)).seconds + 5
repo_list_file = "data/merged_repos.csv"
ONE_HOUR = 3600
from IPython import get_ipython
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
class RepoScraper:
"""Scrape information of repos and the
contributors of those repositories"""
def __init__(
self, repo_urls: list, max_n_top_contributors: int, USERNAME: str, TOKEN: str
):
self.repo_urls = repo_urls
self.max_n_top_contributors = max_n_top_contributors
self.USERNAME = USERNAME
self.TOKEN = TOKEN
self.github = Github(TOKEN)
@limits(calls=5000, period=ONE_HOUR)
def call_api(self, url, *args):
response = requests.get(url, auth=(self.USERNAME, self.TOKEN))
# if response.status_code == 404:
# return "Not Found"
# if response.status_code != 200:
# raise Exception("API response: {}".format(response.status_code))
return response
# This method is used to limit the rate of requests sent to GitHub
def __choke(self):
if self.github.get_rate_limit().core.remaining < 3:
naptime = (
self.github.get_rate_limit().core.reset - datetime.now()
).seconds + 5
print(f"About to exceed rate limit :/ sleeping for {naptime} seconds")
time.sleep(naptime)
print("Done sleeping - back to work!")
def get_all_top_repo_information(self):
top_repo_infos = []
if isnotebook():
for repo_url in tqdm(
self.repo_urls, desc="Scraping top GitHub repositories..."
):
top_repo_infos.append(self._get_repo_information(repo_url))
else:
for repo_url in track(
self.repo_urls, description="Scraping top GitHub repositories..."
):
top_repo_infos.append(self._get_repo_information(repo_url))
print(f"Finished getting repo info for {len(self.repo_urls)} repos!")
return top_repo_infos
    def _get_repo_information(self, repo_url: str):
        self.__choke()
        repo_info_url = f"https://api.github.com/repos{repo_url}"
        repo_important_info = {}
        # Defined before the try block so the except clause can still build a
        # placeholder record when the request itself fails.
        info_to_scrape = [
            "created_at",
            "updated_at",
            "pushed_at",
            "size",
            "stargazers_count",
            "watchers_count",
            "language",
            "has_issues",
            "has_projects",
            "has_downloads",
            "has_wiki",
            "has_pages",
            "forks_count",
        ]
        repo_resp = None
        try:
            repo_resp = self.call_api(repo_info_url)
            repo_info = repo_resp.json()
            repo_important_info["repo"] = repo_url
            for info in info_to_scrape:
                repo_important_info[info] = repo_info.get(info, None)
            repo_important_info[
                "contributors"
            ] = self._get_contributor_repo_of_one_repo(repo_url)
            return repo_important_info
        except Exception as e:
            # repo_resp may be None if call_api itself raised before returning.
            status = repo_resp.status_code if repo_resp is not None else "unknown"
            print(
                f"Request for {repo_url} failed due to {e} - status code {status}"
            )
            repo_important_info["repo"] = repo_url
            for info in info_to_scrape:
                repo_important_info[info] = "Invalid Repo"
            repo_important_info["contributors"] = "Invalid Repo"
            return repo_important_info
def _get_contributor_repo_of_one_repo(self, repo_url: str):
self.__choke()
contributor_url = f"https://api.github.com/repos{repo_url}/contributors"
contributor_page_resp = self.call_api(contributor_url)
contributor_page = contributor_page_resp.json()
contributors_info = {"login": [], "url": [], "contributions": []}
try:
max_n_top_contributors = self._find_max_n_top_contributors(
num_contributors=len(contributor_page)
)
n_top_contributor = 0
while n_top_contributor < max_n_top_contributors:
contributor = contributor_page[n_top_contributor]
self._get_contributor_general_info(contributors_info, contributor)
n_top_contributor += 1
return contributors_info
except Exception as e:
print(contributor_page)
print(
f"Failed to retrieve top contributors for {repo_url} due to {e} - status code {contributor_page_resp.status_code}"
)
return contributors_info
@staticmethod
    def _get_contributor_general_info(contributors_info: dict, contributor: dict):
contributors_info["login"].append(contributor["login"])
contributors_info["url"].append(contributor["url"])
contributors_info["contributions"].append(contributor["contributions"])
def _find_max_n_top_contributors(self, num_contributors: int):
if num_contributors > self.max_n_top_contributors:
return self.max_n_top_contributors
else:
return num_contributors
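
# Usage sketch for RepoScraper (hypothetical repo path and credentials; requires
# network access and a valid GitHub token):
#   scraper = RepoScraper(["/octocat/Hello-World"], max_n_top_contributors=5,
#                         USERNAME="user", TOKEN="token")
#   repo_infos = scraper.get_all_top_repo_information()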
class RepoStatsScraper:
"""Scrape information of repos and the
contributors of those repositories"""
def __init__(self, USERNAME: str, TOKEN: str, since: datetime = None):
self.USERNAME = USERNAME
self.TOKEN = TOKEN
self.github = Github(TOKEN)
self.since = since
# This method is used to limit the rate of requests sent to GitHub
def __choke(self):
remaining = self.github.get_rate_limit().core.remaining
print(f"There are {remaining} remaining requests before ratelimiting")
if remaining < 3:
naptime = (
self.github.get_rate_limit().core.reset - datetime.now()
).seconds + 5
print(f"About to exceed rate limit :/ sleeping for {naptime} seconds")
time.sleep(naptime)
print("Done sleeping - back to work!")
def _get_repo_weekly_stats(self, repo_url: str):
self.__choke()
try:
if repo_url.startswith("/"):
repo_url = repo_url[1:]
repo = self.github.get_repo(repo_url, lazy=False)
starPages = [
(stargazer.user.login, stargazer.starred_at)
for stargazer in repo.get_stargazers_with_dates()
]
# statsContributorsPages = repo.get_stats_contributors()
statsCommitActivityPages = [
week.raw_data for week in repo.get_stats_commit_activity()
]
statsCodeFrequencyPages = [
week.raw_data for week in repo.get_stats_code_frequency()
]
# Stars over time
if len(starPages) > 0:
stargazer_dates_df = pd.DataFrame(starPages).rename(
columns={0: "stargazer", 1: "starred_at"}
)
stars_by_day = stargazer_dates_df.groupby(
pd.Grouper(key="starred_at", freq="1W")
).agg(
{
"stargazer": [list, "size"],
}
) # stargazer_dates_df.groupby(stargazer_dates_df.starred_at.dt.date).agg({'stargazer':[list, 'size'], })
stars_by_day.columns = [i + "_" + y for i, y in stars_by_day.columns]
stars_by_day.reset_index(inplace=True)
stars_by_day["starred_at"] = pd.to_datetime(stars_by_day.starred_at)
else:
stars_by_day = pd.DataFrame(
columns=[
"starred_at",
"stargazer_list",
"stargazer_size",
]
)
### Commit Frequency
if len(statsCommitActivityPages) > 0:
statsCommitActivity_df = pd.DataFrame(statsCommitActivityPages).rename(
columns={"total": "total_commits", "days": "commits_per_day"}
)
statsCommitActivity_df["week"] = pd.to_datetime(
statsCommitActivity_df.week, unit="s"
)
else:
statsCommitActivity_df = pd.DataFrame(
columns=[
"week",
"total_commits",
"commits_per_day",
]
)
### Code frequency
if len(statsCodeFrequencyPages) > 0:
statsCodeFrequencyPages_df = pd.DataFrame(statsCodeFrequencyPages)
statsCodeFrequencyPages_df.rename(
columns={0: "week", 1: "additions", 2: "deletions"}, inplace=True
)
statsCodeFrequencyPages_df["week"] = pd.to_datetime(
statsCodeFrequencyPages_df.week, unit="s"
)
else:
statsCodeFrequencyPages_df = pd.DataFrame(
columns=["week", "additions", "deletions"]
)
# merge data
commits_add_delete_df = pd.merge(
statsCodeFrequencyPages_df,
statsCommitActivity_df,
left_on="week",
right_on="week",
how="outer",
)
commits_add_delete_and_stars_df = pd.merge(
stars_by_day,
commits_add_delete_df,
left_on="starred_at",
right_on="week",
how="outer",
)
commits_add_delete_and_stars_df["repo_path"] = repo_url
return commits_add_delete_and_stars_df
except Exception as e:
print(f"Request for {repo_url} failed due to {e}")
return pd.DataFrame(
columns=[
"starred_at",
"stargazer_list",
"stargazer_size",
"week",
"additions",
"deletions",
"total_commits",
"commits_per_day",
"repo_path",
]
)
def _get_repo_weekly_stats_from_date(self, repo_url: str, from_datetime):
self.__choke()
# from_datetime = datetime.strptime('2022-01-30', '%Y-%m-%d')
# from_datetime = datetime.strptime(since, '%Y-%m-%d')
try:
if repo_url.startswith("/"):
repo_url = repo_url[1:]
repo = self.github.get_repo(repo_url, lazy=False)
# Get reversed order
starPages = []
statsCommitActivityPages = []
statsCodeFrequencyPages = []
stargazersPaginated = repo.get_stargazers_with_dates().reversed
# commitActivityPaginated = reversed(repo.get_stats_commit_activity()[-1].weeks)
# codeFrequencyPaginated = reversed(repo.get_stats_code_frequency()[-1].weeks)
commitActivityPaginated = reversed(repo.get_stats_commit_activity())
codeFrequencyPaginated = reversed(repo.get_stats_code_frequency())
# Only get new info
for stargazer in stargazersPaginated:
if stargazer.starred_at > from_datetime:
starPages.append((stargazer.user.login, stargazer.starred_at))
else:
break
for week in commitActivityPaginated:
if week.week > from_datetime:
statsCommitActivityPages.append(week.raw_data)
else:
break
for week in codeFrequencyPaginated:
if week.week > from_datetime:
statsCodeFrequencyPages.append(week.raw_data)
else:
break
# Stars over time
if len(starPages) > 0:
stargazer_dates_df = pd.DataFrame(starPages).rename(
columns={0: "stargazer", 1: "starred_at"}
)
stars_by_day = stargazer_dates_df.groupby(
pd.Grouper(key="starred_at", freq="1W")
).agg(
{
"stargazer": [list, "size"],
}
) # stargazer_dates_df.groupby(stargazer_dates_df.starred_at.dt.date).agg({'stargazer':[list, 'size'], })
stars_by_day.columns = [i + "_" + y for i, y in stars_by_day.columns]
stars_by_day.reset_index(inplace=True)
stars_by_day["starred_at"] = pd.to_datetime(stars_by_day.starred_at)
else:
stars_by_day = pd.DataFrame(
columns=[
"starred_at",
"stargazer_list",
"stargazer_size",
]
)
### Commit Frequency
if len(statsCommitActivityPages) > 0:
statsCommitActivity_df = pd.DataFrame(statsCommitActivityPages).rename(
columns={"total": "total_commits", "days": "commits_per_day"}
)
statsCommitActivity_df["week"] = pd.to_datetime(
statsCommitActivity_df.week, unit="s"
)
else:
statsCommitActivity_df = pd.DataFrame(
columns=[
"week",
"total_commits",
"commits_per_day",
]
)
### Code frequency
if len(statsCodeFrequencyPages) > 0:
statsCodeFrequencyPages_df = pd.DataFrame(statsCodeFrequencyPages)
statsCodeFrequencyPages_df.rename(
columns={0: "week", 1: "additions", 2: "deletions"}, inplace=True
)
statsCodeFrequencyPages_df["week"] = pd.to_datetime(
statsCodeFrequencyPages_df.week, unit="s"
)
else:
statsCodeFrequencyPages_df = pd.DataFrame(
columns=["week", "additions", "deletions"]
)
# merge data
commits_add_delete_df = pd.merge(
statsCodeFrequencyPages_df,
statsCommitActivity_df,
left_on="week",
right_on="week",
how="outer",
)
commits_add_delete_and_stars_df = pd.merge(
stars_by_day,
commits_add_delete_df,
left_on="starred_at",
right_on="week",
how="outer",
)
commits_add_delete_and_stars_df["repo_path"] = repo_url
return commits_add_delete_and_stars_df
except Exception as e:
print(f"Request for {repo_url} failed due to {e}")
return pd.DataFrame(
columns=[
"starred_at",
"stargazer_list",
"stargazer_size",
"week",
"additions",
"deletions",
"total_commits",
"commits_per_day",
"repo_path",
]
)
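
# Usage sketch for RepoStatsScraper (hypothetical repo path and credentials):
#   stats = RepoStatsScraper(USERNAME="user", TOKEN="token")
#   weekly_df = stats._get_repo_weekly_stats("octocat/Hello-World")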
from dataclasses import dataclass
from bs4 import BeautifulSoup
import requests
from rich.progress import track
from rich import print
import pandas as pd
import os
import warnings
from dotenv import load_dotenv
from typing import List
from IPython import get_ipython
from tqdm import tqdm
import logging
load_dotenv()
warnings.filterwarnings("ignore")
TYPES = ["Users", "Repositories", "Code", "Commits", "Issues", "Packages", "Topics"]
SORT_BY = {"Users": ["followers"], "Repositories": ["", "stars"]}
SCRAPE_CLASS = {"Users": "mr-1", "Repositories": "v-align-middle"}
USERNAME = os.getenv("GITHUB_USERNAME_LIVE")
TOKEN = os.getenv("GITHUB_TOKEN_LIVE")
class ScrapeGithubUrl:
"""Scrape top Github urls based on a certain keyword and type
Parameters
-------
keyword: str
keyword to search on Github
type: str
whether to search for User or Repositories
sort_by: str
sort by best match or most stars, by default 'best_match', which will sort by best match.
Use 'stars' to sort by most stars.
start_page_num: int
page number to start scraping. The default is 0
stop_page_num: int
page number to stop scraping
Returns
-------
List[str]
"""
def __init__(
self,
keyword: str,
type: str,
sort_by: str,
start_page_num: int,
stop_page_num: int,
):
self.keyword = keyword
self.type = type
self.start_page_num = start_page_num
self.stop_page_num = stop_page_num
if sort_by == "best_match":
self.sort_by = ""
else:
self.sort_by = sort_by
@staticmethod
def _keyword_to_url(page_num: int, keyword: str, type: str, sort_by: str):
"""Change keyword to a url"""
keyword_no_space = ("+").join(keyword.split(" "))
return f"https://github.com/search?o=desc&p={str(page_num)}&q={keyword_no_space}&s={sort_by}&type={type}"
def _scrape_top_repo_url_one_page(self, page_num: int):
"""Scrape urls of top Github repositories in 1 page"""
url = self._keyword_to_url(
page_num, self.keyword, type=self.type, sort_by=self.sort_by
)
page = requests.get(url)
soup = BeautifulSoup(page.text, "html.parser")
a_tags = soup.find_all("a", class_=SCRAPE_CLASS[self.type])
urls = [a_tag.get("href") for a_tag in a_tags]
return urls
def scrape_top_repo_url_multiple_pages(self):
"""Scrape urls of top Github repositories in multiple pages"""
urls = []
if isnotebook():
for page_num in tqdm(
range(self.start_page_num, self.stop_page_num),
desc="Scraping top GitHub URLs...",
):
urls.extend(self._scrape_top_repo_url_one_page(page_num))
else:
for page_num in track(
range(self.start_page_num, self.stop_page_num),
description="Scraping top GitHub URLs...",
):
urls.extend(self._scrape_top_repo_url_one_page(page_num))
return urls
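
# Usage sketch (hypothetical keyword and page range; scrapes live GitHub search pages):
#   urls = ScrapeGithubUrl("data science", type="Repositories", sort_by="stars",
#                          start_page_num=1,
#                          stop_page_num=3).scrape_top_repo_url_multiple_pages()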
class UserProfileGetter:
"""Get the information from users' homepage"""
    def __init__(self, urls: List[str]) -> None:
self.urls = urls
self.profile_features = [
"login",
"url",
"type",
"name",
"company",
"location",
"email",
"hireable",
"bio",
"public_repos",
"public_gists",
"followers",
"following",
]
def _get_one_user_profile(self, profile_url: str):
profile = requests.get(profile_url, auth=(USERNAME, TOKEN)).json()
return {
key: val for key, val in profile.items() if key in self.profile_features
}
def get_all_user_profiles(self):
if isnotebook():
all_contributors = [
self._get_one_user_profile(url)
for url in tqdm(self.urls, desc="Scraping top GitHub profiles...")
]
else:
all_contributors = [
self._get_one_user_profile(url)
for url in track(
self.urls, description="Scraping top GitHub profiles..."
)
]
all_contributors_df = pd.DataFrame(all_contributors).reset_index(drop=True)
return all_contributors_df
|
from __future__ import absolute_import
from .base import (
BaseValidator,
)
from .common import (
validate_uint256,
)
from .inbound import (
validate_account as validate_inbound_account,
validate_block_hash as validate_inbound_block_hash,
validate_block_number as validate_inbound_block_number,
validate_filter_id as validate_inbound_filter_id,
validate_filter_params as validate_inbound_filter_params,
validate_private_key as validate_inbound_private_key,
validate_raw_transaction as validate_inbound_raw_transaction,
validate_timestamp as validate_inbound_timestamp,
validate_transaction as validate_inbound_transaction,
validate_transaction_hash as validate_inbound_transaction_hash,
)
from .outbound import (
validate_32_byte_string,
validate_accounts as validate_outbound_accounts,
validate_block as validate_outbound_block,
validate_block_hash as validate_outbound_block_hash,
validate_bytes as validate_outbound_bytes,
validate_log_entry as validate_outbound_log_entry,
validate_receipt as validate_outbound_receipt,
validate_transaction as validate_outbound_transaction,
)
class DefaultValidator(BaseValidator):
#
# Inbound
#
validate_inbound_account = staticmethod(validate_inbound_account)
validate_inbound_block_hash = staticmethod(validate_inbound_block_hash)
validate_inbound_block_number = staticmethod(validate_inbound_block_number)
validate_inbound_filter_id = staticmethod(validate_inbound_filter_id)
validate_inbound_filter_params = staticmethod(validate_inbound_filter_params)
validate_inbound_private_key = staticmethod(validate_inbound_private_key)
validate_inbound_raw_transaction = staticmethod(validate_inbound_raw_transaction)
validate_inbound_timestamp = staticmethod(validate_inbound_timestamp)
validate_inbound_transaction = staticmethod(validate_inbound_transaction)
validate_inbound_transaction_hash = staticmethod(validate_inbound_transaction_hash)
#
# Outbound
#
validate_outbound_accounts = staticmethod(validate_outbound_accounts)
validate_outbound_balance = staticmethod(validate_uint256)
validate_outbound_block = staticmethod(validate_outbound_block)
validate_outbound_block_hash = staticmethod(validate_outbound_block_hash)
validate_outbound_code = staticmethod(validate_outbound_bytes)
validate_outbound_gas_estimate = staticmethod(validate_uint256)
validate_outbound_nonce = staticmethod(validate_uint256)
validate_outbound_log_entry = staticmethod(validate_outbound_log_entry)
validate_outbound_receipt = staticmethod(validate_outbound_receipt)
validate_outbound_return_data = staticmethod(validate_outbound_bytes)
validate_outbound_transaction = staticmethod(validate_outbound_transaction)
validate_outbound_transaction_hash = staticmethod(validate_32_byte_string)
|
import pygame
def main():
pygame.init()
size = [700, 500]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Background images")
done = False
clock = pygame.time.Clock()
background_image = pygame.image.load("saturn_family1.jpg").convert()
player_image = pygame.image.load("player.png").convert()
click_sound = pygame.mixer.Sound("laser5.ogg")
player_image.set_colorkey([0, 0, 0])
while not done:
# Main Loop
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
            if event.type == pygame.KEYDOWN:
                print(str(event.key))
                if event.key == pygame.K_RETURN:
                    print("user pressed Return")
if event.type == pygame.MOUSEBUTTONDOWN:
click_sound.play()
print("bang")
screen.blit(background_image, [0, 0])
player_pos = pygame.mouse.get_pos()
x = player_pos[0]
y = player_pos[1]
screen.blit(player_image, [x, y])
pygame.display.flip()
clock.tick(60)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 16:14:42 2013
@author: proto
"""
#!/usr/bin/env python
from collections import OrderedDict
import time
import libsbml
import writer.bnglWriter as writer
from optparse import OptionParser
import atomizer.moleculeCreation as mc
import sys
from os import listdir
import re
import pickle
from copy import copy
log = {'species': [], 'reactions': []}
import signal
from collections import Counter,namedtuple
import utils.structures as structures
import atomizer.analyzeRDF
from utils.util import logMess, setupLog, setupStreamLog, finishStreamLog, TranslationException
from utils import consoleCommands
from sbml2bngl import SBML2BNGL
#from biogrid import loadBioGridDict as loadBioGrid
import logging
from rulifier import postAnalysis
import pprint
import fnmatch
from collections import defaultdict
# returntype for the sbml analyzer translator and helper functions
AnalysisResults = namedtuple('AnalysisResults', ['rlength', 'slength', 'reval', 'reval2', 'clength', 'rdf', 'finalString', 'speciesDict', 'database', 'annotation'])
def loadBioGrid():
pass
def handler(signum, frame):
print "Forever is over!"
raise Exception("end of time")
def getFiles(directory,extension):
"""
Gets a list of bngl files that could be correctly translated in a given 'directory'
Keyword arguments:
directory -- The directory we will recurseviley get files from
extension -- A file extension filter
"""
matches = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, '*.{0}'.format(extension)):
filepath = os.path.abspath(os.path.join(root, filename))
matches.append([filepath,os.path.getsize(os.path.join(root, filename))])
#sort by size
#matches.sort(key=lambda filename: filename[1], reverse=False)
matches = [x[0] for x in matches]
return matches
import os.path
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
def evaluation(numMolecules, translator):
originalElements = (numMolecules)
nonStructuredElements = len([1 for x in translator if '()' in str(translator[x])])
if originalElements > 0:
ruleElements = (len(translator) - nonStructuredElements)*1.0/originalElements
if ruleElements> 1:
ruleElements = (len(translator) - nonStructuredElements)*1.0/len(translator.keys())
else:
ruleElements= 0
return ruleElements
#print rules
#14,18,56,19,49.87.88.107,109,111,120,139,140,145,151,153,171,175,182,202,205
#230,253,255,256,268,269,288,313,332,333,334,335,336,362,396,397,399,406
def selectReactionDefinitions(bioNumber):
'''
    This method goes through the stats-biomodels database looking for the
    best reactionDefinitions file available
'''
#with open('stats4.npy') as f:
# db = pickle.load(f)
fileName = resource_path('config/reactionDefinitions.json')
useID = True
naming = resource_path('config/namingConventions.json')
'''
for element in db:
if element[0] == bioNumber and element[1] != '0':
fileName = 'reactionDefinitions/reactionDefinition' + element[1] + '.json'
useID = element[5]
elif element[0] > bioNumber:
break
'''
return fileName,useID,naming
def resolveDependencies(dictionary, key, idx):
counter = 0
for element in dictionary[key]:
if idx < 20:
counter += resolveDependencies(dictionary, element, idx + 1)
else:
counter += 1
return len(dictionary[key]) + counter
def validateReactionUsage(reactant, reactions):
for element in reactions:
if reactant in element:
return element
return None
def readFromString(inputString,reactionDefinitions,useID,speciesEquivalence=None,atomize=False, loggingStream=None):
'''
one of the library's main entry methods. Process data from a string
'''
console = None
if loggingStream:
console = logging.StreamHandler(loggingStream)
console.setLevel(logging.DEBUG)
setupStreamLog(console)
reader = libsbml.SBMLReader()
document = reader.readSBMLFromString(inputString)
parser =SBML2BNGL(document.getModel(),useID)
bioGrid = False
pathwaycommons = True
if bioGrid:
loadBioGrid()
database = structures.Databases()
database.assumptions = defaultdict(set)
database.document = document
database.forceModificationFlag = True
database.reactionDefinitions = reactionDefinitions
database.useID = useID
database.atomize = atomize
database.speciesEquivalence = speciesEquivalence
database.pathwaycommons = True
database.isConversion = True
#if pathwaycommons:
# database.pathwaycommons = True
namingConventions = resource_path('config/namingConventions.json')
if atomize:
translator,onlySynDec = mc.transformMolecules(parser,database,reactionDefinitions,namingConventions,speciesEquivalence,bioGrid)
database.species = translator.keys()
else:
translator={}
#logging.getLogger().flush()
if loggingStream:
finishStreamLog(console)
returnArray = analyzeHelper(document, reactionDefinitions,
useID,'', speciesEquivalence, atomize, translator, database)
if atomize and onlySynDec:
returnArray = list(returnArray)
returnArray = AnalysisResults(*(list(returnArray[0:-2]) + [database] + [returnArray[-1]]))
return returnArray
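
# Usage sketch (hypothetical SBML string; assumes the bundled config files are present):
#   results = readFromString(sbmlXmlString,
#                            resource_path('config/reactionDefinitions.json'),
#                            useID=True, atomize=True)
#   print results.finalString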
def processFunctions(functions,sbmlfunctions,artificialObservables,tfunc):
'''
this method goes through the list of functions and removes all
sbml elements that are extraneous to bngl
'''
# reformat time function
for idx in range(0, len(functions)):
'''
remove calls to functions inside functions
'''
modificationFlag = True
recursionIndex = 0
# remove calls to other sbml functions
while modificationFlag and recursionIndex <20:
modificationFlag = False
for sbml in sbmlfunctions:
if sbml in functions[idx]:
temp = writer.extendFunction(functions[idx], sbml, sbmlfunctions[sbml])
if temp != functions[idx]:
functions[idx] = temp
modificationFlag = True
recursionIndex +=1
break
functions[idx] = re.sub(r'(\W|^)(time)(\W|$)', r'\1time()\3', functions[idx])
functions[idx] = re.sub(r'(\W|^)(Time)(\W|$)', r'\1time()\3', functions[idx])
functions[idx] = re.sub(r'(\W|^)(t)(\W|$)', r'\1time()\3', functions[idx])
#remove true and false
functions[idx] = re.sub(r'(\W|^)(true)(\W|$)', r'\1 1\3', functions[idx])
functions[idx] = re.sub(r'(\W|^)(false)(\W|$)', r'\1 0\3', functions[idx])
#functions.extend(sbmlfunctions)
dependencies2 = {}
for idx in range(0, len(functions)):
dependencies2[functions[idx].split(' = ')[0].split('(')[0].strip()] = []
for key in artificialObservables:
oldfunc = functions[idx]
functions[idx] = (re.sub(r'(\W|^)({0})([^\w(]|$)'.format(key), r'\1\2()\3',functions[idx]))
if oldfunc != functions[idx]:
dependencies2[functions[idx].split(' = ')[0].split('(')[0]].append(key)
for element in sbmlfunctions:
oldfunc = functions[idx]
key = element.split(' = ')[0].split('(')[0]
if re.search('(\W|^){0}(\W|$)'.format(key),functions[idx].split(' = ')[1]) != None:
dependencies2[functions[idx].split(' = ')[0].split('(')[0]].append(key)
for element in tfunc:
key = element.split(' = ')[0].split('(')[0]
if key in functions[idx].split(' = ')[1]:
dependencies2[functions[idx].split( ' = ')[0].split('(')[0]].append(key)
'''
for counter in range(0,3):
for element in dependencies2:
if len(dependencies2[element]) > counter:
dependencies2[element].extend(dependencies2[dependencies2[element][counter]])
'''
fd = []
for function in functions:
# print function,'---',dependencies2[function.split(' = ' )[0].split('(')[0]],'---',function.split(' = ' )[0].split('(')[0],0
fd.append([function,resolveDependencies(dependencies2,function.split(' = ' )[0].split('(')[0],0)])
fd = sorted(fd,key= lambda rule:rule[1])
functions = [x[0] for x in fd]
return functions
def extractAtoms(species):
'''
given a list of structures, returns a list
of individual molecules/compartment pairs
appends a number for
'''
listOfAtoms = set()
for molecule in species.molecules:
for component in molecule.components:
listOfAtoms.add(tuple([molecule.name,component.name]))
return listOfAtoms
def bondPartners(species,bondNumber):
relevantComponents = []
for molecule in species.molecules:
for component in molecule.components:
if bondNumber in component.bonds:
relevantComponents.append(tuple([molecule.name,component.name]))
return relevantComponents
def getMoleculeByName(species,atom):
'''
returns the state of molecule-component contained in atom
'''
stateVectorVector = []
for molecule in species.molecules:
if molecule.name == atom[0]:
stateVector = []
for component in molecule.components:
if component.name == atom[1]:
#get whatever species this atom is bound to
if len(component.bonds) > 0:
comp = bondPartners(species,component.bonds[0])
comp.remove(atom)
if len(comp) > 0:
stateVector.append(comp[0])
else:
stateVector.append('')
else:
stateVector.append('')
if len(component.states) > 0:
stateVector.append(component.activeState)
else:
stateVector.append('')
stateVectorVector.append(stateVector)
return tuple(stateVectorVector[0])
def extractCompartmentCoIncidence(species):
atomPairDictionary = {}
if [x.name for x in species.molecules] == ['EGF','EGF','EGFR','EGFR']:
pass
for molecule in species.molecules:
for component in molecule.components:
for component2 in molecule.components:
if component == component2:
continue
atom = tuple([molecule.name,component.name])
atom2 = tuple([molecule.name,component2.name])
molId1 = getMoleculeByName(species,atom)
molId2 = getMoleculeByName(species,atom2)
key = tuple([atom,atom2])
#print key,(molId1,molId2)
if key not in atomPairDictionary:
atomPairDictionary[key] = Counter()
atomPairDictionary[key].update([tuple([molId1,molId2])])
return atomPairDictionary
def extractCompartmentStatistics(bioNumber,useID,reactionDefinitions,speciesEquivalence):
'''
Iterate over the translated species and check which compartments
are used together, and how.
'''
reader = libsbml.SBMLReader()
document = reader.readSBMLFromFile(bioNumber)
parser =SBML2BNGL(document.getModel(),useID)
database = structures.Databases()
database.pathwaycommons = False
#call the atomizer (or not)
#if atomize:
translator,onlySynDec = mc.transformMolecules(parser,database,reactionDefinitions,speciesEquivalence)
#else:
# translator={}
compartmentPairs = {}
for element in translator:
temp = extractCompartmentCoIncidence(translator[element])
for element in temp:
if element not in compartmentPairs:
compartmentPairs[element] = temp[element]
else:
compartmentPairs[element].update(temp[element])
finalCompartmentPairs = {}
print '-----'
for element in compartmentPairs:
if element[0][0] not in finalCompartmentPairs:
finalCompartmentPairs[element[0][0]] = {}
finalCompartmentPairs[element[0][0]][tuple([element[0][1],element[1][1]])] = compartmentPairs[element]
return finalCompartmentPairs
def recursiveSearch(dictionary,element,visitedFunctions=[]):
tmp = 0
for item in dictionary[element]:
if dictionary[item] == []:
tmp +=1
else:
if item in visitedFunctions:
raise Exception("Recursive function search landed twice in the same function")
tmp += 1
tmp += (recursiveSearch(dictionary,item,[item] + visitedFunctions))
return tmp
def reorderFunctions(functions):
"""
Analyze a list of sbml functions and make sure there are no forward dependencies.
Reorder if necessary
"""
functionNames = []
tmp = []
for function in functions:
m = re.split('(?<=\()[\w)]', function)
functionName = m[0]
if '=' in functionName:
functionName = functionName.split('=')[0].strip() + '('
functionNames.append(functionName)
functionNamesDict = {x: [] for x in functionNames}
for idx, function in enumerate(functions):
tmp = [x for x in functionNames if x in function.split('=')[1] and x!= functionNames[idx]]
functionNamesDict[functionNames[idx]].extend(tmp)
newFunctionNamesDict = {}
for name in functionNamesDict:
try:
newFunctionNamesDict[name] = recursiveSearch(functionNamesDict,name,[])
#there is a circular dependency
except:
newFunctionNamesDict[name] = 99999
functionWeightsDict = {x: newFunctionNamesDict[x] for x in newFunctionNamesDict}
functionWeights = []
for name in functionNames:
functionWeights.append(functionWeightsDict[name])
tmp = zip(functions, functionWeights)
idx = sorted(tmp, key=lambda x: x[1])
return [x[0] for x in idx]
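# A hedged usage sketch for reorderFunctions, assuming function strings follow
# the 'name() = expression' form used throughout this module (the sample
# strings below are illustrative, not taken from a real model):
def _reorderFunctionsExample():
    # 'g' has no dependencies, 'f' depends on 'g', and 'h' depends on both,
    # so the reordered list comes back as g, then f, then h
    return reorderFunctions(['h() = f() * g()', 'f() = g() + 1', 'g() = 2'])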
def postAnalysisHelper(outputFile, bngLocation, database):
consoleCommands.setBngExecutable(bngLocation)
outputDir = os.sep.join(outputFile.split(os.sep)[:-1])
if outputDir != '':
retval = os.getcwd()
os.chdir(outputDir)
consoleCommands.bngl2xml(outputFile.split(os.sep)[-1])
if outputDir != '':
os.chdir(retval)
bngxmlFile = '.'.join(outputFile.split('.')[:-1]) + '.xml'
#print('Sending BNG-XML file to context analysis engine')
contextAnalysis = postAnalysis.ModelLearning(bngxmlFile)
# analysis of redundant bonds
deleteBonds = contextAnalysis.analyzeRedundantBonds(database.assumptions['redundantBonds'])
for molecule in database.assumptions['redundantBondsMolecules']:
if molecule[0] in deleteBonds:
for bond in deleteBonds[molecule[0]]:
database.translator[molecule[1]].deleteBond(bond)
logMess('INFO:CTX002', 'Used context information to determine that the bond {0} in species {1} is not likely'.format(bond,molecule[1]))
def postAnalyzeFile(outputFile, bngLocation, database):
"""
Performs a postcreation file analysis based on context information
"""
#print('Transforming generated BNG file to BNG-XML representation for analysis')
postAnalysisHelper(outputFile, bngLocation, database)
# recreate file using information from the post analysis
returnArray = analyzeHelper(database.document, database.reactionDefinitions, database.useID,
outputFile, database.speciesEquivalence, database.atomize, database.translator, database)
with open(outputFile, 'w') as f:
f.write(returnArray.finalString)
# recompute bng-xml file
consoleCommands.bngl2xml(outputFile)
bngxmlFile = '.'.join(outputFile.split('.')[:-1]) + '.xml'
# recompute context information
contextAnalysis = postAnalysis.ModelLearning(bngxmlFile)
# get those species patterns that follow uncommon motifs
motifSpecies, motifDefinitions = contextAnalysis.processContextMotifInformation(database.assumptions['lexicalVsstoch'], database)
#motifSpecies, motifDefinitions = contextAnalysis.processAllContextInformation()
if len(motifDefinitions) > 0:
logMess('INFO:CTX003', 'Species with suspect context information were found. Information is being dumped to {0}_context.log'.format(outputFile))
with open('{0}_context.log'.format(outputFile), 'w') as f:
pprint.pprint(dict(motifSpecies), stream=f)
pprint.pprint(motifDefinitions, stream=f)
# score hypothetical bonds
#contextAnalysis.scoreHypotheticalBonds(assumptions['unknownBond'])
def postAnalyzeString(outputFile,bngLocation, database):
postAnalysisHelper(outputFile, bngLocation, database)
# recreate file using information from the post analysis
returnArray = analyzeHelper(database.document, database.reactionDefinitions, database.useID,
outputFile, database.speciesEquivalence, database.atomize, database.translator, database).finalString
return returnArray
def analyzeFile(bioNumber, reactionDefinitions, useID, namingConventions, outputFile,
speciesEquivalence=None, atomize=False, bioGrid=False, pathwaycommons=False, ignore=False, noConversion=False):
'''
    one of the library's main entry points. Processes an SBML file and writes
    the translated BNGL model to outputFile.
'''
'''
import cProfile, pstats, StringIO
pr = cProfile.Profile()
pr.enable()
'''
setupLog(outputFile + '.log', logging.DEBUG)
logMess.log = []
logMess.counter = -1
reader = libsbml.SBMLReader()
document = reader.readSBMLFromFile(bioNumber)
    if document.getModel() is None:
print 'File {0} could not be recognized as a valid SBML file'.format(bioNumber)
return
parser =SBML2BNGL(document.getModel(),useID)
parser.setConversion(not noConversion)
database = structures.Databases()
database.assumptions = defaultdict(set)
database.forceModificationFlag = True
database.pathwaycommons = pathwaycommons
database.ignore = ignore
database.assumptions = defaultdict(set)
bioGridDict = {}
if bioGrid:
bioGridDict = loadBioGrid()
translator = {}
# call the atomizer (or not). structured molecules are contained in translator
# onlysyndec is a boolean saying if a model is just synthesis of decay reactions
try:
if atomize:
translator, onlySynDec = mc.transformMolecules(parser, database, reactionDefinitions,
namingConventions, speciesEquivalence, bioGrid)
except TranslationException as e:
print "Found an error in {0}. Check log for more details. Use -I to ignore translation errors".format(e.value)
if len(logMess.log) > 0:
with open(outputFile + '.log', 'w') as f:
for element in logMess.log:
f.write(element + '\n')
return
# process other sections of the sbml file (functions reactions etc.)
'''
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats(10)
print s.getvalue()
'''
database.document = document
database.reactionDefinitions = reactionDefinitions
database.useID = useID
database.speciesEquivalence = speciesEquivalence
database.atomize = atomize
database.isConversion = not noConversion
returnArray = analyzeHelper(document, reactionDefinitions, useID, outputFile, speciesEquivalence, atomize, translator, database)
with open(outputFile, 'w') as f:
f.write(returnArray.finalString)
#with open('{0}.dict'.format(outputFile),'wb') as f:
# pickle.dump(returnArray[-1],f)
if atomize and onlySynDec:
returnArray = list(returnArray)
#returnArray.translator = -1
returnArray = AnalysisResults(*(list(returnArray[0:-2]) + [database] + [returnArray[-1]]))
return returnArray
def correctRulesWithParenthesis(rules, parameters):
'''
    helper function. Goes through a list of rules and appends '()' to the
    reaction rates of those rules whose rate is a function listed in
    'parameters'.
'''
for idx in range(len(rules)):
tmp = [x for x in parameters if x + ' ' in rules[idx]]
#for tmpparameter in tmp:
# re.sub(r'(\W|^){0}(\W|$)'.format(tmpparameter), r'\1{0}\2'.format(dictionary[key]), tmp[1])
if len(tmp) > 0:
            rules[idx] = rules[idx].strip()
rules[idx] += '()'
def changeNames(functions, dictionary):
'''
    changes instances of keys in dictionary appearing in functions to their
    corresponding alternatives
'''
tmpArray = []
for function in functions:
tmp = function.split(' = ')
# hack to avoid problems with less than equal or more than equal
# in equations
tmp = [tmp[0], ''.join(tmp[1:])]
for key in [x for x in dictionary if x in tmp[1]]:
while re.search(r'([\W,]|^){0}([\W,]|$)'.format(key), tmp[1]):
tmp[1] = re.sub(r'([\W,]|^){0}([\W,]|$)'.format(key), r'\1{0}\2'.format(dictionary[key]), tmp[1])
tmpArray.append('{0} = {1}'.format(tmp[0], tmp[1]))
return tmpArray
def changeRates(reactions, dictionary):
"""
    changes instances of keys in dictionary appearing in reaction rules to
    their corresponding alternatives
"""
tmpArray = []
tmp = None
for reaction in reactions:
tmp = reaction.strip().split(' ')
for key in [x for x in dictionary if x in tmp[-1]]:
tmp[-1] = re.sub(r'(\W|^){0}(\W|$)'.format(key), r'\1{0}\2'.format(dictionary[key]), tmp[-1])
tmpArray.append(' '.join(tmp))
return tmpArray
def unrollFunctions(functions):
flag = True
    # BNGL doesn't accept nested function calls
while(flag):
dictionary = OrderedDict()
flag = False
for function in functions:
tmp = function.split(' = ')
for key in dictionary:
if key in tmp[1]:
tmp[1] = re.sub(r'(\W|^){0}\(\)(\W|$)'.format(key), r'\1({0})\2'.format(dictionary[key]), tmp[1])
flag = False
dictionary[tmp[0].split('()')[0]] = tmp[1]
tmp = []
for key in dictionary:
tmp.append('{0}() = {1}'.format(key, dictionary[key]))
functions = tmp
return functions
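# A hedged usage sketch for unrollFunctions (the sample strings are
# illustrative): the nested call to f() inside g() is replaced by f's
# parenthesized body, which is the flat form BNGL expects.
def _unrollFunctionsExample():
    # returns ['f() = 2 * x', 'g() = (2 * x) + 1']
    return unrollFunctions(['f() = 2 * x', 'g() = f() + 1'])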
def analyzeHelper(document, reactionDefinitions, useID, outputFile, speciesEquivalence, atomize, translator, database, bioGrid=False):
'''
    taking the atomized dictionary and a series of data structures, this method
    produces the actual BNGL string output.
'''
useArtificialRules = False
parser = SBML2BNGL(document.getModel(), useID)
parser.setConversion(database.isConversion)
#database = structures.Databases()
#database.assumptions = defaultdict(set)
#translator,log,rdf = m2c.transformMolecules(parser,database,reactionDefinitions,speciesEquivalence)
#try:
#bioGridDict = {}
#if biogrid:
# bioGridDict = biogrid()
#if atomize:
# translator = mc.transformMolecules(parser,database,reactionDefinitions,speciesEquivalence,bioGridDict)
#else:
# translator={}
#except:
# print 'failure'
# return None,None,None,None
#translator = {}
param,zparam = parser.getParameters()
rawSpecies = {}
for species in parser.model.getListOfSpecies():
rawtemp = parser.getRawSpecies(species,[x.split(' ')[0] for x in param])
rawSpecies[rawtemp['identifier']] = rawtemp
parser.reset()
molecules, initialConditions, observables, speciesDict,\
observablesDict, annotationInfo = parser.getSpecies(translator, [x.split(' ')[0] for x in param])
    # finally, adjust parameters and initial concentrations according to whatever initial assignments say
param, zparam, initialConditions = parser.getInitialAssignments(translator, param, zparam, molecules, initialConditions)
# FIXME: this method is a mess, improve handling of assignmentrules since we can actually handle those
aParameters, aRules, nonzparam, artificialRules, removeParams, artificialObservables = parser.getAssignmentRules(zparam, param, rawSpecies,
observablesDict, translator)
compartments = parser.getCompartments()
functions = []
assigmentRuleDefinedParameters = []
reactionParameters, rules, rateFunctions = parser.getReactions(translator, len(compartments) > 1,
atomize=atomize, parameterFunctions=artificialObservables, database=database)
functions.extend(rateFunctions)
for element in nonzparam:
param.append('{0} 0'.format(element))
param = [x for x in param if x not in removeParams]
tags = '@{0}'.format(compartments[0].split(' ')[0]) if len(compartments) == 1 else '@cell'
molecules.extend([x.split(' ')[0] for x in removeParams])
if len(molecules) == 0:
compartments = []
observables.extend('Species {0} {0}'.format(x.split(' ')[0]) for x in removeParams)
for x in removeParams:
initialConditions.append(x.split(' ')[0] + tags + ' ' + ' '.join(x.split(' ')[1:]))
## Comment out those parameters that are defined with assignment rules
## TODO: I think this is correct, but it may need to be checked
tmpParams = []
for idx, parameter in enumerate(param):
for key in artificialObservables:
if re.search('^{0}\s'.format(key),parameter)!= None:
assigmentRuleDefinedParameters.append(idx)
tmpParams.extend(artificialObservables)
tmpParams.extend(removeParams)
tmpParams = set(tmpParams)
correctRulesWithParenthesis(rules,tmpParams)
for element in assigmentRuleDefinedParameters:
param[element] = '#' + param[element]
deleteMolecules = []
deleteMoleculesFlag = True
for key in artificialObservables:
flag = -1
for idx,observable in enumerate(observables):
if 'Species {0} {0}()'.format(key) in observable:
flag = idx
if flag != -1:
observables.pop(flag)
functions.append(artificialObservables[key])
flag = -1
if '{0}()'.format(key) in molecules:
flag = molecules.index('{0}()'.format(key))
if flag != -1:
if deleteMoleculesFlag:
deleteMolecules.append(flag)
else:
deleteMolecules.append(key)
#result =validateReactionUsage(molecules[flag],rules)
#if result != None:
# logMess('ERROR','Pseudo observable {0} in reaction {1}'.format(molecules[flag],result))
#molecules.pop(flag)
flag = -1
for idx,specie in enumerate(initialConditions):
if ':{0}('.format(key) in specie:
flag = idx
if flag != -1:
initialConditions[flag] = '#' + initialConditions[flag]
for flag in sorted(deleteMolecules,reverse=True):
if deleteMoleculesFlag:
logMess('WARNING:SIM101','{0} reported as function, but usage is ambiguous'.format(molecules[flag]) )
result = validateReactionUsage(molecules[flag], rules)
if result is not None:
logMess('ERROR:Simulation','Pseudo observable {0} in reaction {1}'.format(molecules[flag],result))
#since we are considering it an observable delete it from the molecule and
#initial conditions list
#s = molecules.pop(flag)
#initialConditions = [x for x in initialConditions if '$' + s not in x]
else:
logMess('WARNING:SIM101','{0} reported as species, but usage is ambiguous.'.format(flag) )
artificialObservables.pop(flag)
sbmlfunctions = parser.getSBMLFunctions()
functions.extend(aRules)
#print functions
processFunctions(functions,sbmlfunctions,artificialObservables,rateFunctions)
    for iteration in range(0, 3):
for sbml2 in sbmlfunctions:
for sbml in sbmlfunctions:
if sbml == sbml2:
continue
if sbml in sbmlfunctions[sbml2]:
sbmlfunctions[sbml2] = writer.extendFunction(sbmlfunctions[sbml2],sbml,sbmlfunctions[sbml])
functions = reorderFunctions(functions)
functions = changeNames(functions, aParameters)
# change reference for observables with compartment name
functions = changeNames(functions, observablesDict)
# print [x for x in functions if 'functionRate60' in x]
functions = unrollFunctions(functions)
rules = changeRates(rules, aParameters)
if len(compartments) > 1 and 'cell 3 1.0' not in compartments:
compartments.append('cell 3 1.0')
#sbml always has the 'cell' default compartment, even when it
#doesn't declare it
elif len(compartments) == 0 and len(molecules) != 0:
compartments.append('cell 3 1.0')
if len(artificialRules) + len(rules) == 0:
logMess('ERROR:SIM203','The file contains no reactions')
if useArtificialRules or len(rules) == 0:
rules =['#{0}'.format(x) for x in rules]
evaluate = evaluation(len(observables),translator)
artificialRules.extend(rules)
rules = artificialRules
else:
artificialRules =['#{0}'.format(x) for x in artificialRules]
evaluate = evaluation(len(observables),translator)
rules.extend(artificialRules)
commentDictionary = {}
if atomize:
commentDictionary['notes'] = "'This is an atomized translation of an SBML model created on {0}.".format(time.strftime("%d/%m/%Y"))
else:
commentDictionary['notes'] = "'This is a plain translation of an SBML model created on {0}.".format(time.strftime("%d/%m/%Y"))
commentDictionary['notes'] += " The original model has {0} molecules and {1} reactions. The translated model has {2} molecules and {3} rules'".format(parser.model.getNumSpecies(),parser.model.getNumReactions(),len(molecules),len(set(rules)))
meta = parser.getMetaInformation(commentDictionary)
finalString = writer.finalText(meta, param + reactionParameters, molecules, initialConditions,
list(OrderedDict.fromkeys(observables)), list(OrderedDict.fromkeys(rules)), functions, compartments,
annotationInfo, outputFile)
logMess('INFO:SUM001','File contains {0} molecules out of {1} original SBML species'.format(len(molecules), len(observables)))
# rate of each classified rule
evaluate2 = 0 if len(observables) == 0 else len(molecules)*1.0/len(observables)
# add unit information to annotations
annotationInfo['units'] = parser.getUnitDefinitions()
return AnalysisResults(len(rules), len(observables), evaluate, evaluate2, len(compartments),
parser.getSpeciesAnnotation(), finalString, speciesDict, None, annotationInfo)
'''
if translator != {}:
for element in database.classifications:
if element not in classificationDict:
classificationDict[element] = 0.0
classificationDict[element] += 1.0/len(database.classifications)
return len(rules), evaluate,parser.getModelAnnotation(),classificationDict
'''
#return None,None,None,None
def processFile(translator, parser, outputFile):
param2 = parser.getParameters()
molecules, species, observables, observablesDict = parser.getSpecies(translator)
compartments = parser.getCompartments()
param, rules, functions = parser.getReactions(translator, True)
param += param2
writer.finalText(param, molecules, species, observables, rules,
functions, compartments, {}, outputFile)
def BNGL2XML():
pass
def getAnnotations(annotation):
annotationDictionary = []
if annotation == [] or annotation is None:
return []
for indivAnnotation in annotation:
for index in range(0, indivAnnotation.getNumAttributes()):
annotationDictionary.append(indivAnnotation.getValue(index))
return annotationDictionary
def getAnnotationsDict(annotation):
annotationDict = {}
for element in annotation:
annotationDict[element] = getAnnotations(annotation[element])
return annotationDict
def processFile2():
for bioNumber in [19]:
#if bioNumber in [398]:
# continue
#bioNumber = 175
logMess.log = []
logMess.counter = -1
reactionDefinitions,useID,naming = selectReactionDefinitions('BIOMD%010i.xml' %bioNumber)
print reactionDefinitions, useID
#reactionDefinitions = 'reactionDefinitions/reactionDefinition7.json'
#spEquivalence = 'reactionDefinitions/speciesEquivalence19.json'
spEquivalence = detectCustomDefinitions(bioNumber)
print spEquivalence
useID = False
#reactionDefinitions = 'reactionDefinitions/reactionDefinition9.json'
outputFile = 'complex/output' + str(bioNumber) + '.bngl'
analyzeFile('XMLExamples/curated/BIOMD%010i.xml' % bioNumber, reactionDefinitions,
useID,naming,outputFile,speciesEquivalence=spEquivalence,atomize=True,bioGrid=True)
if len(logMess.log) > 0:
with open(outputFile + '.log', 'w') as f:
for element in logMess.log:
f.write(element + '\n')
def detectCustomDefinitions(bioNumber):
'''
    returns a speciesEquivalence<bioNumber>.json file name if one exists
    for the current BioModels entry, None otherwise
'''
directory = 'reactionDefinitions'
onlyfiles = [ f for f in listdir('./' + directory)]
if 'speciesEquivalence{0}.json'.format(bioNumber) in onlyfiles:
return '{0}/speciesEquivalence{1}.json'.format(directory,bioNumber)
return None
import pyparsing
def main():
    jsonFiles = [f for f in listdir('./reactionDefinitions') if f.endswith('.json')]
jsonFiles.sort()
parser = OptionParser()
rulesLength = []
evaluation = []
evaluation2 = []
compartmentLength = []
parser.add_option("-i","--input",dest="input",
default='XMLExamples/curated/BIOMD0000000272.xml',type="string",
help="The input SBML file in xml format. Default = 'input.xml'",metavar="FILE")
parser.add_option("-o","--output",dest="output",
default='output.bngl',type="string",
help="the output file where we will store our matrix. Default = output.bngl",metavar="FILE")
(options, _) = parser.parse_args()
#144
rdfArray = []
#classificationArray = []
#18,32,87,88,91,109,253,255,268,338,330
#normal:51,353
#cycles 18,108,109,255,268,392
import progressbar
progress = progressbar.ProgressBar()
sbmlFiles = getFiles('XMLExamples/curated', 'xml')
for bioIdx in progress(range(len(sbmlFiles))):
bioNumber = sbmlFiles[bioIdx]
#if bioNumber in [81,151,175,205,212,223,235,255,326,328,347,370,404,428,430,431,443,444,452,453,465,474]:
# continue
#bioNumber = 175
logMess.log = []
logMess.counter = -1
#reactionDefinitions,useID,naming = selectReactionDefinitions('BIOMD%010i.xml' %bioNumber)
#print reactionDefinitions, useID
#reactionDefinitions = 'reactionDefinitions/reactionDefinition7.json'
#spEquivalence = 'reactionDefinitions/speciesEquivalence19.json'
#spEquivalence = naming
#reactionDefinitions = 'reactionDefinitions/reactionDefinition8.json'
#rlength, reval, reval2, clength,rdf = analyzeFile('XMLExamples/curated/BIOMD%010i.xml' % bioNumber,
# reactionDefinitions,False,'complex/output' + str(bioNumber) + '.bngl',
# speciesEquivalence=spEquivalence,atomize=True)
        analysisResults = None
        try:
fileName = bioNumber.split('/')[-1]
rlength = reval = reval2 = slength = None
analysisResults = analyzeFile(bioNumber, resource_path('config/reactionDefinitions.json'),
False,resource_path('config/namingConventions.json'),
#'/dev/null',
'complex2/' + fileName + '.bngl',
speciesEquivalence=None,atomize=True,bioGrid=False)
#rlength, slength,reval, reval2, clength,rdf, _, _ = analysisResults
#print '++++',bioNumber,rlength,reval,reval2,clength
except KeyError:
print 'keyErrorerror--------------',bioNumber
continue
except OverflowError:
print 'overFlowerror--------------',bioNumber
continue
except ValueError:
print 'valueError',bioNumber
except pyparsing.ParseException:
print 'pyparsing',bioNumber
finally:
            if analysisResults is not None and analysisResults.rlength is not None:
rulesLength.append({'index':bioNumber,'nreactions': analysisResults.rlength,
'atomization':analysisResults.reval,'compression': analysisResults.reval2,
'nspecies':analysisResults.slength})
compartmentLength.append(analysisResults.clength)
rdfArray.append(getAnnotationsDict(analysisResults.rdf))
else:
rulesLength.append([bioNumber,-1,0,0])
compartmentLength.append(0)
rdfArray.append({})
#classificationArray.append({})
#print evaluation
#print evaluation2
#sortedCurated = [i for i in enumerate(evaluation), key=lambda x:x[1]]
print [(idx+1,x) for idx,x in enumerate(rulesLength) if x > 50]
with open('sortedD.dump','wb') as f:
pickle.dump(rulesLength,f)
with open('annotations.dump','wb') as f:
pickle.dump(rdfArray,f)
#with open('classificationDict.dump','wb') as f:
# pickle.dump(classificationArray,f)
'''
plt.hist(rulesLength,bins=[10,30,50,70,90,110,140,180,250,400])
plt.xlabel('Number of reactions',fontsize=18)
plt.savefig('lengthDistro.png')
plt.clf()
plt.hist(evaluation, bins=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,
0.8, 0.9, 1.0])
plt.xlabel('Atomization Degree',fontsize=18)
plt.savefig('ruleifyDistro.png')
plt.clf()
plt.hist(evaluation2, bins=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7,
0.8, 0.9, 1.0])
plt.xlabel('Atomization Degree', fontsize=18)
plt.savefig('ruleifyDistro2.png')
plt.clf()
ev = []
idx = 1
for x, y, z in zip(rulesLength, evaluation, compartmentLength):
if idx in [18, 51, 353, 108, 109, 255, 268, 392]:
idx+=1
if x < 15 and y > 0.7 and z>1:
print '---',idx,x,y
idx+=1
#plt.hist(ev,bins =[0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0])
#plt.xlabel('Atomization Degree',fontsize=18)
#plt.savefig('ruleifyDistro3.png')
'''
def main2():
with open('../XMLExamples/curated/BIOMD0000000163.xml','r') as f:
st = f.read()
import StringIO
stringBuffer = StringIO.StringIO()
jsonPointer = 'reactionDefinitions/speciesEquivalence163.json'
readFromString(st, resource_path('config/reactionDefinitions.json'),False,jsonPointer,True,stringBuffer)
print stringBuffer.getvalue()
def isActivated(statusVector):
if statusVector[0] != '' or statusVector[1] not in ['','0']:
return True
return False
def flatStatusVector(statusVector):
if statusVector[0] != '':
return '!'
return statusVector[1]
'''
def xorBox(status1,status2):
return not(status1 & status2)
def orBox(status1,status2):
return (status1,status2)
def totalEnumerations(pairList):
xCoordinate = set()
yCoordinate = set()
for element in pairList:
xCoordinate.add(element[0])
yCoordinate.add(element[1])
xCoordinate = list(xCoordinate)
yCoordinate = list(yCoordinate)
matrix = np.zeros((len(xCoordinate),len(yCoordinate)))
for element in pairList:
matrix[xCoordinate.index(element[0])][yCoordinate.index(element[1])] = 1
return np.all(np.all(matrix))
'''
def getRelationshipDegree(componentPair,statusQueryFunction,comparisonFunction,finalComparison):
componentPairRelationshipDict = {}
for pair in componentPair:
stats = []
for state in componentPair[pair]:
status1 = statusQueryFunction(state[0])
status2 = statusQueryFunction(state[1])
comparison = comparisonFunction(status1,status2)
stats.append(comparison)
if finalComparison(stats):
print pair,componentPair[pair]
componentPairRelationshipDict[pair] = finalComparison(stats)
return componentPairRelationshipDict
'''
def createPlot(labelDict):
#f, ax = plt.subplots(int(math.ceil(len(labelDict)/4)),4)
for idx,element in enumerate(labelDict):
plt.cla()
tmp = list(set([y for x in labelDict[element] for y in x]))
xaxis = [tmp.index(x[0]) for x in labelDict[element] if labelDict[element][x]== True]
yaxis = [tmp.index(x[1]) for x in labelDict[element] if labelDict[element][x] == True]
#6print tmp,xaxis,yaxis
plt.scatter(xaxis,yaxis)
plt.xticks(range(len(tmp)),tmp)
plt.yticks(range(len(tmp)),tmp)
plt.title(element)
#ax[math.floor(idx/4)][idx%4].scatter(xaxis,yaxis)
#ax[math.floor(idx/4)][idx%4].xticks(range(len(tmp)),tmp)
#ax[math.floor(idx/4)][idx%4].yticks(range(len(tmp)),tmp)
#ax[math.floor(idx/4)][idx%4].title(element)
plt.savefig('{0}.png'.format(element))
print '{0}.png'.format(element)
'''
'''
def statFiles():
for bioNumber in [19]:
reactionDefinitions,useID = selectReactionDefinitions('BIOMD%010i.xml' %bioNumber)
#speciesEquivalence = None
speciesEquivalence = 'reactionDefinitions/speciesEquivalence19.json'
componentPairs = extractCompartmentStatistics('XMLExamples/curated/BIOMD%010i.xml' % bioNumber,useID,reactionDefinitions,speciesEquivalence)
#analyze the relationship degree betweeen the components of each molecule
#in this case we are analyzing for orBoxes, or components
#that completely exclude each other
xorBoxDict = {}
orBoxDict = {}
for molecule in componentPairs:
xorBoxDict[molecule] = getRelationshipDegree(componentPairs[molecule],isActivated,xorBox,all)
#print '----------------------',molecule,'---------'
orBoxDict[molecule] = getRelationshipDegree(componentPairs[molecule],flatStatusVector,orBox,totalEnumerations)
#createPlot(orBoxDict)
box = []
box.append(xorBoxDict)
#box.append(orBoxDict)
with open('orBox{0}.dump'.format(bioNumber),'wb') as f:
pickle.dump(box,f)
'''
def processDir(directory,atomize=True):
from os import listdir
from os.path import isfile, join
resultDir = {}
xmlFiles = [ f for f in listdir('./' + directory) if isfile(join('./' + directory,f)) and f.endswith('xml')]
blackList = [175,205,212,223,235,255,328,370,428,430,431,443,444,452,453,465]
for xml in xmlFiles:
#try:
if xml not in ['MODEL1310110034.xml'] and len([x for x in blackList if str(x) in xml]) == 0:
print xml
try:
analysisResults = analyzeFile(directory + xml,'reactionDefinitions/reactionDefinition7.json',
False, resource_path('config/namingConventions.json'),
'/dev/null', speciesEquivalence=None,atomize=True,bioGrid=False)
resultDir[xml] = [analysisResults.rlength,analysisResults.reval,analysisResults.reval2]
except:
resultDir[xml] = [-1, 0, 0]
with open('evalResults.dump','wb') as f:
pickle.dump(resultDir,f)
#except:
    #continue
def processFile3(fileName,customDefinitions=None,atomize=True,bioGrid=False,output=None):
'''
    processes a single SBML file using the default reaction definitions and
    naming conventions.
'''
logMess.log = []
logMess.counter = -1
reactionDefinitions = resource_path('config/reactionDefinitions.json')
spEquivalence = customDefinitions
namingConventions = resource_path('config/namingConventions.json')
#spEquivalence = None
useID = False
#reactionDefinitions = 'reactionDefinitions/reactionDefinition9.json'
#rlength = -1
#reval = -1
#reval2 = -1
if output:
outputFile = output
else:
outputFile = '{0}.bngl'.format(fileName)
analysisResults = analyzeFile(fileName, reactionDefinitions,
useID,namingConventions,outputFile,speciesEquivalence=spEquivalence,atomize=atomize,bioGrid=bioGrid)
if len(logMess.log) > 0:
with open(fileName + '.log', 'w') as f:
for element in logMess.log:
f.write(element + '\n')
return analysisResults.rlength, analysisResults.reval, analysisResults.reval2
def listFiles(minReactions,directory):
'''
List of SBML files that meet a given condition
'''
from os import listdir
from os.path import isfile, join
xmlFiles = [ f for f in listdir('./' + directory) if isfile(join('./' + directory,f)) and 'xml' in f]
outputList = []
for xml in xmlFiles:
print '.',
reader = libsbml.SBMLReader()
document = reader.readSBMLFromFile(directory + xml)
model = document.getModel()
        if model is None:
continue
if len(model.getListOfReactions()) > minReactions:
outputList.append(xml)
    print len(outputList)
    return outputList
if __name__ == "__main__":
#identifyNamingConvention()
#processDatabase()
#main2()
'''
analyzeFile('../XMLExamples/curated/BIOMD0000000007.xml', resource_path('config/reactionDefinitions.json'),
False, resource_path('config/namingConventions.json'),
'BIOMD0000000027.xml' + '.bngl',
speciesEquivalence=None,atomize=True,bioGrid=False)
'''
main2()
#processFile3('XMLExamples/noncurated/MODEL2463576061.x5ml')
#processFile3('XMLExamples/jws/dupreez2.xml')
#processFile3('XMLExamples/non_curated/MODEL1012220002.xml')
#output=48
#processFile3('XMLExamples/curated/BIOMD00000000151.xml',bioGrid=False)
#param = [452]
'''
param = 2
#use 105 as an example for (2,2) reactions
#527
analyzeFile('XMLExamples/curated/BIOMD%010i.xml' % param, resource_path('config/reactionDefinitions.json'),
False, resource_path('config/namingConventions.json'),
'complex2/output' + str(param) + '.bngl', speciesEquivalence=None,atomize=True,bioGrid=False)
'''
'''
analyzeFile('plain2_sbml.xml', resource_path('config/reactionDefinitions.json'),
False, resource_path('config/namingConventions.json'),
'''
'''
analyzeFile('XMLExamples/BMID000000142971.xml', resource_path('config/reactionDefinitions.json'),
False, resource_path('config/namingConventions.json'),
'complex/BMID000000142971.xml' + '.bngl', speciesEquivalence=None,atomize=True,bioGrid=False)
'''
'''
param = '00870'
analyzeFile('test/testl2v4/{0}/{0}-sbml-l2v4.xml'.format(param), 'reactionDefinitions/reactionDefinition7.json',
False, resource_path('config/namingConventions.json'),
'complex/output' + str(param) + '.bngl', speciesEquivalence=None,atomize=True,bioGrid=False)
'''
#processFile3('XMLExamples/curated/BIOMD0000000048.xml',customDefinitions=None,atomize=True)
#processFile3('/home/proto/Downloads/compartment_test_sbml.xml',customDefinitions=None,atomize=True)
#processDir('XMLExamples/curated/')
#processFile3('hexamer.xml')
#with open('dimer.xml','r') as f:
# r = f.read()
#print readFromString(r,resource_path('config/reactionDefinitions.json'),False,None,True)
#statFiles()
#main2()
#print readFromString('dsfsdf',resource_path('config/reactionDefinitions.json'),False)
#processFile2()
#listFiles(50,'./XMLExamples/curated/')
    #todo: some of the assignmentRules defined must be used instead of parameters. remove from the parameter
    #definitions those that are defined as 0
#2:figure out which assignment rules are being used in reactions. Done before the substitution for id;s
#http://nullege.com/codes/show/src@s@e@semanticsbml-HEAD@semanticSBML@annotate.py
#http://wiki.geneontology.org/index.php/Example_Queries#Find_terms_by_GO_ID
#http://www.geneontology.org/GO.database.shtml
|
# -*- coding: utf-8 -*-
# @File Name: ikAPI.py
# @File Path: M:\MAS2\PRM_Robotic_Arm\Klampt_Robotic_Arm_Script\ikMount\ikAPI.py
# @Author: Ruige_Lee
# @Date: 2019-04-24 19:15:24
# @Last Modified by: Ruige_Lee
# @Last Modified time: 2019-05-31 16:15:56
# @Email: 295054118@whut.edu.cn
# @page: https://whutddk.github.io/
from klampt import *
from klampt.model.collide import *
import sys
import time
from klampt.sim import *
from klampt import vis
from klampt.model import ik
import random
from math import *
if __name__ == "__main__":
world = WorldModel()
res = world.readFile('../dual_anno_check.xml')
if not res:
raise RuntimeError("Unable to load model ")
del res
prmRobot = world.robot(0)
ctlRobot = world.robot(1)
vis.add("world",world)
vis.show()
# collisionTest = WorldCollider(world)
prmRobotPose = RobotPoser(prmRobot)
ctlRobotPose = RobotPoser(ctlRobot)
robot= world.robot(0)
link = prmRobot.link(7)
# obj = ik.objective(link,R=[1,0,0,0,0,1,0,-1,0],t=[0.3,0,0])
# solver = ik.solver(obj)
# solver.solve()
# for h in range(0,53):
	for h in range(0, 10):
obj = ik.objective(link,R=[1,0,0,0,0,1,0,-1,0],t=[0.01,0.3+h*0.01,0.1])
solver = ik.solver(obj)
solver.solve()
prmRobotPose.set(robot.getConfig())
theta1 = robot.getConfig()[1]
theta2 = robot.getConfig()[2]
theta3 = robot.getConfig()[3]
theta4 = robot.getConfig()[4]
theta5 = robot.getConfig()[5]
theta6 = robot.getConfig()[6]
prmRobotPose.set([0,theta1,theta2,theta3,theta4,theta5,theta6,0])
print (theta1,theta2,theta3,theta4,theta5,theta6)
ctlRobotPose.set([0,0,0.4,0.4,0,0,0,0])
time.sleep(0.1)
	while True:
		time.sleep(0.1)
|
from __future__ import division, print_function, absolute_import
|
import sys
BASE = 16777216 # 2**24
def random_n(n, base=BASE):
if n <= 0:
return 1
else:
return (161 * random_n(n-1) + 2457) % base
def main():
m = 100
even = 0
for n in range(m):
if n % 2 == 1:
result = random_n(n)
if result % 2 == 0:
even += 1
print(even)
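# A hedged sketch of an iterative equivalent: random_n recomputes the whole
# chain on every call, so the loop in main() does quadratic work overall; one
# pass over the same LCG recurrence (x -> (161 * x + 2457) % base) is linear.
def random_sequence(count, base=BASE):
    x = 1  # matches random_n(0), the seed used above
    for _ in range(count):
        yield x
        x = (161 * x + 2457) % base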
if __name__ == '__main__':
main()
# import cProfile
# cProfile.run('main()') |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Func, IntegerField, Sum
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView
from rest_framework.response import Response
from rest_framework.views import APIView
from beyondtheadmin.companies.forms import CompanyForm
from beyondtheadmin.companies.models import Company
from beyondtheadmin.invoices.models import Invoice
class DashboardView(LoginRequiredMixin, TemplateView):
template_name = 'dashboard/dashboard.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['companies'] = Company.objects.filter(users=self.request.user)
context['company_form'] = CompanyForm()
return context
class Month(Func):
function = 'EXTRACT'
template = '%(function)s(MONTH from %(expressions)s)'
output_field = IntegerField()
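# Month annotates a date field with its month number: for example,
# Invoice.objects.annotate(month=Month('displayed_date')) emits
# EXTRACT(MONTH from "displayed_date") in the generated SQL, which
# ProfitView below relies on for its per-month aggregation.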
class OpenInvoicesView(APIView):
def get(self, request, company_pk=None, format=None):
current_date = now()
waiting_invoices_qs = Invoice.sent.filter(due_date__gte=current_date)
overdue_invoices_qs = Invoice.sent.filter(due_date__lt=current_date)
try:
company_obj = Company.objects.get(pk=company_pk, users=request.user)
waiting_invoices_qs = waiting_invoices_qs.filter(company=company_obj)
overdue_invoices_qs = overdue_invoices_qs.filter(company=company_obj)
except (Company.DoesNotExist, ValueError):
waiting_invoices_qs = waiting_invoices_qs.filter(company__users=self.request.user)
overdue_invoices_qs = overdue_invoices_qs.filter(company__users=self.request.user)
waiting_invoices = waiting_invoices_qs.aggregate(total=Sum('total')).get('total', 0) or 0
overdue_invoices = overdue_invoices_qs.aggregate(total=Sum('total')).get('total', 0) or 0
return Response({
'total': waiting_invoices + overdue_invoices,
'waiting': waiting_invoices,
'overdue': overdue_invoices,
})
class ProfitView(APIView):
def get(self, request, company_pk, format=None):
company_obj = get_object_or_404(Company.objects.filter(users=request.user), pk=company_pk)
year = self.request.query_params.get('year', '')
try:
year = int(year)
except ValueError:
year = None
if year is None:
year = now().year
invoices = Invoice.objects.filter(company=company_obj, displayed_date__year=year)\
.annotate(month=Month('displayed_date'))\
.values('month')\
.annotate(monthly_total=Sum('total'))\
.order_by('month')
months = dict(invoices.values_list('month', 'monthly_total'))
datasets = [
{
'label': _("Earnings"),
'data': [months.get(i, 0) for i in range(1, 13)]
}
]
labels = [_("January"), _("February"), _("March"), _("April"), _("May"), _("June"), _("July"), _("August"),
_("September"), _("October"), _("November"), _("December")]
total = sum([invoice.get('monthly_total') for invoice in invoices])
return Response({
'total': total,
'monthly_sums': {'labels': labels, 'datasets': datasets}
})
|
from rest_framework import serializers
from department.models import Department
class DepartmentSerializer(serializers.ModelSerializer):
class Meta:
model = Department
fields = (
'id',
'name',
'location',
'latitude',
'longitude'
)
|
from .connection import RexProSyncSocket, RexProSyncConnectionPool, RexProSyncConnection
|
import logging
import time
from typing import TYPE_CHECKING, Optional
import psutil
from monalyza.monitoring import scheduler
if TYPE_CHECKING:
from monalyza.monitoring.buffer import Buffer
class SingleProcessMonitoring:
"""Monitor a single process without its children."""
def __init__(self,
pid: int,
interval: float = None,
buffer: 'Buffer' = None,
hide_headers: bool = False) -> None:
self.initial_start_time = 0
self.pid = pid
self.first_run = not hide_headers
logging.debug('Initializing single process monitoring.')
self.buffer = buffer
if interval is not None:
self.scheduler = scheduler.Scheduler(interval)
else:
self.scheduler = None
def run_repeatedly(self, read_memory: bool = False,
read_cpu: bool = False) -> None:
"""Measure resource consumption of a specific process repeatedly."""
if self.scheduler is None or self.buffer is None:
raise UnboundLocalError(
                'Unexpected value:',
self.buffer,
self.scheduler,
'Either interval or buffer_size_mb is missing')
try:
resource_info = self.read_resource(read_memory, read_cpu)
except psutil.NoSuchProcess:
self.scheduler.cancel_scheduler()
self.buffer.write_data()
else:
if self.first_run:
headers = None
if read_memory and read_cpu:
headers = ('step', 'time', 'pid', 'memory', 'cpu',
'status')
elif read_memory:
headers = ('step', 'time', 'pid', 'memory', 'status')
elif read_cpu:
headers = ('step', 'time', 'pid', 'cpu', 'status')
self.buffer.append_to_buffer(headers)
self.first_run = False
self.buffer.append_to_buffer(resource_info)
self.scheduler.schedule(self.run_repeatedly,
read_memory=read_memory,
read_cpu=read_cpu)
    def read_resource(self, read_memory: bool = False,
                      read_cpu: bool = False) -> Optional[tuple]:
"""Measure resource consumption of a specific process."""
resource_info = None
try:
process = psutil.Process(self.pid)
            # use a name that does not shadow the time module
            timestamp = self.generate_timestamp()
            if read_memory and read_cpu:
                resource_info = (timestamp[0],
                                 timestamp[1],
                                 process.pid,
                                 process.memory_info()[0],
                                 process.cpu_percent(interval=0.1),
                                 process.status())
            elif read_memory:
                resource_info = (timestamp[0],
                                 timestamp[1],
                                 process.pid,
                                 process.memory_info()[0],
                                 process.status())
            elif read_cpu:
                resource_info = (timestamp[0],
                                 timestamp[1],
                                 process.pid,
                                 process.cpu_percent(interval=1),
                                 process.status())
except psutil.NoSuchProcess:
logging.error('Process %s no longer exists.', self.pid)
raise
else:
return resource_info
def generate_timestamp(self) -> tuple:
"""Get time and time difference in seconds, since initial time."""
current_time = int(time.time())
if self.initial_start_time == 0:
self.initial_start_time = current_time
return (current_time - self.initial_start_time, current_time)
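def _example_monitoring(pid: int, buffer: 'Buffer') -> None:
    """A hedged usage sketch: sample one process every second, recording both
    memory and CPU (the buffer argument is assumed to be an already configured
    Buffer instance from monalyza.monitoring.buffer)."""
    monitor = SingleProcessMonitoring(pid=pid, interval=1.0, buffer=buffer)
    monitor.run_repeatedly(read_memory=True, read_cpu=True)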
|
import numpy as np
import cv2
import logging
def get_keypoints_from_indices(keypoints1, index_list1, keypoints2, index_list2):
"""Filters a list of keypoints based on the indices given"""
points1 = np.array([kp.pt for kp in keypoints1])[index_list1]
points2 = np.array([kp.pt for kp in keypoints2])[index_list2]
return points1, points2
def get_3D_point(u1, P1, u2, P2):
"""Solves for 3D point using homogeneous 2D points and the respective camera matrices"""
A = np.array([[u1[0] * P1[2, 0] - P1[0, 0], u1[0] * P1[2, 1] - P1[0, 1], u1[0] * P1[2, 2] - P1[0, 2]],
[u1[1] * P1[2, 0] - P1[1, 0], u1[1] * P1[2, 1] - P1[1, 1], u1[1] * P1[2, 2] - P1[1, 2]],
[u2[0] * P2[2, 0] - P2[0, 0], u2[0] * P2[2, 1] - P2[0, 1], u2[0] * P2[2, 2] - P2[0, 2]],
[u2[1] * P2[2, 0] - P2[1, 0], u2[1] * P2[2, 1] - P2[1, 1], u2[1] * P2[2, 2] - P2[1, 2]]])
B = np.array([-(u1[0] * P1[2, 3] - P1[0, 3]),
-(u1[1] * P1[2, 3] - P1[1, 3]),
-(u2[0] * P2[2, 3] - P2[0, 3]),
-(u2[1] * P2[2, 3] - P2[1, 3])])
X = cv2.solve(A, B, flags=cv2.DECOMP_SVD)
return X[1]
def remove_outliers_using_F(view1, view2, match_object):
"""Removes outlier keypoints using the fundamental matrix"""
pixel_points1, pixel_points2 = get_keypoints_from_indices(keypoints1=view1.keypoints,
keypoints2=view2.keypoints,
index_list1=match_object.indices1,
index_list2=match_object.indices2)
F, mask = cv2.findFundamentalMat(pixel_points1, pixel_points2, method=cv2.FM_RANSAC,
ransacReprojThreshold=0.9, confidence=0.99)
mask = mask.astype(bool).flatten()
match_object.inliers1 = np.array(match_object.indices1)[mask]
match_object.inliers2 = np.array(match_object.indices2)[mask]
return F
def calculate_reprojection_error(point_3D, point_2D, K, R, t):
"""Calculates the reprojection error for a 3D point by projecting it back into the image plane"""
reprojected_point = K.dot(R.dot(point_3D) + t)
reprojected_point = cv2.convertPointsFromHomogeneous(reprojected_point.T)[:, 0, :].T
error = np.linalg.norm(point_2D.reshape((2, 1)) - reprojected_point)
return error
def get_camera_from_E(E):
"""Calculates rotation and translation component from essential matrix"""
W = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
W_t = W.T
u, w, vt = np.linalg.svd(E)
R1 = u @ W @ vt
R2 = u @ W_t @ vt
t1 = u[:, -1].reshape((3, 1))
t2 = - t1
return R1, R2, t1, t2
def check_determinant(R):
"""Validates using the determinant of the rotation matrix"""
if np.linalg.det(R) + 1.0 < 1e-9:
return False
else:
return True
def check_triangulation(points, P):
"""Checks whether reconstructed points lie in front of the camera"""
P = np.vstack((P, np.array([0, 0, 0, 1])))
reprojected_points = cv2.perspectiveTransform(src=points[np.newaxis], m=P)
z = reprojected_points[0, :, -1]
if (np.sum(z > 0)/z.shape[0]) < 0.75:
return False
else:
return True
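def select_camera_pose(E, K, points1, points2):
    """A hedged sketch combining the helpers above: enumerate the four (R, t)
    candidates from the essential matrix and keep the pair whose triangulated
    points lie in front of the camera. The triangulation call is illustrative;
    the surrounding pipeline may triangulate differently."""
    R1, R2, t1, t2 = get_camera_from_E(E)
    P0 = np.hstack((np.eye(3), np.zeros((3, 1))))  # reference camera at origin
    pts1 = np.asarray(points1, dtype=float).T  # 2xN pixel coordinates
    pts2 = np.asarray(points2, dtype=float).T
    for R, t in ((R1, t1), (R1, t2), (R2, t1), (R2, t2)):
        if not check_determinant(R):
            continue
        P1 = np.hstack((R, t))
        points_4d = cv2.triangulatePoints(K @ P0, K @ P1, pts1, pts2)
        points_3d = (points_4d[:3] / points_4d[3]).T  # dehomogenize to Nx3
        if check_triangulation(points_3d, P1):
            return R, t
    return None, None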
|
import cv2 as cv
class Text:
def addText(self, path, content, fontScale=5, x=0, y=0, color=(255, 255, 255), thickness=1):
img = cv.imread(path)
font = cv.FONT_HERSHEY_SIMPLEX
img = cv.putText(img, content, (x, y), font, fontScale, color, thickness, cv.LINE_AA)
cv.imwrite(path, img)
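# A hedged usage sketch (the file path, text, and coordinates are illustrative):
#
#   Text().addText('photo.jpg', 'Hello', fontScale=2, x=40, y=80,
#                  color=(0, 255, 0), thickness=2)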
|
from pylexibank.providers.qlc import QLC
def test_QLC():
assert QLC
|
from .loopedit import LoopeditPSAfile
from .derive import DerivePSAfile
from .psa_plot_file import PlotPSAfile
from .seasave import SeasavePSAfile
|
from django.urls import path
from .views import MyView
urlpatterns = [
path('', MyView.as_view(template_name='index.html'), name='index'),
]
|
'''
Demos for designing, developing and demonstrating Azure CLI.
'''
from .. pyaz_utils import _call_az
def style(theme=None):
'''
A demo showing supported text styles.
Optional Parameters:
- theme -- The theme to format styled text. If unspecified, the default theme is used.
'''
return _call_az("az demo style", locals())
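# A hedged usage sketch (the theme name is illustrative and the call assumes
# the az CLI is available on PATH):
#
#   style(theme='dark')   # equivalent to: az demo style --theme dark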
|
from .base import BaseModule
class Chat(BaseModule):
def get_message(self, **kwargs):
"""
To get messages history for a specific conversation, which can display the messages detail from sender and receiver.
:param kwargs
- offset
- page_size
- conversation_id (Required)
:return
https://open.shopee.com/documents?module=109&type=1&id=671&version=2
"""
return self.client.execute("sellerchat/get_message", "GET", kwargs)
def send_message(self, **kwargs):
"""
1.To send a message and select the correct message type (Do not use this API to send batch messages) 2.Currently TW region is not supported to send messages.
:param kwargs
- to_id (Required)
- message_type (Required)
- content (Required)
- text
- sticker_id
- sticker_package_id
- image_url
- item_id
- order_sn
:return
https://open.shopee.com/documents?module=109&type=1&id=672&version=2
"""
return self.client.execute("sellerchat/send_message", "POST", kwargs)
def get_conversation_list(self, **kwargs):
"""
To get conversation list and its params data
:param kwargs
- direction (Required)
- type (Required)
- next_timestamp_nano
- page_size
:return
https://open.shopee.com/documents?module=109&type=1&id=673&version=2
"""
return self.client.execute("sellerchat/get_conversation_list", "GET", kwargs)
def get_one_conversation(self, **kwargs):
"""
To get a specific conversation's basic information.
:param kwargs
- conversation_id (Required)
:return
https://open.shopee.com/documents?module=109&type=1&id=674&version=2
"""
return self.client.execute("sellerchat/get_one_conversation", "GET", kwargs)
def delete_conversation(self, **kwargs):
"""
To delete a specific conversation
:param kwargs
- conversation_id (Required)
:return
https://open.shopee.com/documents?module=109&type=1&id=675&version=2
"""
return self.client.execute("sellerchat/delete_conversation", "POST", kwargs)
def get_unread_conversation_count(self, **kwargs):
"""
To get the number of unread conversations from a shop (not unread messages)
:param kwargs
:return
https://open.shopee.com/documents?module=109&type=1&id=676&version=2
"""
return self.client.execute("sellerchat/get_unread_conversation_count", "GET", kwargs)
def pin_conversation(self, **kwargs):
"""
To pin a specific conversations
:param kwargs
- conversation_id (Required)
:return
https://open.shopee.com/documents?module=109&type=1&id=677&version=2
"""
return self.client.execute("sellerchat/pin_conversation", "POST", kwargs)
def unpin_conversation(self, **kwargs):
"""
To unpin a specific conversations
:param kwargs
- conversation_id (Required)
:return
https://open.shopee.com/documents?module=109&type=1&id=678&version=2
"""
return self.client.execute("sellerchat/unpin_conversation", "POST", kwargs)
def read_conversation(self, **kwargs):
"""
To send read request for a specific conversation
:param kwargs
- conversation_id (Required)
- last_read_message_id (Required)
:return
https://open.shopee.com/documents?module=109&type=1&id=679&version=2
"""
return self.client.execute("sellerchat/read_conversation", "POST", kwargs)
def unread_conversation(self, **kwargs):
"""
To mark a conversation as unread
:param kwargs
- conversation_id (Required)
:return
https://open.shopee.com/documents?module=109&type=1&id=680&version=2
"""
return self.client.execute("sellerchat/unread_conversation", "POST", kwargs)
def get_offer_toggle_status(self, **kwargs):
"""
To get the toggle status to check if the shop has allowed buyer to negotiate price with seller.
:param kwargs
:return
https://open.shopee.com/documents?module=109&type=1&id=681&version=2
"""
return self.client.execute("sellerchat/get_offer_toggle_status", "GET", kwargs)
def set_offer_toggle_status(self, **kwargs):
"""
        To set the toggle status. If set to "enabled", the seller does not allow the buyer to negotiate the price.
:param kwargs
- make_offer_status (Required)
:return
https://open.shopee.com/documents?module=109&type=1&id=682&version=2
"""
return self.client.execute("sellerchat/set_offer_toggle_status", "POST", kwargs)
def upload_image(self, **kwargs):
"""
When you need to send an image type message, please request this API first to upload the image file to get image url. Then proceed to request the send message API with the image url.
:param kwargs
- file (Required)
:return
https://open.shopee.com/documents?module=109&type=1&id=682&version=2
"""
return self.client.execute("sellerchat/upload_image", "POST", kwargs)
|
from math import sin, asin, sqrt, pi
def in_tree_pixel(baf, img_width):
# baf is the Basal Area factor
    # tree_width (used by max_baf below) is the app-measured tree pixel width
theta = 2 * asin(sqrt(baf) / 100) # radians
min_tree_width = theta / (2*pi) * img_width # also use rad(360) = 2 pi
return min_tree_width
def max_baf(img_width, tree_width):
return (100 * sin((pi * tree_width) / img_width)) ** 2
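# A hedged worked example (the BAF and image width values are illustrative):
def _example_angle_gauge():
    img_width = 4000.0  # panorama width in pixels, assumption
    width = in_tree_pixel(20.0, img_width)  # min pixel width of an "in" tree at BAF 20
    baf = max_baf(img_width, width)  # inverting recovers (approximately) BAF 20
    return width, baf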
def plot_ba_calculator(baf, in_tree_num):
return baf * in_tree_num |
"""
The OptimizationManager submodule contains all the required function for
optimizing via dolfin-adjoint. To use dolfin-adjoin set::
general:
dolfin_adjoint: True
in the param.yaml file.
Todo:
* Read through an update the docstrings for these functions.
* Create specific optimization classes.
"""
import __main__
import os
### Get the name of program importing this package ###
if hasattr(__main__,"__file__"):
main_file = os.path.basename(__main__.__file__)
else:
main_file = "ipython"
### This checks if we are just doing documentation ###
if not main_file in ["sphinx-build", "__main__.py"]:
from dolfin import *
import numpy as np
import copy
from sys import platform
import time
from memory_profiler import memory_usage
### Import the cumulative parameters ###
from windse import windse_parameters
### Check if we need dolfin_adjoint ###
if windse_parameters.dolfin_adjoint:
from dolfin_adjoint import *
### This import improves the plotter functionality on Mac ###
if platform == 'darwin':
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
else:
InequalityConstraint = object
import openmdao.api as om
class ObjComp(om.ExplicitComponent):
"""
OpenMDAO component to wrap the objective computation from dolfin.
Specifically, we use the J and dJ (function and Jacobian) methods
to compute the function value and derivative values as needed by the
OpenMDAO optimizers.
"""
def initialize(self):
self.options.declare('initial_DVs', types=np.ndarray)
self.options.declare('J', types=object)
self.options.declare('dJ', types=object)
self.options.declare('callback', types=object)
def setup(self):
self.add_input('DVs', val=self.options['initial_DVs'])
self.add_output('obj', val=0.)
self.declare_partials('*', '*')
    def compute(self, inputs, outputs):
        m = list(inputs['DVs'])
        computed_output = self.options['J'](m)
        outputs['obj'] = computed_output
        # guard: om_wrapper passes callback=None when no callback is supplied
        if self.options['callback'] is not None:
            self.options['callback'](m)
def compute_partials(self, inputs, partials):
m = list(inputs['DVs'])
jac = self.options['dJ'](m)
partials['obj', 'DVs'] = jac
class ConsComp(om.ExplicitComponent):
"""
OpenMDAO component to wrap the constraint computation.
A small wrapper used on the fenics methods for computing constraint
and Jacobian values using the OpenMDAO syntax.
"""
def initialize(self):
self.options.declare('initial_DVs', types=np.ndarray)
self.options.declare('J', types=object)
self.options.declare('dJ', types=object)
self.options.declare('con_name', types=str)
def setup(self):
self.con_name = self.options["con_name"]
self.add_input('DVs', val=self.options['initial_DVs'])
output = self.options['J'](self.options['initial_DVs'])
self.add_output(self.con_name, val=output)
self.declare_partials('*', '*')
def compute(self, inputs, outputs):
m = list(inputs['DVs'])
computed_output = self.options['J'](m)
outputs[self.con_name] = computed_output
def compute_partials(self, inputs, partials):
m = list(inputs['DVs'])
jac = self.options['dJ'](m)
partials[self.con_name, 'DVs'] = jac
def gather(m):
"""
    Helper function to gather constraint Jacobians. Adapted from fenics.
"""
if isinstance(m, list):
return list(map(gather, m))
elif hasattr(m, "_ad_to_list"):
return m._ad_to_list(m)
else:
return m # Assume it is gathered already
def om_wrapper(J, initial_DVs, dJ, H, bounds, **kwargs):
"""
Custom optimization wrapper to use OpenMDAO optimizers with dolfin-adjoint.
Follows the API as defined by dolfin-adjoint.
Parameters
----------
J : object
Function to compute the model analysis value at a design point.
initial_DVs : array
The initial design variables so we can get the array sizing correct
for the OpenMDAO implementation.
dJ : object
Function to compute the Jacobian at a design point.
H : object
Function to compute the Hessian at a design point (not used).
bounds : array
Array of lower and upper bound values for the design variables.
Returns
-------
DVs : array
The optimal design variable values.
"""
# build the model
prob = om.Problem(model=om.Group())
if 'callback' in kwargs:
callback = kwargs['callback']
else:
callback = None
prob.model.add_subsystem('obj_comp', ObjComp(initial_DVs=initial_DVs, J=J, dJ=dJ, callback=callback), promotes=['*'])
constraint_types = []
if 'constraints' in kwargs:
constraints = kwargs['constraints']
if not isinstance(constraints, list):
constraints = [constraints]
for idx, c in enumerate(constraints):
if isinstance(c, InequalityConstraint):
typestr = "ineq"
elif isinstance(c, EqualityConstraint):
typestr = "eq"
else:
raise Exception("Unknown constraint class")
            def jac(x, c=c):
                # bind the loop variable at definition time; otherwise every
                # ConsComp would differentiate the last constraint in the loop
                out = c.jacobian(x)
                return [gather(y) for y in out]
constraint_types.append(typestr)
con_name = f'con_{idx}'
prob.model.add_subsystem(f'cons_comp_{idx}', ConsComp(initial_DVs=initial_DVs, J=c.function, dJ=jac, con_name=con_name), promotes=['*'])
lower_bounds = bounds[:, 0]
upper_bounds = bounds[:, 1]
# set up the optimization
if 'SLSQP' in kwargs['opt_routine']:
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
elif 'SNOPT' in kwargs['opt_routine']:
prob.driver = om.pyOptSparseDriver()
prob.driver.options['optimizer'] = 'SNOPT'
folder_output = kwargs["options"]["folder"]
prob.driver.opt_settings["Summary file"] = os.path.join(folder_output, "SNOPT_summary.out")
prob.driver.opt_settings["Print file"] = os.path.join(folder_output, "SNOPT_print.out")
prob.model.add_design_var('DVs', lower=lower_bounds, upper=upper_bounds)
prob.model.add_objective('obj', ref=kwargs["options"]["obj_ref"], ref0=kwargs["options"]["obj_ref0"])
for idx, constraint_type in enumerate(constraint_types):
con_name = f'con_{idx}'
if constraint_type == "eq":
prob.model.add_constraint(con_name, equals=0.)
else:
# Inequality means it's positive from scipy and dolfin
prob.model.add_constraint(con_name, lower=0.)
prob.setup()
prob.set_val('DVs', initial_DVs)
# Run the optimization
prob.run_driver()
# Return the optimal design variables
    return prob['DVs']
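def _om_wrapper_example(Jhat, bounds):
    """A hedged usage sketch: hand the OpenMDAO wrapper to dolfin-adjoint's
    minimize() through its custom-algorithm hook (the method="Custom" /
    algorithm route is assumed from the wrapper's signature above; the option
    values are illustrative)."""
    return minimize(Jhat, method="Custom", algorithm=om_wrapper, bounds=bounds,
                    opt_routine="SLSQP",
                    options={"folder": ".", "obj_ref": 1.0, "obj_ref0": 0.0})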
class Optimizer(object):
"""
    An Optimizer sets up the controls, bounds, and reduced functional needed
    to optimize a solved problem via dolfin-adjoint.

    Args:
        solver: a windse solver object whose functional will be optimized.
"""
def __init__(self, solver):
### save a reference of option and create local version specifically of domain options ###
self.params = windse_parameters
self.solver = solver
self.problem = solver.problem
self.farm = solver.problem.farm
self.fprint = self.params.fprint
self.tag_output = self.params.tag_output
self.debug_mode = self.params.debug_mode
self.xscale = self.problem.dom.xscale
### Update attributes based on params file ###
for key, value in self.params["optimization"].items():
if isinstance(value,list):
setattr(self,key,np.array(value))
else:
setattr(self,key,value)
### Process parameters ###
if "layout" in self.control_types:
if isinstance(self.layout_bounds,(list, np.ndarray)):
self.layout_bounds = np.array(self.layout_bounds)
elif self.layout_bounds == "wind_farm":
self.layout_bounds = np.array([self.farm.ex_x,self.farm.ex_y])
else:
self.layout_bounds = np.array([[0,0],[0,0]])
self.layout_bounds = self.layout_bounds*self.xscale
self.iteration = 0
self.fprint("Setting Up Optimizer",special="header")
self.fprint("Controls: {0}".format(self.control_types))
self.CreateControls()
self.fprint("Define Bounds")
self.CreateBounds()
self.get_minimum_distance_constraint_func(self.controls, 2*np.mean(self.problem.farm.HH))
self.fprint("Define Optimizing Functional")
self.J = self.solver.J
self.Jhat = ReducedFunctional(self.J, self.controls, eval_cb_post=self.ReducedFunctionalCallback)
self.Jcurrent = self.J
self.fprint("Number of Controls: {:d}".format(len(self.controls)),special="header")
self.OptPrintFunction(self.init_vals,None)
self.fprint("",special="footer")
self.fprint("Optimizer Setup",special="footer")
def DebugOutput(self):
if self.debug_mode:
self.tag_output("n_controls", len(self.controls))
self.tag_output("obj_value", float(self.J))
### Output initial control values ###
for i, val in enumerate(self.controls):
self.tag_output("val0_"+self.names[i],val.values())
### Output gradient ###
            if hasattr(self,"gradients"):
for i, d in enumerate(self.gradients):
self.tag_output("grad_"+self.names[i],float(d))
### TODO: Output taylor convergence data
if hasattr(self,"conv_rate"):
pass
### TODO: Output optimized controls
if hasattr(self,"m_opt"):
pass
def RecomputeReducedFunctional(self):
self.CreateControls()
self.J = self.solver.J
self.Jhat = ReducedFunctional(self.J, self.controls, eval_cb_post=self.ReducedFunctionalCallback)
self.Jcurrent = self.J
def ReducedFunctionalCallback(self, j, m):
self.Jcurrent = j
def CreateControls(self):
        ### control_pointers records which entries of the original farm lists correspond to each control. It is rather inefficient, but it works. ###
self.controls = []
self.control_pointers = []
self.names = []
self.indexes = [[],[],[],[],[],[],[]]
self.init_vals = []
j = 0
if "layout" in self.control_types:
for i in self.solver.opt_turb_id:
self.indexes[0].append(j)
j+=1
self.names.append("x_"+repr(i))
self.controls.append(Control(self.farm.mx[i]))
self.control_pointers.append((self.farm.x,i))
self.init_vals.append(self.farm.mx[i])
self.indexes[1].append(j)
j+=1
self.names.append("y_"+repr(i))
self.controls.append(Control(self.farm.my[i]))
self.control_pointers.append((self.farm.y,i))
self.init_vals.append(self.farm.my[i])
if "yaw" in self.control_types:
for i in self.solver.opt_turb_id:
self.indexes[2].append(j)
j+=1
self.names.append("yaw_"+repr(i))
self.controls.append(Control(self.farm.myaw[i]))
self.control_pointers.append((self.farm.yaw,i))
self.init_vals.append(self.farm.myaw[i])
if "axial" in self.control_types:
for i in self.solver.opt_turb_id:
self.indexes[3].append(j)
j+=1
self.names.append("axial_"+repr(i))
self.controls.append(Control(self.farm.ma[i]))
self.control_pointers.append((self.farm.a,i))
self.init_vals.append(self.farm.ma[i])
if "lift" in self.control_types:
for i in self.solver.opt_turb_id:
for k in range(self.farm.num_blade_segments):
self.control_pointers.append((self.farm.cl,[i,k]))
self.indexes[4].append(j)
j+=1
self.names.append("lift_"+repr(i)+"_"+repr(k))
self.controls.append(Control(self.farm.mcl[i][k]))
self.init_vals.append(self.farm.mcl[i][k])
if "drag" in self.control_types:
for i in self.solver.opt_turb_id:
for k in range(self.farm.num_blade_segments):
self.control_pointers.append((self.farm.cd,[i,k]))
self.indexes[5].append(j)
j+=1
self.names.append("drag_"+repr(i)+"_"+repr(k))
self.controls.append(Control(self.farm.mcd[i][k]))
self.init_vals.append(self.farm.mcd[i][k])
if "chord" in self.control_types:
for i in self.solver.opt_turb_id:
for k in range(self.farm.num_blade_segments):
self.control_pointers.append((self.farm.chord,[i,k]))
self.indexes[6].append(j)
j+=1
self.names.append("chord_"+repr(i)+"_"+repr(k))
self.controls.append(Control(self.farm.mchord[i][k]))
self.init_vals.append(self.farm.mchord[i][k])
self.num_controls = len(self.controls)
def CreateBounds(self):
lower_bounds = []
upper_bounds = []
if "layout" in self.control_types:
for i in range(self.farm.numturbs):
lower_bounds.append(Constant((self.layout_bounds[0][0])))# + self.farm.radius[i])))
lower_bounds.append(Constant((self.layout_bounds[1][0])))# + self.farm.radius[i])))
upper_bounds.append(Constant((self.layout_bounds[0][1])))# - self.farm.radius[i])))
upper_bounds.append(Constant((self.layout_bounds[1][1])))# - self.farm.radius[i])))
if "yaw" in self.control_types:
for i in range(self.farm.numturbs):
lower_bounds.append(Constant(-45*pi/180.0))
upper_bounds.append(Constant(45*pi/180.0))
if "axial" in self.control_types:
for i in range(self.farm.numturbs):
lower_bounds.append(Constant(0))
upper_bounds.append(Constant(1.))
if "lift" in self.control_types:
for i in self.solver.opt_turb_id:
for i in range(self.farm.num_blade_segments):
lower_bounds.append(Constant(0))
upper_bounds.append(Constant(2.))
if "drag" in self.control_types:
for i in self.solver.opt_turb_id:
for k in range(self.farm.num_blade_segments):
lower_bounds.append(Constant(0))
upper_bounds.append(Constant(2.))
if "chord" in self.control_types:
for i in self.solver.opt_turb_id:
c_avg = 0
for k in range(self.farm.num_blade_segments):
modifier = 2.0
max_chord = self.farm.max_chord
seg_chord = self.farm.baseline_chord[k]
lower_bounds.append(Constant(seg_chord/modifier))
upper_bounds.append(Constant(np.maximum(np.minimum(seg_chord*modifier,max_chord),c_avg)))
c_avg = (c_avg*k+seg_chord)/(k+1)
self.bounds = [lower_bounds,upper_bounds]
def Gradient(self):
"""
Returns a gradient of the objective function
"""
mem0=memory_usage()[0]
tick = time.time()
capture_memory = False
if capture_memory:
mem_out, der = memory_usage(self.Jhat.derivative,max_usage=True,retval=True,max_iterations=1)
else:
mem_out = 2*mem0
der = self.Jhat.derivative()
folder_string = self.params.folder+"data/"
if not os.path.exists(folder_string): os.makedirs(folder_string)
f = open(folder_string+"gradient_data.txt",'w')
f_header = "control value derivative"
f.write(f_header+"\n")
self.fprint('========Gradient Data========')
d_out = '%12s: %12s, %22s' % ('Control', 'Value', 'Derivative')
self.fprint(d_out)
d_global = np.zeros(self.params.num_procs, dtype=np.float64)
for i, d in enumerate(der):
ctl_val = float(self.controls[i].values())
# d_out = str(self.names[i] + " " +repr(ctl_val)+ " " +repr(float(d)))
# d_format = np.float64(d)
# self.params.comm.Gather(d_format, d_global, root=0)
# d_sum = np.sum(d_global)
d_out = '%12s: %12.5e, %22.15e' % (self.names[i], ctl_val, d)
# print('Rank %d, %s' % (self.params.rank, d_out))
self.fprint(d_out)
f.write(d_out+"\n")
f.close()
tock = time.time()
self.fprint("Time Elapsed: {:1.2f} s".format(tock-tick))
if capture_memory:
self.fprint("Memory Used: {:1.2f} MB".format(mem_out-mem0))
self.gradients = np.array(der, dtype=float)
self.DebugOutput()
return self.gradients
def ListControls(self,m):
self.fprint("Iteration "+repr(self.iteration)+" Complete",special="header")
self.fprint("Current Objective Value: " + repr(float(self.Jcurrent)))
# if "layout" in self.control_types:
# for i in range(self.farm.numturbs):
# self.fprint("Location Turbine {0:} of {1:}: {2: 4.2f}, {3: 4.2f}".format(i+1,self.farm.numturbs,self.farm.x[i],self.farm.y[i]))
# if "yaw" in self.control_types:
# for i in range(self.farm.numturbs):
# self.fprint("Yaw Turbine {0:} of {1:}: {2: 4.6f}".format(i+1,self.farm.numturbs,self.farm.yaw[i]))
self.fprint("Previous Control Values",special="header")
for i, [l, ix] in enumerate(self.control_pointers):
if not isinstance(ix,int):
self.fprint(self.names[i] +": " +repr(float(l[ix[0]][ix[1]])))
else:
self.fprint(self.names[i] +": " +repr(float(l[ix])))
self.fprint("",special="footer")
self.fprint("Next Control Values",special="header")
for i, val in enumerate(m):
self.fprint(self.names[i] +": " +repr(float(val)))
self.fprint("",special="footer")
self.fprint("Iteration "+repr(self.iteration)+" Complete",special="footer")
def SaveControls(self,m):
folder_string = self.params.folder+"/data/"
if not os.path.exists(folder_string): os.makedirs(folder_string)
# new_values = {}
# m_f = np.array(self.control_pointers,dtype=float)
# if "layout" in self.control_types:
# new_values["x"] = m_f[self.indexes[0]]
# new_values["y"] = m_f[self.indexes[1]]
# if "yaw" in self.control_types:
# new_values["yaw"] = m_f[self.indexes[2]]
# if "axial" in self.control_types:
# new_values["a"] = m_f[self.indexes[3]]
# self.problem.farm.UpdateControls(**new_values)
self.problem.farm.SaveWindFarm(val=self.iteration)
# print(m)
# print(type(m))
m_new = np.array(m,dtype=float)
m_old = []
for l, i in self.control_pointers:
if not isinstance(i,int):
m_old.append(float(l[i[0]][i[1]]))
else:
m_old.append(float(l[i]))
# print(m_new)
# print(type(m_new))
if self.iteration == 0:
#### ADD HEADER ####
self.last_m = np.zeros(self.num_controls)
for i in range(self.num_controls):
self.last_m[i]=float(m_new[i])
err = 0.0
f = open(folder_string+"opt_data.txt",'w')
header = str("Objective Change Prev_Controls: p_"+" p_".join(self.names)+" New_Controls: n_"+" n_".join(self.names)+"\n")
f.write(header)
else:
err = np.linalg.norm(m_new-self.last_m)
self.last_m = copy.copy(m_new)
f = open(folder_string+"opt_data.txt",'a')
output_data = np.concatenate(((self.Jcurrent, err, self.num_controls),m_old))
output_data = np.concatenate((output_data,(self.num_controls,)))
output_data = np.concatenate((output_data,m_new))
np.savetxt(f,[output_data])
f.close()
def OptPrintFunction(self,m,test=None):
if test is not None:
print("Hey, this method actually gives us more info")
# print(np.array(m,dtype=float))
# print(np.array(self.control_pointers,dtype=float))
# print(np.array(self.problem.farm.myaw,dtype=float))
self.SaveControls(m)
self.ListControls(m)
if "layout" in self.control_types or "yaw" in self.control_types:
self.problem.farm.PlotFarm(filename="wind_farm_step_"+repr(self.iteration),power=self.Jcurrent)
if "chord" in self.control_types:
c_lower = np.array(self.bounds[0])[self.indexes[6]]
c_upper = np.array(self.bounds[1])[self.indexes[6]]
self.problem.farm.PlotChord(filename="chord_step_"+repr(self.iteration),power=self.Jcurrent,bounds=[c_lower,c_upper])
self.iteration += 1
def get_minimum_distance_constraint_func(self, m_pos, min_distance=200):
if "layout" in self.control_types and len(self.control_types)==1:
self.dist_constraint = MinimumDistanceConstraint(m_pos, min_distance)
else:
print("minimum distance is supported when only optimizing layout")
self.dist_constraint = None
def Optimize(self):
self.fprint("Beginning Optimization",special="header")
if self.opt_type == "minimize":
opt_function = minimize
elif self.opt_type == "maximize":
opt_function = maximize
else:
raise ValueError(f"Unknown optimization type: {self.opt_type}")
options = {
"disp" : True,
"folder" : self.params.folder,
}
if hasattr(self, 'obj_ref'):
options["obj_ref"] = self.obj_ref
if hasattr(self, 'obj_ref0'):
options["obj_ref0"] = self.obj_ref0
# TODO : simplify this logic
if "SNOPT" in self.opt_routine or "OM_SLSQP" in self.opt_routine:
if "layout" in self.control_types:
m_opt=opt_function(self.Jhat, method="Custom", options = options, constraints = self.dist_constraint, bounds = self.bounds, callback = self.OptPrintFunction, algorithm=om_wrapper, opt_routine=self.opt_routine)
else:
m_opt=opt_function(self.Jhat, method="Custom", options = options, bounds = self.bounds, callback = self.OptPrintFunction, algorithm=om_wrapper, opt_routine=self.opt_routine)
else:
if "layout" in self.control_types:
m_opt=opt_function(self.Jhat, method=self.opt_routine, options = options, constraints = self.dist_constraint, bounds = self.bounds, callback = self.OptPrintFunction)
else:
m_opt=opt_function(self.Jhat, method=self.opt_routine, options = options, bounds = self.bounds, callback = self.OptPrintFunction)
self.m_opt = m_opt
if self.num_controls == 1:
self.m_opt = (self.m_opt,)
self.OptPrintFunction(m_opt)
# self.fprint("Assigning New Values")
# new_values = {}
# m_f = np.array(m_opt,dtype=float)
# if "layout" in self.control_types:
# new_values["x"] = m_f[self.indexes[0]]
# new_values["y"] = m_f[self.indexes[1]]
# if "yaw" in self.control_types:
# new_values["yaw"] = m_f[self.indexes[2]]
# if "axial" in self.control_types:
# new_values["a"] = m_f[self.indexes[3]]
# self.problem.farm.UpdateControls(**new_values)
# self.fprint("Solving With New Values")
# self.solver.Solve()
self.DebugOutput()
self.fprint("Optimization Finished",special="footer")
return self.m_opt
def TaylorTest(self):
self.fprint("Beginning Taylor Test",special="header")
h = []
for i,c in enumerate(self.controls):
h.append(Constant(10))
# h.append(Constant(0.01*max(abs(float(self.bounds[1][i])),abs(float(self.bounds[1][i])))))
# h.append(Constant(10.0*abs(float(self.bounds[1][i])-float(self.bounds[0][i]))/2.0))
# h.append(Constant(0.01*abs(np.mean(self.bounds[1])+np.mean(self.bounds[0]))/2.0))
print(np.array(h,dtype=float))
self.conv_rate = taylor_test(self.Jhat, self.init_vals, h)
self.DebugOutput()
self.fprint("Convergence Rates:")
self.fprint("")
self.fprint(self.conv_rate)
self.fprint("")
self.fprint("Taylor Test Finished",special="footer")
return self.conv_rate
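# A minimal standalone sketch (assumption: plain numpy, independent of
# dolfin-adjoint's taylor_test) of what TaylorTest verifies: the first-order
# Taylor remainder |J(m + h) - J(m) - h*dJ(m)| should converge at rate ~2
# as the perturbation h is halved, if the gradient dJ is correct.
import numpy as np

def taylor_remainder_rates(J, dJ, m, h0=1.0, n=5):
    remainders = []
    for i in range(n):
        h = h0 / 2**i
        remainders.append(abs(J(m + h) - J(m) - h * dJ(m)))
    # log2 of successive remainder ratios; ~2.0 indicates a correct gradient
    return [np.log2(r0 / r1) for r0, r1 in zip(remainders, remainders[1:])]

if __name__ == '__main__':
    print(taylor_remainder_rates(lambda x: x**3, lambda x: 3*x**2, m=1.5))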
class MinimumDistanceConstraint(InequalityConstraint):
def __init__(self, m_pos, min_distance=200):
self.min_distance = min_distance
self.m_pos = m_pos
# print("In mimimum distance constraint")
def length(self):
        nconstraints = int(comb(len(self.m_pos) // 2, 2))
        return nconstraints
def function(self, m):
ieqcons = []
m_pos = m
for i in range(int(len(m_pos) / 2)):
for j in range(int(len(m_pos) / 2)):
if j > i:
ieqcons.append(((m_pos[2 * i] - m_pos[2 * j])**2 + (m_pos[2 * i + 1] - m_pos[2 * j + 1])**2) - self.min_distance**2)
arr = np.array(ieqcons)
# print("In mimimum distance constraint function eval")
# print "distances: ", arr*lengthscale
numClose = 0
for i in range(len(arr)):
if arr[i]<0:
# print(arr[i]*lengthscale)
numClose +=1
if numClose > 1:
print("Warning: Number of turbines in violation of spacing constraint: "+repr(numClose))
return np.array(ieqcons)
def jacobian(self, m):
ieqcons = []
m_pos = m
for i in range(int(len(m_pos) / 2)):
for j in range(int(len(m_pos) / 2)):
if j>i:
prime_ieqcons = np.zeros(len(m))
prime_ieqcons[2 * i] = 2 * (m_pos[2 * i] - m_pos[2 * j])
prime_ieqcons[2 * j] = -2 * (m_pos[2 * i] - m_pos[2 * j])
prime_ieqcons[2 * i + 1] = 2 * (m_pos[2 * i + 1] - m_pos[2 * j + 1])
prime_ieqcons[2 * j + 1] = -2 * (m_pos[2 * i + 1] - m_pos[2 * j + 1])
ieqcons.append(prime_ieqcons)
# print("In mimimum distance constraint Jacobian eval")
return np.array(ieqcons) |
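# Standalone check sketch for MinimumDistanceConstraint above (assumption:
# numpy only; m is a flat [x0, y0, x1, y1, ...] layout vector). The analytic
# jacobian should match central finite differences of function().
import numpy as np

if __name__ == '__main__':
    con = MinimumDistanceConstraint(m_pos=None, min_distance=1.0)
    m = np.array([0.0, 0.0, 3.0, 0.0, 0.0, 4.0])
    eps = 1e-6
    fd = np.zeros((len(con.function(m)), len(m)))
    for j in range(len(m)):
        dm = np.zeros(len(m))
        dm[j] = eps
        fd[:, j] = (con.function(m + dm) - con.function(m - dm)) / (2.0 * eps)
    print(np.max(np.abs(fd - con.jacobian(m))))  # should be ~0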
import tensorflow as tf
import numpy as np
import subprocess
import json
import pandas as pd
from collections import OrderedDict
def scale_panda(panda, factor, bias):
    """Apply y_scaled = factor * y + bias label-wise to a Series or DataFrame."""
    if isinstance(panda, pd.Series):
        labels = panda.index
    elif isinstance(panda, pd.DataFrame):
        labels = panda.columns
    panda = factor[labels] * panda + bias[labels]
    return panda
def descale_panda(panda, factor, bias):
    """Invert scale_panda: recover y from y_scaled = factor * y + bias."""
    if isinstance(panda, pd.Series):
        labels = panda.index
    elif isinstance(panda, pd.DataFrame):
        labels = panda.columns
    panda = (panda - bias[labels]) / factor[labels]
    return panda
def descale_variable(a, b, var):
return (var - b) / a
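# Minimal round-trip sketch for the helpers above (assumptions: pandas/numpy
# only; factor and bias are Series aligned on the same labels, following the
# y_scaled = a * y + b convention used throughout this module):
if __name__ == '__main__':
    y = pd.Series([1.0, 2.0, 3.0], index=['a', 'b', 'c'])
    factor = pd.Series(2.0, index=y.index)
    bias = pd.Series(-1.0, index=y.index)
    y_scaled = scale_panda(y, factor, bias)  # elementwise 2*y - 1
    assert np.allclose(descale_panda(y_scaled, factor, bias), y)
    assert descale_variable(2.0, -1.0, y_scaled['a']) == y['a']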
def model_to_json(name, trainable=None, feature_names=None, target_names=None, scale_factor=None, scale_bias=None, train_set=None, settings=None):
"""
    trainable: dict with all trainable values (e.g. weights and biases); get this with: `{x.name: tf.to_double(x).eval(session=sess).tolist() for x in tf.trainable_variables()}`
feature_names: List of feature names. Order matters!
target_names: List of target names. Order matters!
scale_factor: Series with the 'a' of y_scaled = a * y + b
scale_bias: Series with the 'b' of y_scaled = a * y + b
    train_set: The full DataFrame used for training. Used to calculate min/max values.
settings: The settings dict used for training. Used to extract the activation functions.
"""
nn_dict = OrderedDict()
nn_dict['target_names'] = target_names
nn_dict['feature_names'] = feature_names
nn_dict['hidden_activation'] = settings['hidden_activation']
nn_dict['output_activation'] = settings['output_activation']
feature_a = scale_factor[feature_names].values
feature_b = scale_bias[feature_names].values
target_a = scale_factor[target_names].values
target_b = scale_bias[target_names].values
nn_dict['feature_min'] = OrderedDict(zip(feature_names,
descale_variable(feature_a,
feature_b,
train_set._features.min(axis=0)).tolist()
))
nn_dict['feature_max'] = OrderedDict(zip(feature_names,
descale_variable(feature_a,
feature_b,
train_set._features.max(axis=0)).tolist()
))
nn_dict['target_min'] = OrderedDict(zip(target_names,
descale_variable(target_a,
target_b,
train_set._target.min(axis=0)).tolist()
))
nn_dict['target_max'] = OrderedDict(zip(target_names,
descale_variable(target_a,
target_b,
train_set._target.max(axis=0)).tolist()
))
nn_dict['prescale_factor'] = OrderedDict((name, val) for name, val in scale_factor.items())
nn_dict['prescale_bias'] = OrderedDict((name, val) for name, val in scale_bias.items())
nn_dict.update(trainable)
#nn_dict['target_min'] = OrderedDict(descale_variable())
with open(name, 'w') as file_:
json.dump(nn_dict, file_, indent=4, separators=(',', ': '))
def model_to_json_legacy(name, trainable, feature_names, target_names,
train_set,
feature_scale_factor, feature_scale_bias,
target_scale_factor, target_scale_bias,
l2_scale, settings):
trainable['prescale_factor'] = dict(zip(feature_names + target_names, feature_scale_factor + target_scale_factor))
trainable['prescale_bias'] = dict(zip(feature_names + target_names, feature_scale_bias + target_scale_bias))
    trainable['feature_min'] = dict(zip(feature_names, (train_set._features.min() - feature_scale_bias) / feature_scale_factor))
    trainable['feature_max'] = dict(zip(feature_names, (train_set._features.max() - feature_scale_bias) / feature_scale_factor))
trainable['feature_names'] = feature_names
trainable['target_names'] = target_names
    trainable['target_min'] = dict(zip(target_names, (train_set._target.min() - target_scale_bias) / target_scale_factor))
    trainable['target_max'] = dict(zip(target_names, (train_set._target.max() - target_scale_bias) / target_scale_factor))
trainable['hidden_activation'] = settings['hidden_activation']
trainable['output_activation'] = settings['output_activation']
#sp_result = subprocess.run('git rev-parse HEAD',
# stdout=subprocess.PIPE,
# shell=True,
# check=True)
#nn_version = sp_result.stdout.decode('UTF-8').strip()
#metadata = {
# 'nn_develop_version': nn_version,
# 'c_L2': float(l2_scale.eval())
#}
#trainable['_metadata'] = metadata
with open(name, 'w') as file_:
json.dump(trainable, file_, sort_keys=True, indent=4, separators=(',', ': '))
def weight_variable(shape, init='normsm_1_0', dtype=tf.float64, **kwargs):
"""Create a weight variable with appropriate initialization."""
initial = parse_init(shape, init, dtype=dtype, **kwargs)
return tf.Variable(initial)
def parse_init(shape, init, dtype=np.float64, **kwargs):
    if isinstance(init, str):
        if init.startswith('normsm'):
            __, s, m = init.split('_')
            initial = tf.random_normal(shape, dtype=dtype, mean=float(m), stddev=float(s), **kwargs)
        elif init == 'glorot_normal':
            initial = tf.glorot_normal_initializer()(shape)
    elif isinstance(init, np.ndarray):
        initial = tf.constant(init, dtype=dtype)
    try:
        initial
    except NameError:
        raise Exception('Could not parse init {!s}'.format(init))
    return initial
def bias_variable(shape, init='normsm_1_0', dtype=tf.float64, **kwargs):
"""Create a bias variable with appropriate initialization."""
initial = parse_init(shape, init, dtype=dtype, **kwargs)
return tf.Variable(initial)
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization).
"""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def nn_layer(input_tensor, output_dim, layer_name, act=tf.nn.relu,
dtype=tf.float32, debug=False, weight_init='normsm_1_0', bias_init='normsm_1_0'):
"""Reusable code for making a simple neural net layer.
It does a matrix multiply, bias add, and then uses relu to nonlinearize.
It also sets up name scoping so that the resultant graph is easy to read,
and adds a number of summary ops.
"""
# Adding a name scope ensures logical grouping of the layers in the graph.
input_dim = input_tensor.get_shape().as_list()[1]
with tf.name_scope(layer_name):
# This Variable will hold the state of the weights for the layer
with tf.name_scope('weights'):
weights = weight_variable([input_dim, output_dim], init=weight_init, dtype=dtype)
if debug:
variable_summaries(weights)
with tf.name_scope('biases'):
biases = bias_variable([output_dim], dtype=dtype, init=bias_init)
if debug:
variable_summaries(biases)
with tf.name_scope('Wx_plus_b'):
preactivate = tf.matmul(input_tensor, weights) + biases
if debug:
tf.summary.histogram('pre_activations', preactivate)
if act is not None:
activations = act(preactivate, name='activation')
else:
activations = preactivate
if debug:
tf.summary.histogram('activations', activations)
return activations
def normab(panda, a, b):
    """Return (factor, bias) such that factor * panda + bias maps [min, max] onto [a, b]."""
    factor = (b - a) / (panda.max() - panda.min())
    bias = a - factor * panda.min()
    return factor, bias
def normsm(panda, s_t, m_t):
m_s = np.mean(panda)
s_s = np.std(panda)
factor = s_t / s_s
bias = -m_s * s_t / s_s + m_t
return factor, bias
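# Quick self-check sketch (assumption: numpy arrays; both scalers follow the
# y_scaled = factor * y + bias convention used by this module):
if __name__ == '__main__':
    x = np.array([1.0, 2.0, 4.0, 8.0])
    f_ab, b_ab = normab(x, 0.0, 1.0)
    print(f_ab * x + b_ab)              # spans exactly [0, 1]
    f_sm, b_sm = normsm(x, 1.0, 0.0)
    scaled = f_sm * x + b_sm
    print(scaled.mean(), scaled.std())  # ~0.0 and ~1.0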
|
import cartography.intel.aws.ec2
import tests.data.aws.ec2.key_pairs
TEST_ACCOUNT_ID = '000000000000'
TEST_REGION = 'us-east-1'
TEST_UPDATE_TAG = 123456789
def test_load_ec2_key_pairs(neo4j_session, *args):
data = tests.data.aws.ec2.key_pairs.DESCRIBE_KEY_PAIRS
cartography.intel.aws.ec2.load_ec2_key_pairs(
neo4j_session,
data,
TEST_REGION,
TEST_ACCOUNT_ID,
TEST_UPDATE_TAG,
)
expected_nodes = {
(
"arn:aws:ec2:us-east-1:000000000000:key-pair/sample_key_pair_1",
"11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11",
),
(
"arn:aws:ec2:us-east-1:000000000000:key-pair/sample_key_pair_2",
"22:22:22:22:22:22:22:22:22:22:22:22:22:22:22:22:22:22:22:22",
),
(
"arn:aws:ec2:us-east-1:000000000000:key-pair/sample_key_pair_3",
"33:33:33:33:33:33:33:33:33:33:33:33:33:33:33:33:33:33:33:33",
),
}
nodes = neo4j_session.run(
"""
MATCH (k:EC2KeyPair) return k.arn, k.keyfingerprint
"""
)
actual_nodes = {
(
n['k.arn'],
n['k.keyfingerprint'],
)
for n in nodes
}
assert actual_nodes == expected_nodes
|
__all__ = ["acquisition", "config", "exploration", "ncut", "ntds_utils",
"preprocessing", "utils", "subpackage"] |
#!/usr/bin/env python3
import math
import numpy as np
import tinyarray as ta
from sph_core import SPH_APP, Point, domain, mid_domain, norm
dx = 1.0
density0 = dx**(-len(domain))
stiffness = 1.0e2
viscosity = 1.0e0
nudge = 0.0
k = 7
gravity = 1.0e2
ca = math.sqrt(stiffness*k/density0)
print(ca)
class LiquidPoint(Point):
def force(self, model):
res = ta.array([0.0, 0.0])
res += -model.gradient('pressure', self)
if viscosity:
res += viscosity*model.laplace('velocity', self)
if nudge:
res -= self.velocity*nudge
if model.t < 1.0:
res -= self.velocity*1.0
res = res*ta.array([0.0, 10.0])
        res += ta.array([0.0, gravity])
return res
@property
def color(self):
return '#FF0000' # if self.velocity[0] > 0 else '#FFFF00'
@property
def pressure(self):
return stiffness*((self.density/density0)**k - 1)
class Wall(LiquidPoint):
def __init__(self, position):
velocity = ta.zeros(len(position))
super().__init__(position, velocity)
def force(self, model):
res = ta.array([0.0, 0.0])
return res
@property
def color(self):
return '#FFFFFF'
def create_wall(*positions):
positions = list(map(ta.array, positions))
end = positions[0]
dist = 0.95*dx
for i in range(len(positions) - 1):
start = end
end = (positions[i+1] - positions[i]) + start
length = norm(start-end)
unit = (end - start)/length
num = math.ceil(length/dist)
for k in range(num):
yield Wall(start + unit*(dist*k))
end = start + unit*dist*k
def main():
walls = tuple(create_wall(
(63.5, 0.5),
(63.5, 31.5),
(0.5, 31.5),
(0.5, 0.5),
))
wallxmin = min(wall.position[0] for wall in walls)
wallymax = max(wall.position[1] for wall in walls)
xs = np.arange(wallxmin + dx, 16.0, dx)
ys = np.arange(2.0, wallymax - dx, dx)
vmax = 8.0
vxs = np.random.uniform(-vmax*1e-3, vmax*1e-3, len(ys))
vys = np.random.uniform(-vmax*1e-3, vmax*1e-3, len(xs))
xs, ys = map(lambda a: np.reshape(a, (-1, )), np.meshgrid(xs, ys))
vys, vxs = map(lambda a: np.reshape(a, (-1, )), np.meshgrid(vys, vxs))
positions = map(ta.array, map(list, zip(xs, ys)))
velocities = map(ta.array, map(list, zip(vxs, vys)))
points = tuple(map(LiquidPoint, positions, velocities))
points = points + walls
model = SPH_APP(points, dt=0.002, nsnapshot=10)
model.run()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
import sys
import numpy as np
argument_list_length = len(sys.argv)
if argument_list_length != 2 and argument_list_length != 4:
print " "
print " Program returns average FWHM of the stellar profile"
print " Usage:"
print " ./fwhm.py <input_data_file> [flux_min] [flux_max]"
print " "
print " Important requirement:"
print " input data file have to be an oputput of peak program"
print " part of the ESO ECLIPSE package, use: peak -F -P '8 25 35'"
print " ZK, ver. 2017-11-14"
print " "
exit()
n_sd=2.7 # scaling factor for sigma-clipping
n_it=5 # number of iterations
flux_min=1000.0
flux_max=30000.0
id_r=[]
phot_data = sys.argv[1]
if argument_list_length == 4:
flux_min = float(sys.argv[2])
flux_max = float(sys.argv[3])
f1 = open(phot_data, 'r')
fmax, fx, fy = np.loadtxt(f1, usecols=(8,9,10), unpack=True)
f1.close()
for j in range(len(fmax)):
if fmax[j]<flux_min or fmax[j]>flux_max:
id_r.append(j)
if fx[j]<0.0 or fy[j]<0.0:
id_r.append(j)
fx_c=np.delete(fx, id_r)
fy_c=np.delete(fy, id_r)
av_fx=np.average(fx_c)
sd_fx=np.std(fx_c)
av_fy=np.average(fy_c)
sd_fy=np.std(fy_c)
for k in range(n_it):
fx_up_l=av_fx+(n_sd*sd_fx)
fx_low_l=av_fx-(n_sd*sd_fx)
fy_up_l=av_fy+(n_sd*sd_fy)
fy_low_l=av_fy-(n_sd*sd_fy)
for l in range(len(fx)):
if fx[l]>fx_up_l or fx[l]<fx_low_l:
id_r.append(l)
if fy[l]>fy_up_l or fy[l]<fy_low_l:
id_r.append(l)
fx_c=np.delete(fx, id_r)
fy_c=np.delete(fy, id_r)
av_fx=np.average(fx_c)
sd_fx=np.std(fx_c)
av_fy=np.average(fy_c)
sd_fy=np.std(fy_c)
print "%5d %7.3f %7.3f %7.3f" % (len(fx_c), av_fx, av_fy, (av_fx+av_fy)/2.0)
|
# Copyright (c) 2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from neutron.tests import base
from networking_cisco import backwards_compatibility as bc
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
exceptions as excep)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
nexus_restapi_network_driver as rest_driver)
from networking_cisco.plugins.ml2.drivers.cisco.nexus.extensions import (
cisco_providernet)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from networking_cisco.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from networking_cisco.plugins.ml2.drivers.cisco.nexus import nexus_models_v2
from neutron.plugins.ml2 import driver_api as api
from neutron.tests.unit import testlib_api
NETWORK_ID = 'network_id'
VLAN_ID = 'vlan_id'
DEVICE_ID = 'device_id'
HOST_ID = 'host_id'
PORT_ID = 'port_id'
IP_ADDR = 'ipaddr'
INTF_TYPE = 'intf_type'
NEXUS_PORT = 'nexus_port'
IS_NATIVE = True
NO_VNI = 0
NETWORK = {'id': NETWORK_ID,
'is_provider_network': True,
api.NETWORK_TYPE: 'vlan',
api.SEGMENTATION_ID: VLAN_ID,
bc.providernet.SEGMENTATION_ID: VLAN_ID}
PORT = {'device_id': DEVICE_ID,
bc.portbindings.VNIC_TYPE: 'normal',
bc.portbindings.HOST_ID: HOST_ID}
class TestCiscoNexusProvider(testlib_api.SqlTestCase):
"""Test the provider network code added to the cisco nexus MD."""
def setUp(self):
super(TestCiscoNexusProvider, self).setUp()
self._nexus_md = mech_cisco_nexus.CiscoNexusMechanismDriver()
self._nexus_md._get_port_uuid = mock.Mock(return_value='test_uuid')
self._func = mock.Mock()
self.context = mock.Mock()
self.context.current = NETWORK
def test_create_network(self):
self._nexus_md.create_network_precommit(self.context)
self.assertTrue(nexus_db_v2.is_provider_network(NETWORK_ID))
self.assertTrue(nexus_db_v2.is_provider_vlan(VLAN_ID))
def test_create_network_no_provider(self):
NETWORK_NO_PROVIDER = NETWORK.copy()
del NETWORK_NO_PROVIDER['is_provider_network']
self.context.current = NETWORK_NO_PROVIDER
self._nexus_md.create_network_precommit(self.context)
self.assertFalse(nexus_db_v2.is_provider_network(NETWORK_ID))
self.assertFalse(nexus_db_v2.is_provider_vlan(VLAN_ID))
def test_create_network_false_provider(self):
NETWORK_FALSE_PROVIDER = NETWORK.copy()
NETWORK_FALSE_PROVIDER['is_provider_network'] = False
self.context.current = NETWORK_FALSE_PROVIDER
self._nexus_md.create_network_precommit(self.context)
self.assertFalse(nexus_db_v2.is_provider_network(NETWORK_ID))
self.assertFalse(nexus_db_v2.is_provider_vlan(VLAN_ID))
def test_delete_network(self):
self._nexus_md.create_network_precommit(self.context)
self._nexus_md.delete_network_postcommit(self.context)
self.assertFalse(nexus_db_v2.is_provider_network(NETWORK_ID))
self.assertFalse(nexus_db_v2.is_provider_vlan(VLAN_ID))
def test_delete_network_no_id(self):
mock_subport_get_object = mock.patch.object(
nexus_db_v2, 'delete_provider_network').start()
self._nexus_md.delete_network_postcommit(self.context)
self.assertFalse(mock_subport_get_object.call_count)
def test_port_action_vlan_provider(self):
self._nexus_md.create_network_precommit(self.context)
self._nexus_md._port_action_vlan(PORT, NETWORK, self._func, 0)
self._func.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY, True)
def test_port_action_vlan_no_provider(self):
self._nexus_md._port_action_vlan(PORT, NETWORK, self._func, 0)
self._func.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, mock.ANY, mock.ANY, False)
class TestCiscoNexusProviderExtension(base.BaseTestCase):
"""Test the provider network extension class used by the cisco nexus MD."""
def setUp(self):
super(TestCiscoNexusProviderExtension, self).setUp()
self._context = mock.Mock()
self._data = {}
self._result = {}
self._provider_net_driver = cisco_providernet.CiscoProviderNetDriver()
def test_extension_alias(self):
        self.assertEqual(self._provider_net_driver.extension_alias,
                         'provider')
def test_create_network_vlan(self):
self._data[bc.providernet.SEGMENTATION_ID] = VLAN_ID
self._provider_net_driver.process_create_network(
self._context, self._data, self._result)
self.assertTrue(self._result['is_provider_network'])
def test_create_network_no_vlan(self):
self._provider_net_driver.process_create_network(
self._context, self._data, self._result)
self.assertFalse(self._result.get('is_provider_network'))
def test_create_network_none_vlan(self):
self._data[bc.providernet.SEGMENTATION_ID] = None
self._provider_net_driver.process_create_network(
self._context, self._data, self._result)
self.assertFalse(self._result.get('is_provider_network'))
class TestCiscoNexusProviderConfiguration(base.BaseTestCase):
"""Test the provider network configuration used by the cisco nexus MD."""
def setUp(self):
super(TestCiscoNexusProviderConfiguration, self).setUp()
mock.patch.object(rest_driver.CiscoNexusRestapiDriver,
'__init__', return_value=None).start()
self._nexus_md = mech_cisco_nexus.CiscoNexusMechanismDriver()
self._nexus_md.set_switch_ip_and_active_state = mock.Mock()
self._nexus_md.configure_next_batch_of_vlans = mock.Mock()
self._nexus_md.driver = rest_driver.CiscoNexusRestapiDriver()
self._nexus_md.driver.capture_and_print_timeshot = mock.Mock()
self._get_active_host_connections_mock = mock.patch.object(
mech_cisco_nexus.CiscoNexusMechanismDriver,
'_get_active_port_connections',
return_value=[[IP_ADDR, INTF_TYPE, NEXUS_PORT, IS_NATIVE,
None]]).start()
self._save_switch_vlan_range_mock = mock.patch.object(
mech_cisco_nexus.CiscoNexusMechanismDriver,
'_save_switch_vlan_range').start()
self._restore_port_binding_mock = mock.patch.object(
mech_cisco_nexus.CiscoNexusMechanismDriver,
'_restore_port_binding').start()
self._create_and_trunk_vlan_mock = mock.patch.object(
rest_driver.CiscoNexusRestapiDriver,
'create_and_trunk_vlan').start()
self._create_vlan_mock = mock.patch.object(
rest_driver.CiscoNexusRestapiDriver,
'create_vlan').start()
self._send_enable_vlan_on_trunk_int_mock = mock.patch.object(
rest_driver.CiscoNexusRestapiDriver,
'send_enable_vlan_on_trunk_int').start()
self._disable_vlan_on_trunk_int_mock = mock.patch.object(
rest_driver.CiscoNexusRestapiDriver,
'disable_vlan_on_trunk_int').start()
self._get_nexusvlan_binding_mock = mock.patch.object(
nexus_db_v2, 'get_nexusvlan_binding').start()
self._is_provider_vlan_mock = mock.patch.object(
nexus_db_v2, 'is_provider_vlan').start()
mock.patch.object(
nexus_db_v2, 'get_port_vlan_switch_binding',
side_effect=excep.NexusPortBindingNotFound).start()
mock.patch.object(
mech_cisco_nexus.CiscoNexusMechanismDriver,
'_delete_port_channel_resources').start()
def _set_provider_configuration(self, auto_create, auto_trunk):
cfg.CONF.set_override('provider_vlan_auto_create', auto_create,
'ml2_cisco')
cfg.CONF.set_override('provider_vlan_auto_trunk', auto_trunk,
'ml2_cisco')
def _test_pnet_configure(
self, auto_create, auto_trunk, is_provider_vlan=True):
self._set_provider_configuration(auto_create, auto_trunk)
self._nexus_md._configure_port_binding(
is_provider_vlan, None, IS_NATIVE, IP_ADDR, VLAN_ID, INTF_TYPE,
NEXUS_PORT, NO_VNI)
def _test_pnet_delete(
self, auto_create, auto_trunk, is_provider_vlan=True):
self._set_provider_configuration(auto_create, auto_trunk)
self._nexus_md._delete_switch_entry(
PORT, VLAN_ID, DEVICE_ID, HOST_ID, NO_VNI, is_provider_vlan)
def _test_pnet_replay(
self, auto_create, auto_trunk, is_provider_vlan=True):
self._set_provider_configuration(auto_create, auto_trunk)
self._is_provider_vlan_mock.return_value = is_provider_vlan
port_bindings = nexus_models_v2.NexusPortBinding(
port_id=PORT_ID, vlan_id=VLAN_ID, vni=NO_VNI, switch_ip=IP_ADDR,
instance_id=0, is_native=False, channel_group=0)
self._nexus_md.configure_switch_entries(IP_ADDR, [port_bindings])
def test_pnet_configure_create_and_trunk(self):
self._test_pnet_configure(auto_create=True, auto_trunk=True)
self._create_and_trunk_vlan_mock.assert_called_once_with(
IP_ADDR, VLAN_ID, INTF_TYPE, NEXUS_PORT, NO_VNI, True)
self.assertFalse(self._create_vlan_mock.call_count)
self.assertFalse(self._send_enable_vlan_on_trunk_int_mock.call_count)
def test_pnet_configure_create(self):
self._test_pnet_configure(auto_create=True, auto_trunk=False)
self.assertFalse(self._create_and_trunk_vlan_mock.call_count)
self._create_vlan_mock.assert_called_once_with(
IP_ADDR, VLAN_ID, NO_VNI)
self.assertFalse(self._send_enable_vlan_on_trunk_int_mock.call_count)
def test_pnet_configure_trunk(self):
self._test_pnet_configure(auto_create=False, auto_trunk=True)
self.assertFalse(self._create_and_trunk_vlan_mock.call_count)
self.assertFalse(self._create_vlan_mock.call_count)
self._send_enable_vlan_on_trunk_int_mock.assert_called_once_with(
IP_ADDR, VLAN_ID, INTF_TYPE, NEXUS_PORT, True)
def test_pnet_configure_not_providernet(self):
self._test_pnet_configure(
auto_create=False, auto_trunk=False, is_provider_vlan=False)
self._create_and_trunk_vlan_mock.assert_called_once_with(
IP_ADDR, VLAN_ID, INTF_TYPE, NEXUS_PORT, NO_VNI, True)
self.assertFalse(self._create_vlan_mock.call_count)
self.assertFalse(self._send_enable_vlan_on_trunk_int_mock.call_count)
def test_pnet_delete_create_and_trunk(self):
self._test_pnet_delete(auto_create=True, auto_trunk=True)
self._disable_vlan_on_trunk_int_mock.assert_called_once_with(
IP_ADDR, VLAN_ID, INTF_TYPE, NEXUS_PORT, IS_NATIVE)
self._get_nexusvlan_binding_mock.assert_called_once_with(
VLAN_ID, IP_ADDR)
def test_pnet_delete_trunk(self):
self._test_pnet_delete(auto_create=False, auto_trunk=True)
self._disable_vlan_on_trunk_int_mock.assert_called_once_with(
IP_ADDR, VLAN_ID, INTF_TYPE, NEXUS_PORT, IS_NATIVE)
self.assertFalse(self._get_nexusvlan_binding_mock.call_count)
def test_pnet_delete_create(self):
self._test_pnet_delete(auto_create=True, auto_trunk=False)
self.assertFalse(self._disable_vlan_on_trunk_int_mock.call_count)
self._get_nexusvlan_binding_mock.assert_called_once_with(
VLAN_ID, IP_ADDR)
def test_pnet_delete_not_providernet(self):
self._test_pnet_delete(
auto_create=False, auto_trunk=False, is_provider_vlan=False)
self._disable_vlan_on_trunk_int_mock.assert_called_once_with(
IP_ADDR, VLAN_ID, INTF_TYPE, NEXUS_PORT, IS_NATIVE)
self._get_nexusvlan_binding_mock.assert_called_once_with(
VLAN_ID, IP_ADDR)
def test_pnet_replay_not_providernet(self):
self._test_pnet_replay(
auto_create=False, auto_trunk=False, is_provider_vlan=False)
self._save_switch_vlan_range_mock.assert_called_once_with(
IP_ADDR, [(VLAN_ID, NO_VNI)])
self._restore_port_binding_mock.assert_called_once_with(
IP_ADDR, set([VLAN_ID]), mock.ANY, mock.ANY)
def test_pnet_replay_providernet_create_and_trunk(self):
self._test_pnet_replay(
auto_create=True, auto_trunk=True, is_provider_vlan=True)
self._save_switch_vlan_range_mock.assert_called_once_with(
IP_ADDR, [(VLAN_ID, NO_VNI)])
self._restore_port_binding_mock.assert_called_once_with(
IP_ADDR, set([VLAN_ID]), mock.ANY, mock.ANY)
def test_pnet_replay_providernet_create(self):
self._test_pnet_replay(
auto_create=True, auto_trunk=False, is_provider_vlan=True)
self._save_switch_vlan_range_mock.assert_called_once_with(
IP_ADDR, [(VLAN_ID, NO_VNI)])
self.assertFalse(self._restore_port_binding_mock.call_count)
def test_pnet_replay_providernet_trunk(self):
self._test_pnet_replay(
auto_create=False, auto_trunk=True, is_provider_vlan=True)
self.assertFalse(self._save_switch_vlan_range_mock.call_count)
self._restore_port_binding_mock.assert_called_once_with(
IP_ADDR, set([VLAN_ID]), mock.ANY, mock.ANY)
|
from yacs.config import CfgNode
def get_cfg() -> CfgNode:
from .default import _C
return _C.clone()
def update_config(cfg, args):
cfg.defrost()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg |
"""
Some utilities.
"""
from __future__ import unicode_literals
import array
import fcntl
import os
import termios
__all__ = (
'pty_make_controlling_tty',
'set_terminal_size',
'nonblocking',
)
def pty_make_controlling_tty(tty_fd):
"""
This makes the pseudo-terminal the controlling tty. This should be
more portable than the pty.fork() function. Specifically, this should
work on Solaris.
Thanks to pexpect:
http://pexpect.sourceforge.net/pexpect.html
"""
child_name = os.ttyname(tty_fd)
# Disconnect from controlling tty. Harmless if not already connected.
    try:
        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
        if fd >= 0:
            os.close(fd)
    except OSError:
        # Already disconnected. This happens if running inside cron.
        pass
os.setsid()
# Verify we are disconnected from controlling tty
# by attempting to open it again.
    # Catch OSError specifically so that the Exception raised below is not
    # swallowed (a bare except would turn this sanity check into a no-op).
    try:
        fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
        if fd >= 0:
            os.close(fd)
            raise Exception('Failed to disconnect from controlling '
                            'tty. It is still possible to open /dev/tty.')
    except OSError:
        # Good! We are disconnected from a controlling tty.
        pass
# Verify we can open child pty.
fd = os.open(child_name, os.O_RDWR)
if fd < 0:
raise Exception("Could not open child pty, " + child_name)
else:
os.close(fd)
# Verify we now have a controlling tty.
if os.name != 'posix':
# Skip this on BSD-like systems since it will break.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise Exception("Could not open controlling tty, /dev/tty")
else:
os.close(fd)
def set_terminal_size(stdout_fileno, rows, cols):
"""
Set terminal size.
(This is also mainly for internal use. Setting the terminal size
automatically happens when the window resizes. However, sometimes the
process that created a pseudo terminal, and the process that's attached to
the output window are not the same, e.g. in case of a telnet connection, or
unix domain socket, and then we have to sync the sizes by hand.)
"""
# Buffer for the C call
# (The first parameter of 'array.array' needs to be 'str' on both Python 2
# and Python 3.)
buf = array.array(str('h'), [rows, cols, 0, 0])
# Do: TIOCSWINSZ (Set)
fcntl.ioctl(stdout_fileno, termios.TIOCSWINSZ, buf)
class nonblocking(object):
"""
Make fd non blocking.
"""
def __init__(self, fd):
self.fd = fd
def __enter__(self):
self.orig_fl = fcntl.fcntl(self.fd, fcntl.F_GETFL)
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl | os.O_NONBLOCK)
def __exit__(self, *args):
fcntl.fcntl(self.fd, fcntl.F_SETFL, self.orig_fl)
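# Usage sketch for nonblocking (assumption: POSIX; reading an empty
# non-blocking pipe raises OSError/EAGAIN instead of hanging):
if __name__ == '__main__':
    r, w = os.pipe()
    with nonblocking(r):
        try:
            os.read(r, 1)
        except OSError:
            print('read would block, as expected')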
|
"""user_fuel_database.py
Create your own fuel database
"""
from pyrolib.fuelmap import FuelDatabase, FuelMap, BalbiFuel
# create fuel db
my_db = FuelDatabase()
# create fuel corresponding to your needs
short_grass_balbi = BalbiFuel(e=0.3)
tall_grass_balbi = BalbiFuel(e=0.3)
# add fuels to the database
# add fuels to the database
## the "BalbiFuel" key indicates that a BalbiFuel object is being added
my_db['short_grass'] = {"BalbiFuel": short_grass_balbi}
my_db['tall_grass'] = {"BalbiFuel": tall_grass_balbi}
# save the database
my_db.dump_database(
filename = "user_db",
info = "some information about the db",
compact = True
) |
"""
Get a raw dataset with gps coordinates and dump it into a dictionary with format:
dict(
column_name -> dict(
code -> value
)
)
Each of these dictionaries must have a 'lat' and a 'lon' column (latitude, longitude).
So, if you want to check the coordinates of the city with postal code 75014 (INSEE code 75114), you can get them using:
```
import dill
with open('data/prepared/insee_codes.pkl', 'rb') as file_id:
postal_codes = dill.load(file_id)
lat = postal_codes['lat']['75114']
lon = postal_codes['lon']['75114']
print(lat, lon)
```
All of the output files are put into the /data/prepared folder
"""
import os
import dill
import pandas as pd
RAW_DATA_PATH = "/data/raw/chorus-dt"
PREPARED_DATA_PATH = "/data/prepared"
def prepare_insee():
"""
    Prepare the postal codes to gps coordinates data using the laposte dataset available
    here: https://www.data.gouv.fr/en/datasets/base-officielle-des-codes-postaux/
"""
insee_geocode = pd.read_csv(os.path.join(RAW_DATA_PATH, "laposte_hexasmal.csv"), delimiter=";")
insee_geocode = insee_geocode.set_index("Code_commune_INSEE").dropna(subset=["coordonnees_gps"])
insee_geocode[["lat", "lon"]] = insee_geocode["coordonnees_gps"].str.split(",", expand=True)
insee_geocode = insee_geocode.to_dict()
with open(os.path.join(PREPARED_DATA_PATH, "insee_codes.pkl"), "wb") as file_id:
dill.dump(insee_geocode, file_id)
def prepare_uic():
"""
Prepare the UIC codes using data available here : https://ressources.data.sncf.com/explore/dataset/liste-des-gares/table/
The UIC codes in the chorus dump are truncated UIC codes.
"""
stations_list = pd.read_csv(os.path.join(RAW_DATA_PATH, "liste-des-gares.csv"), delimiter=";")
stations_list["truncated_code_uic"] = stations_list["CODE_UIC"].apply(lambda x: int(str(x)[:-1]))
stations_list = stations_list.set_index("truncated_code_uic").dropna(subset=["C_GEO"])
stations_list.rename(columns={"X_WGS84": "lon", "Y_WGS84": "lat"}, inplace=True)
stations_list = stations_list.to_dict()
with open(os.path.join(PREPARED_DATA_PATH, "uic_codes.pkl"), "wb") as file_id:
dill.dump(stations_list, file_id)
def prepare_tvs():
"""
Get the TVS codes available here:
https://ressources.data.sncf.com/explore/dataset/referentiel-gares-voyageurs
    This is not used right now, as it seems the chorus dump is not very well filled; for example, we have rows like:
    TR_FRECO - Quimperle
    But ECO is the TVS code of Écommoy, and the TVS code of Quimperle is QPL
"""
tvs_codes = pd.read_csv(os.path.join(RAW_DATA_PATH, "referentiel-gares-voyageurs.csv"), delimiter=";")
tvs_codes = tvs_codes.dropna(subset=["TVS", "WGS 84"])
tvs_codes["extended_TVS"] = "TR_FR" + tvs_codes["TVS"]
tvs_codes = tvs_codes.set_index("extended_TVS")
tvs_codes[["lat", "lon"]] = tvs_codes["WGS 84"].str.split(",", expand=True)
tvs_codes = tvs_codes.to_dict()
with open(os.path.join(PREPARED_DATA_PATH, "tvs_codes.pkl"), "wb") as file_id:
dill.dump(tvs_codes, file_id)
def prepare_iata():
"""
Get the IATA codes available here:
'https://ourairports.com/data/airports.csv'
    Some IATA-coded airports lack geocoding info
"""
iata_codes = pd.read_csv(os.path.join(RAW_DATA_PATH, "airports.csv")).set_index("iata_code")
iata_codes = iata_codes.loc[~iata_codes.latitude_deg.isna() & ~iata_codes.longitude_deg.isna()]
iata_codes[["lat", "lon"]] = iata_codes[["latitude_deg", "longitude_deg"]]
iata_codes = iata_codes.to_dict()
with open(os.path.join(PREPARED_DATA_PATH, "iata_codes.pkl"), "wb") as file_id:
dill.dump(iata_codes, file_id)
def main():
prepare_uic()
prepare_insee()
prepare_tvs()
prepare_iata()
if __name__ == "__main__":
main()
|
import os
import shutil
from platform import system
from urllib.request import urlretrieve
from zipfile import ZipFile
class OCSIUpdater:
def __init__(self):
self.data = []
self.local = []
self.zip = "https://github.com/KernelWanderers/OCSysInfo/archive/main.zip"
self.delim = "\\" if system().lower() == "windows" else "/"
self.root = self.delim.join(
os.path.dirname(__file__).split(self.delim)[:-1]
)
def run(self):
self.obtain_updated()
for path in ["main.py", "src", "update"]:
self.obtain_relative(path)
self.handle_diffs()
def handle_diffs(self):
if not self.data or not self.local:
return
self.handle_diff(self.local, self.data)
def handle_diff(self, parent, to_cmp):
        # dict-union merge (requires Python 3.9+): tag each candidate as unmatched
        matched = [{"matched": False} | x for x in to_cmp]
for value in parent:
found = False
abs_lcl = os.path.join(self.root, value.get("path"))
for cmp in to_cmp:
conditions = {
"contents": cmp.get("contents", "x") == value.get("contents", "y"),
"name": cmp.get("name", "x") == value.get("name", "y"),
"path": cmp.get("path", "x") == value.get("path", "y"),
"dir": os.path.dirname(cmp.get("path", "/x/z")) == os.path.dirname(value.get("path", "/y/q"))
}
match_found = list(
filter(
lambda x: x.get("name", "x") == cmp.get("name", "y") and x.get(
"path", "/x/z") == cmp.get("path", "/y/q"),
matched
)
)
abs_cmp = os.path.join(self.root, cmp.get("path"))
# File was edited
if (
not conditions["contents"] and
conditions["name"] and
conditions["dir"]
):
found = True
if match_found:
match_found[0]["matched"] = True
try:
print(f"Editing {value.get('name')}...")
                        # mode "w" already truncates, and `with` closes the file
                        with open(abs_lcl, "w") as file:
                            file.write(cmp.get("contents"))
print(f"Successfully edited {value.get('name')}!\n")
except Exception as e:
print(
f"Failed to edit {value.get('name')}!\n\t^^^^^^^{str(e)}\n")
break
# File was renamed
elif (
conditions["contents"] and
not conditions["name"] and
conditions["dir"]
):
found = True
if match_found:
match_found["matched"] += 1
try:
print(
f"Attempting to rename '{os.path.basename(abs_lcl)}' to '{os.path.basename(abs_cmp)}'...")
os.rename(abs_lcl, abs_cmp)
print(
f"Successfully renamed '{os.path.basename(abs_lcl)}' to '{os.path.basename(abs_cmp)}'!\n")
except Exception as e:
print(
f"Failed to rename '{os.path.basename(abs_lcl)}' to '{os.path.basename(abs_cmp)}'!\n\t^^^^^^^{str(e)}\n")
break
# File was moved
elif (
conditions["contents"] and
conditions["name"] and
not conditions["dir"]
):
found = True
if match_found:
match_found[0]["matched"] = True
try:
print(f"Deleting {abs_lcl} and creating {abs_cmp}...")
os.remove(abs_lcl)
                        with open(abs_cmp, "w") as file:
                            file.write(cmp.get("contents"))
print(
f"Successfully deleted '{abs_lcl}' and created '{abs_cmp}'!\n")
except Exception as e:
print(
f"Failed to delete '{abs_lcl}' and create '{abs_cmp}'!\n\t^^^^^^^{str(e)}\n")
break
# If the file stays the same,
# do nothing.
elif (
conditions["contents"] and
conditions["name"] and
conditions["dir"]
):
if match_found:
match_found[0]["matched"] = True
found = True
break
# If nothing matches,
# it means the file was removed.
if not found:
try:
print(f"Deleting {abs_lcl}...")
os.remove(abs_lcl)
print(f"Successfully deleted {abs_lcl}!\n")
except Exception as e:
print(f"Failed to delete {abs_lcl}!\n\t^^^^^^^{str(e)}\n")
# “Clever” way of determining
# whether or not new files were pushed
# to the repository.
for match in matched:
if not match["matched"]:
try:
print(f"Creating '{match.get('name')}'...")
                with open(os.path.join(self.root, match.get('path')), 'w') as file:
                    file.write(match.get('contents'))
print(
f"Successfully created '{match.get('name')}' at '{match.get('path')}'!")
except Exception as e:
print(
f"Failed to create '{match.get('name')}'!\n\t^^^^^^^{str(e)}")
continue
def obtain_relative(self, path="src", o_type="local"):
if (
"__pycache__" in path.lower() and
os.path.isdir(os.path.join(self.root, path))
):
return
pp = os.path.join(
self.root,
path
)
if os.path.isfile(pp):
data = {
"name": path.split(self.delim)[-1],
"path": pp.split(f"OCSysInfo{self.delim}")[-1].split(f"OCSysInfo-main{self.delim}")[-1],
"contents": open(pp, "r").read()
}
if o_type == "github":
self.data.append(data)
else:
self.local.append(data)
return
        entries = os.listdir(pp)
        for item in entries:
abs_path = os.path.join(
self.root,
path,
item
)
if os.path.isdir(abs_path):
self.obtain_relative(os.path.join(path, item), o_type)
continue
elif (
".ds_store" in item.lower() and
os.path.isfile(abs_path)
):
continue
to_split = f"src{self.delim}" if f"{self.delim}src{self.delim}" in abs_path else f"update{self.delim}"
data = {
"name": item,
"path": to_split + abs_path.split(to_split)[1],
"contents": open(abs_path, "r").read()
}
if o_type == "github":
self.data.append(data)
else:
self.local.append(data)
def obtain_updated(self):
try:
update_dir = os.path.join(
self.root,
"UpdateTemp"
)
if not os.path.isdir(update_dir):
os.mkdir(update_dir)
path = os.path.join(
update_dir,
"OCSysInfo_Update.zip"
)
print("Downloading ZIP file...")
urlretrieve(
self.zip,
path
)
print("Successfully downloaded ZIP!\n")
except Exception:
print("[CONERROR]: Unable to download ZIP, ignoring – cancelling...\n")
self.data = []
return -1
with ZipFile(path, "r") as zip:
zip.extractall(update_dir)
zip.close()
name = ([x for x in os.listdir(update_dir)
if "ocsysinfo" in x.lower()] + [-1])[0]
if name == -1:
self.data = []
return
for path in ["main.py", "src", "update"]:
self.obtain_relative(
path=os.path.join(
"UpdateTemp",
name,
path
),
o_type="github"
)
# Remove temporary update directory
# after we're finished.
shutil.rmtree(
os.path.join(
self.root,
"UpdateTemp"
)
)
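# Pure-logic sketch (hypothetical helper, mirroring the condition table used
# in handle_diff above) of how a (contents, name, dir) comparison classifies
# a file pair:
def classify_diff(contents_same, name_same, dir_same):
    if not contents_same and name_same and dir_same:
        return "edited"
    if contents_same and not name_same and dir_same:
        return "renamed"
    if contents_same and name_same and not dir_same:
        return "moved"
    if contents_same and name_same and dir_same:
        return "unchanged"
    return "removed or new"

if __name__ == '__main__':
    print(classify_diff(False, True, True))  # edited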
|
def find_minimum_number_of_moves(rows, cols, start_row, start_col, end_row, end_col):
# row_low = start_row if start_row < end_row else end_row
# row_high = start_row if start_row > end_row else end_row
# col_low = start_col if start_col < end_col else end_col
# col_high = start_col if start_col > end_col else end_col
deltas = [(-2, -1), (-2, +1), (+2, -1), (+2, +1), (-1, -2), (-1, +2), (+1, -2), (+1, +2)]
    def getAllValidMoves(y0, x0):
        validPositions = []
        for (dy, dx) in deltas:
            xCandidate = x0 + dx
            yCandidate = y0 + dy
            # Bound candidate squares by the board size, not by the target square.
            if 0 <= xCandidate < cols and 0 <= yCandidate < rows:
                validPositions.append([yCandidate, xCandidate])
        return validPositions

    # Breadth-first search over board squares. The visited set prevents
    # re-expanding squares; without it the queue grows without bound and an
    # unreachable target never terminates.
    visited = {(start_row, start_col)}
    q = [(start_row, start_col, 0)]
    while q:
        row, col, level = q.pop(0)
        if row == end_row and col == end_col:
            return level
        for move in getAllValidMoves(row, col):
            if (move[0], move[1]) not in visited:
                visited.add((move[0], move[1]))
                q.append((move[0], move[1], level + 1))
    return -1
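# Example (assumption: an 8x8 board, zero-indexed squares): the minimum
# number of knight moves between opposite corners of an 8x8 board is 6.
if __name__ == '__main__':
    print(find_minimum_number_of_moves(8, 8, 0, 0, 7, 7))  # -> 6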
|
# Module: eapenum.py
# Description: Enumerates insecure Extensible Authentication Protocol (EAP) user identities.
# The Eapenum module will perform a deauthentication attack against a single access point/BSSID,
# while client probes attempt to reconnect Eapenum will sniff for insecure EAP user identities.
# Author(s): Nick Sanzotta
# Version: v 1.09282017
try:
    import os, sys, signal, threading, time
from datetime import datetime
from scapy.all import *
from theme import *
from dbcommands import DB
from helpers import monitormode
except Exception as e:
    print('\n [!] EAPENUM - Error: %s' % (e))
sys.exit(1)
# Keep outside of class eapEnum()
identities = set()
bssid = set()
class eapEnum(threading.Thread):
def __init__(self, db_path, apmac, timeout, interface, channel):
threading.Thread.__init__(self)
        self.daemon = False  # non-daemon thread
self.apmac = apmac
        self.timeout = timeout  # must be > 0; if you choose not to include a timeout, the argument must be removed.
self.interface = interface
self.channel = channel
self.counter = 0
self.userDict = {}
self.wifiDict = {}
self.future = time.time()+10
self.wirelessInt = str(self.interface.get_ifname())
self.log_timestamp = '{:%Y-%m-%d_%H:%M:%S}'.format(datetime.now())
self.identities_log = 'data/identities/ch%s_%s.%s' % (self.channel, self.apmac, self.log_timestamp)
self.db_path = db_path
def run(self):
self.database_connect() # Connect to database
self.datafolders_check() # Check Identities folder exists
try:
print(normal('*')+'Packet sniffing on %s for the next %s seconds.' % (self.wirelessInt, self.timeout))
print(blue('*')+'Identities Log: %s ' % (self.identities_log))
print(normal('*')+'(Press Ctrl-C to quit)\n')
sniff(iface=self.wirelessInt, timeout=self.timeout, prn=self.packethandler, count=0)
print(normal('*')+'Packet Sniffing Stopped, %s seconds has exceeded: ' % (self.timeout))
monitormode.monitor_stop(self.wirelessInt)
except Exception as e:
# monitormode.monitor_stop(self.wirelessInt)
print('\n'+red('!')+'Packet Sniffing Aborted: %s' % (e))
def datafolders_check(self):
# Creates Identities folder if missing
identities_directory = 'data/identities'
if not os.path.exists(identities_directory):
os.makedirs(identities_directory)
def packethandler(self, pkt):
essid = ''
# clients=[]
# bssid=set()
# mgmtFrameTypes = (0,2,4)
# dataFrameSubTypes = ()
if pkt.haslayer(EAP):
# Filter out value: None and duplicate identities.
if pkt.getlayer(EAP).identity != None and (pkt.getlayer(EAP).identity not in identities):
identity = pkt.getlayer(EAP).identity
# Append to set: identities
identities.add(identity)
                # Skip empty identities (Request/Identity frames carry no
                # username and would otherwise log a NULL entry)
                if identity and identity != 'Request':
print(green('*')+'%s' % (identity))
# Write to Identity log.
with open(self.identities_log, 'a') as f1:
f1.write(identity)
f1.write('\n')
# Commit to database
try:
self.db.identity_commit(identity, essid)
except Exception as e:
print(red('!')+'WARNING - (EAPENUM) Could not save to database: %s' % (e))
def database_connect(self):
try:
self.db = DB(self.db_path)
except Exception as e:
print(red('!')+'WARNING - (EAPENUM) Could not connect to database: %s' % (e))
pass
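# Offline sketch (assumption: scapy installed; run within this module's
# environment): an EAP Response/Identity frame carries the user identity in
# cleartext, which is exactly what packethandler() extracts from sniffed traffic.
if __name__ == '__main__':
    pkt = EAP(code=2, id=1, type=1, identity='alice@example.org')
    if pkt.haslayer(EAP):
        print(pkt.getlayer(EAP).identity)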
|
import pytest
import json
import os.path
from fixture.application import MyApplication
import clr
clr.AddReferenceByName('Microsoft.Office.Interop.Excel, Version=14.0.0.0, Culture=neutral, PublicKeyToken=71e9bce111e9429c')
from Microsoft.Office.Interop import Excel
fixture = None
target = None
def load_config(c_file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), c_file)
with open(config_file) as f:
target = json.load(f)
return target
def pytest_addoption(parser):
parser.addoption("--target", action="store", default="target.json")
@pytest.fixture()
def app(request):
global fixture
web_config = load_config(request.config.getoption("--target"))
if fixture is None:
fixture = MyApplication(web_config["path_to_app"], web_config["main_window_header"])
return fixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.destroy()
request.addfinalizer(fin)
return fixture
def pytest_generate_tests(metafunc):
for fixture in metafunc.fixturenames:
if fixture.startswith("xlsx_"):
testdata = load_from_xlsx(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
def load_from_xlsx(x_file):
data_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/%s.xlsx" % x_file)
excel = Excel.ApplicationClass()
workbook = excel.Workbooks.Open(data_file)
sheet = workbook.ActiveSheet
data = []
sheet.Rows.ClearFormats()
rows_with_data = sheet.UsedRange.Rows.Count
for i in range(rows_with_data):
data.append(str(sheet.Range["A%s" % (i + 1)].Value2))
workbook.Close(SaveChanges=False)
excel.Quit()
return data
|
def SoSoSplotchy(n):
if n==0:
return 1
elif n==1:
return 2
else:
return 2 * SoSoSplotchy(n-1) + SoSoSplotchy(n-2)
def main():
    print(SoSoSplotchy(5))

if __name__ == '__main__':
    main()
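# Equivalent iterative sketch: the function above computes the Pell-style
# recurrence a(n) = 2*a(n-1) + a(n-2) with a(0)=1, a(1)=2
# (1, 2, 5, 12, 29, 70, ...), done here in O(n) instead of exponential time.
def soso_splotchy_iter(n):
    a, b = 1, 2
    for _ in range(n):
        a, b = b, 2 * b + a
    return a

if __name__ == '__main__':
    print(soso_splotchy_iter(5))  # 70, same as SoSoSplotchy(5)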
|
import splunklib.client as client
class ServerSettings(client.Entity):
def __init__(self, service, **kwargs):
client.Entity.__init__(self, service, 'server/settings', **kwargs)
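# Usage sketch (hypothetical host and credentials): obtain a Service via
# splunklib's documented client.connect(), then read the settings entity.
if __name__ == '__main__':
    service = client.connect(host='localhost', port=8089,
                             username='admin', password='changeme')
    settings = ServerSettings(service)
    print(settings.content)  # the entity's properties, dict-like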
|